aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mmc
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/mmc')
-rw-r--r--drivers/mmc/card/Kconfig9
-rw-r--r--drivers/mmc/card/block.c723
-rw-r--r--drivers/mmc/card/mmc_test.c723
-rw-r--r--drivers/mmc/card/queue.c223
-rw-r--r--drivers/mmc/card/queue.h33
-rw-r--r--drivers/mmc/core/Kconfig17
-rw-r--r--drivers/mmc/core/bus.c39
-rw-r--r--drivers/mmc/core/core.c446
-rw-r--r--drivers/mmc/core/core.h1
-rw-r--r--drivers/mmc/core/host.c22
-rw-r--r--drivers/mmc/core/host.h8
-rw-r--r--drivers/mmc/core/mmc.c61
-rw-r--r--drivers/mmc/core/mmc_ops.c72
-rw-r--r--drivers/mmc/core/mmc_ops.h3
-rw-r--r--drivers/mmc/core/sd.c244
-rw-r--r--drivers/mmc/core/sdio.c481
-rw-r--r--drivers/mmc/core/sdio_bus.c21
-rw-r--r--drivers/mmc/core/sdio_io.c33
-rw-r--r--drivers/mmc/host/Kconfig84
-rw-r--r--drivers/mmc/host/Makefile26
-rw-r--r--drivers/mmc/host/at91_mci.c3
-rw-r--r--drivers/mmc/host/at91_mci.h115
-rw-r--r--drivers/mmc/host/atmel-mci.c63
-rw-r--r--drivers/mmc/host/dw_mmc.c454
-rw-r--r--drivers/mmc/host/dw_mmc.h17
-rw-r--r--drivers/mmc/host/mmci.c171
-rw-r--r--drivers/mmc/host/mmci.h8
-rw-r--r--drivers/mmc/host/mxcmmc.c9
-rw-r--r--drivers/mmc/host/mxs-mmc.c30
-rw-r--r--drivers/mmc/host/omap_hsmmc.c671
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c44
-rw-r--r--drivers/mmc/host/sdhci-dove.c43
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c373
-rw-r--r--drivers/mmc/host/sdhci-of-core.c253
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c86
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c67
-rw-r--r--drivers/mmc/host/sdhci-of.h42
-rw-r--r--drivers/mmc/host/sdhci-pci.c54
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c249
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h90
-rw-r--r--drivers/mmc/host/sdhci-pxa.c303
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c244
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c290
-rw-r--r--drivers/mmc/host/sdhci-s3c.c12
-rw-r--r--drivers/mmc/host/sdhci-tegra.c1027
-rw-r--r--drivers/mmc/host/sdhci.c238
-rw-r--r--drivers/mmc/host/sdhci.h7
-rw-r--r--drivers/mmc/host/sh_mmcif.c27
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c36
-rw-r--r--drivers/mmc/host/tmio_mmc.c2
-rw-r--r--drivers/mmc/host/tmio_mmc.h54
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c8
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c205
-rw-r--r--drivers/mmc/host/vub300.c6
54 files changed, 6371 insertions, 2199 deletions
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 3b1f783bf92..ebb4afe6c70 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -50,6 +50,15 @@ config MMC_BLOCK_BOUNCE
50 50
51 If unsure, say Y here. 51 If unsure, say Y here.
52 52
53config MMC_BLOCK_DEFERRED_RESUME
54 bool "Deferr MMC layer resume until I/O is requested"
55 depends on MMC_BLOCK
56 default n
57 help
58 Say Y here to enable deferred MMC resume until I/O
59 is requested. This will reduce overall resume latency and
60 save power when theres an SD card inserted but not being used.
61
53config SDIO_UART 62config SDIO_UART
54 tristate "SDIO UART/GPS class support" 63 tristate "SDIO UART/GPS class support"
55 help 64 help
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index f85e4222455..2bd93d7a517 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -59,6 +59,8 @@ MODULE_ALIAS("mmc:block");
59#define INAND_CMD38_ARG_SECTRIM1 0x81 59#define INAND_CMD38_ARG_SECTRIM1 0x81
60#define INAND_CMD38_ARG_SECTRIM2 0x88 60#define INAND_CMD38_ARG_SECTRIM2 0x88
61 61
62#define MMC_CMD_RETRIES 10
63
62static DEFINE_MUTEX(block_mutex); 64static DEFINE_MUTEX(block_mutex);
63 65
64/* 66/*
@@ -106,6 +108,16 @@ struct mmc_blk_data {
106 108
107static DEFINE_MUTEX(open_lock); 109static DEFINE_MUTEX(open_lock);
108 110
111enum mmc_blk_status {
112 MMC_BLK_SUCCESS = 0,
113 MMC_BLK_PARTIAL,
114 MMC_BLK_RETRY,
115 MMC_BLK_RETRY_SINGLE,
116 MMC_BLK_DATA_ERR,
117 MMC_BLK_CMD_ERR,
118 MMC_BLK_ABORT,
119};
120
109module_param(perdev_minors, int, 0444); 121module_param(perdev_minors, int, 0444);
110MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device"); 122MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
111 123
@@ -126,11 +138,7 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
126 138
127static inline int mmc_get_devidx(struct gendisk *disk) 139static inline int mmc_get_devidx(struct gendisk *disk)
128{ 140{
129 int devmaj = MAJOR(disk_devt(disk)); 141 int devidx = disk->first_minor / perdev_minors;
130 int devidx = MINOR(disk_devt(disk)) / perdev_minors;
131
132 if (!devmaj)
133 devidx = disk->first_minor / perdev_minors;
134 return devidx; 142 return devidx;
135} 143}
136 144
@@ -427,14 +435,6 @@ static const struct block_device_operations mmc_bdops = {
427#endif 435#endif
428}; 436};
429 437
430struct mmc_blk_request {
431 struct mmc_request mrq;
432 struct mmc_command sbc;
433 struct mmc_command cmd;
434 struct mmc_command stop;
435 struct mmc_data data;
436};
437
438static inline int mmc_blk_part_switch(struct mmc_card *card, 438static inline int mmc_blk_part_switch(struct mmc_card *card,
439 struct mmc_blk_data *md) 439 struct mmc_blk_data *md)
440{ 440{
@@ -525,7 +525,20 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
525 return result; 525 return result;
526} 526}
527 527
528static u32 get_card_status(struct mmc_card *card, struct request *req) 528static int send_stop(struct mmc_card *card, u32 *status)
529{
530 struct mmc_command cmd = {0};
531 int err;
532
533 cmd.opcode = MMC_STOP_TRANSMISSION;
534 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
535 err = mmc_wait_for_cmd(card->host, &cmd, 5);
536 if (err == 0)
537 *status = cmd.resp[0];
538 return err;
539}
540
541static int get_card_status(struct mmc_card *card, u32 *status, int retries)
529{ 542{
530 struct mmc_command cmd = {0}; 543 struct mmc_command cmd = {0};
531 int err; 544 int err;
@@ -534,11 +547,145 @@ static u32 get_card_status(struct mmc_card *card, struct request *req)
534 if (!mmc_host_is_spi(card->host)) 547 if (!mmc_host_is_spi(card->host))
535 cmd.arg = card->rca << 16; 548 cmd.arg = card->rca << 16;
536 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; 549 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
537 err = mmc_wait_for_cmd(card->host, &cmd, 0); 550 err = mmc_wait_for_cmd(card->host, &cmd, retries);
551 if (err == 0)
552 *status = cmd.resp[0];
553 return err;
554}
555
556#define ERR_RETRY 2
557#define ERR_ABORT 1
558#define ERR_CONTINUE 0
559
560static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
561 bool status_valid, u32 status)
562{
563 switch (error) {
564 case -EILSEQ:
565 /* response crc error, retry the r/w cmd */
566 pr_err("%s: %s sending %s command, card status %#x\n",
567 req->rq_disk->disk_name, "response CRC error",
568 name, status);
569 return ERR_RETRY;
570
571 case -ETIMEDOUT:
572 pr_err("%s: %s sending %s command, card status %#x\n",
573 req->rq_disk->disk_name, "timed out", name, status);
574
575 /* If the status cmd initially failed, retry the r/w cmd */
576 if (!status_valid) {
577 pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
578 return ERR_RETRY;
579 }
580 /*
581 * If it was a r/w cmd crc error, or illegal command
582 * (eg, issued in wrong state) then retry - we should
583 * have corrected the state problem above.
584 */
585 if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
586 pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
587 return ERR_RETRY;
588 }
589
590 /* Otherwise abort the command */
591 pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
592 return ERR_ABORT;
593
594 default:
595 /* We don't understand the error code the driver gave us */
596 pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
597 req->rq_disk->disk_name, error, status);
598 return ERR_ABORT;
599 }
600}
601
602/*
603 * Initial r/w and stop cmd error recovery.
604 * We don't know whether the card received the r/w cmd or not, so try to
605 * restore things back to a sane state. Essentially, we do this as follows:
606 * - Obtain card status. If the first attempt to obtain card status fails,
607 * the status word will reflect the failed status cmd, not the failed
608 * r/w cmd. If we fail to obtain card status, it suggests we can no
609 * longer communicate with the card.
610 * - Check the card state. If the card received the cmd but there was a
611 * transient problem with the response, it might still be in a data transfer
612 * mode. Try to send it a stop command. If this fails, we can't recover.
613 * - If the r/w cmd failed due to a response CRC error, it was probably
614 * transient, so retry the cmd.
615 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
616 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
617 * illegal cmd, retry.
618 * Otherwise we don't understand what happened, so abort.
619 */
620static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
621 struct mmc_blk_request *brq)
622{
623 bool prev_cmd_status_valid = true;
624 u32 status, stop_status = 0;
625 int err, retry;
626
627 /*
628 * Try to get card status which indicates both the card state
629 * and why there was no response. If the first attempt fails,
630 * we can't be sure the returned status is for the r/w command.
631 */
632 for (retry = 2; retry >= 0; retry--) {
633 err = get_card_status(card, &status, 0);
634 if (!err)
635 break;
636
637 prev_cmd_status_valid = false;
638 pr_err("%s: error %d sending status command, %sing\n",
639 req->rq_disk->disk_name, err, retry ? "retry" : "abort");
640 }
641
642 /* We couldn't get a response from the card. Give up. */
538 if (err) 643 if (err)
539 printk(KERN_ERR "%s: error %d sending status command", 644 return ERR_ABORT;
540 req->rq_disk->disk_name, err); 645
541 return cmd.resp[0]; 646 /*
647 * Check the current card state. If it is in some data transfer
648 * mode, tell it to stop (and hopefully transition back to TRAN.)
649 */
650 if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
651 R1_CURRENT_STATE(status) == R1_STATE_RCV) {
652 err = send_stop(card, &stop_status);
653 if (err)
654 pr_err("%s: error %d sending stop command\n",
655 req->rq_disk->disk_name, err);
656
657 /*
658 * If the stop cmd also timed out, the card is probably
659 * not present, so abort. Other errors are bad news too.
660 */
661 if (err)
662 return ERR_ABORT;
663 }
664
665 /* Check for set block count errors */
666 if (brq->sbc.error)
667 return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
668 prev_cmd_status_valid, status);
669
670 /* Check for r/w command errors */
671 if (brq->cmd.error)
672 return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
673 prev_cmd_status_valid, status);
674
675 /* Now for stop errors. These aren't fatal to the transfer. */
676 pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
677 req->rq_disk->disk_name, brq->stop.error,
678 brq->cmd.resp[0], status);
679
680 /*
681 * Subsitute in our own stop status as this will give the error
682 * state which happened during the execution of the r/w command.
683 */
684 if (stop_status) {
685 brq->stop.resp[0] = stop_status;
686 brq->stop.error = 0;
687 }
688 return ERR_CONTINUE;
542} 689}
543 690
544static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) 691static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
@@ -669,242 +816,333 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
669 } 816 }
670} 817}
671 818
672static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req) 819#define CMD_ERRORS \
820 (R1_OUT_OF_RANGE | /* Command argument out of range */ \
821 R1_ADDRESS_ERROR | /* Misaligned address */ \
822 R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
823 R1_WP_VIOLATION | /* Tried to write to protected block */ \
824 R1_CC_ERROR | /* Card controller error */ \
825 R1_ERROR) /* General/unknown error */
826
827static int mmc_blk_err_check(struct mmc_card *card,
828 struct mmc_async_req *areq)
673{ 829{
674 struct mmc_blk_data *md = mq->data; 830 enum mmc_blk_status ret = MMC_BLK_SUCCESS;
675 struct mmc_card *card = md->queue.card; 831 struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
676 struct mmc_blk_request brq; 832 mmc_active);
677 int ret = 1, disable_multi = 0; 833 struct mmc_blk_request *brq = &mq_mrq->brq;
834 struct request *req = mq_mrq->req;
678 835
679 /* 836 /*
680 * Reliable writes are used to implement Forced Unit Access and 837 * sbc.error indicates a problem with the set block count
681 * REQ_META accesses, and are supported only on MMCs. 838 * command. No data will have been transferred.
839 *
840 * cmd.error indicates a problem with the r/w command. No
841 * data will have been transferred.
842 *
843 * stop.error indicates a problem with the stop command. Data
844 * may have been transferred, or may still be transferring.
682 */ 845 */
683 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || 846 if (brq->sbc.error || brq->cmd.error || brq->stop.error) {
684 (req->cmd_flags & REQ_META)) && 847 switch (mmc_blk_cmd_recovery(card, req, brq)) {
685 (rq_data_dir(req) == WRITE) && 848 case ERR_RETRY:
686 (md->flags & MMC_BLK_REL_WR); 849 return MMC_BLK_RETRY;
850 case ERR_ABORT:
851 return MMC_BLK_ABORT;
852 case ERR_CONTINUE:
853 break;
854 }
855 }
687 856
688 do { 857 /*
689 struct mmc_command cmd = {0}; 858 * Check for errors relating to the execution of the
690 u32 readcmd, writecmd, status = 0; 859 * initial command - such as address errors. No data
691 860 * has been transferred.
692 memset(&brq, 0, sizeof(struct mmc_blk_request)); 861 */
693 brq.mrq.cmd = &brq.cmd; 862 if (brq->cmd.resp[0] & CMD_ERRORS) {
694 brq.mrq.data = &brq.data; 863 pr_err("%s: r/w command failed, status = %#x\n",
695 864 req->rq_disk->disk_name, brq->cmd.resp[0]);
696 brq.cmd.arg = blk_rq_pos(req); 865 return MMC_BLK_ABORT;
697 if (!mmc_card_blockaddr(card)) 866 }
698 brq.cmd.arg <<= 9;
699 brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
700 brq.data.blksz = 512;
701 brq.stop.opcode = MMC_STOP_TRANSMISSION;
702 brq.stop.arg = 0;
703 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
704 brq.data.blocks = blk_rq_sectors(req);
705 867
706 /* 868 /*
707 * The block layer doesn't support all sector count 869 * Everything else is either success, or a data error of some
708 * restrictions, so we need to be prepared for too big 870 * kind. If it was a write, we may have transitioned to
709 * requests. 871 * program mode, which we have to wait for it to complete.
710 */ 872 */
711 if (brq.data.blocks > card->host->max_blk_count) 873 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
712 brq.data.blocks = card->host->max_blk_count; 874 u32 status;
875 do {
876 int err = get_card_status(card, &status, 5);
877 if (err) {
878 printk(KERN_ERR "%s: error %d requesting status\n",
879 req->rq_disk->disk_name, err);
880 return MMC_BLK_CMD_ERR;
881 }
882 /*
883 * Some cards mishandle the status bits,
884 * so make sure to check both the busy
885 * indication and the card state.
886 */
887 } while (!(status & R1_READY_FOR_DATA) ||
888 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
889 }
713 890
714 /* 891 if (brq->data.error) {
715 * After a read error, we redo the request one sector at a time 892 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
716 * in order to accurately determine which sectors can be read 893 req->rq_disk->disk_name, brq->data.error,
717 * successfully. 894 (unsigned)blk_rq_pos(req),
718 */ 895 (unsigned)blk_rq_sectors(req),
719 if (disable_multi && brq.data.blocks > 1) 896 brq->cmd.resp[0], brq->stop.resp[0]);
720 brq.data.blocks = 1;
721 897
722 if (brq.data.blocks > 1 || do_rel_wr) {
723 /* SPI multiblock writes terminate using a special
724 * token, not a STOP_TRANSMISSION request.
725 */
726 if (!mmc_host_is_spi(card->host) ||
727 rq_data_dir(req) == READ)
728 brq.mrq.stop = &brq.stop;
729 readcmd = MMC_READ_MULTIPLE_BLOCK;
730 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
731 } else {
732 brq.mrq.stop = NULL;
733 readcmd = MMC_READ_SINGLE_BLOCK;
734 writecmd = MMC_WRITE_BLOCK;
735 }
736 if (rq_data_dir(req) == READ) { 898 if (rq_data_dir(req) == READ) {
737 brq.cmd.opcode = readcmd; 899 if (brq->data.blocks > 1) {
738 brq.data.flags |= MMC_DATA_READ; 900 /* Redo read one sector at a time */
901 pr_warning("%s: retrying using single block read\n",
902 req->rq_disk->disk_name);
903 return MMC_BLK_RETRY_SINGLE;
904 }
905 return MMC_BLK_DATA_ERR;
739 } else { 906 } else {
740 brq.cmd.opcode = writecmd; 907 return MMC_BLK_CMD_ERR;
741 brq.data.flags |= MMC_DATA_WRITE;
742 } 908 }
909 }
743 910
744 if (do_rel_wr) 911 if (ret == MMC_BLK_SUCCESS &&
745 mmc_apply_rel_rw(&brq, card, req); 912 blk_rq_bytes(req) != brq->data.bytes_xfered)
913 ret = MMC_BLK_PARTIAL;
746 914
747 /* 915 return ret;
748 * Pre-defined multi-block transfers are preferable to 916}
749 * open ended-ones (and necessary for reliable writes).
750 * However, it is not sufficient to just send CMD23,
751 * and avoid the final CMD12, as on an error condition
752 * CMD12 (stop) needs to be sent anyway. This, coupled
753 * with Auto-CMD23 enhancements provided by some
754 * hosts, means that the complexity of dealing
755 * with this is best left to the host. If CMD23 is
756 * supported by card and host, we'll fill sbc in and let
757 * the host deal with handling it correctly. This means
758 * that for hosts that don't expose MMC_CAP_CMD23, no
759 * change of behavior will be observed.
760 *
761 * N.B: Some MMC cards experience perf degradation.
762 * We'll avoid using CMD23-bounded multiblock writes for
763 * these, while retaining features like reliable writes.
764 */
765 917
766 if ((md->flags & MMC_BLK_CMD23) && 918static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
767 mmc_op_multi(brq.cmd.opcode) && 919 struct mmc_card *card,
768 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) { 920 int disable_multi,
769 brq.sbc.opcode = MMC_SET_BLOCK_COUNT; 921 struct mmc_queue *mq)
770 brq.sbc.arg = brq.data.blocks | 922{
771 (do_rel_wr ? (1 << 31) : 0); 923 u32 readcmd, writecmd;
772 brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; 924 struct mmc_blk_request *brq = &mqrq->brq;
773 brq.mrq.sbc = &brq.sbc; 925 struct request *req = mqrq->req;
774 } 926 struct mmc_blk_data *md = mq->data;
775 927
776 mmc_set_data_timeout(&brq.data, card); 928 /*
929 * Reliable writes are used to implement Forced Unit Access and
930 * REQ_META accesses, and are supported only on MMCs.
931 *
932 * XXX: this really needs a good explanation of why REQ_META
933 * is treated special.
934 */
935 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
936 (req->cmd_flags & REQ_META)) &&
937 (rq_data_dir(req) == WRITE) &&
938 (md->flags & MMC_BLK_REL_WR);
777 939
778 brq.data.sg = mq->sg; 940 memset(brq, 0, sizeof(struct mmc_blk_request));
779 brq.data.sg_len = mmc_queue_map_sg(mq); 941 brq->mrq.cmd = &brq->cmd;
942 brq->mrq.data = &brq->data;
943
944 brq->cmd.arg = blk_rq_pos(req);
945 if (!mmc_card_blockaddr(card))
946 brq->cmd.arg <<= 9;
947 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
948 brq->cmd.retries = MMC_CMD_RETRIES;
949 brq->data.blksz = 512;
950 brq->stop.opcode = MMC_STOP_TRANSMISSION;
951 brq->stop.arg = 0;
952 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
953 brq->data.blocks = blk_rq_sectors(req);
780 954
781 /* 955 /*
782 * Adjust the sg list so it is the same size as the 956 * The block layer doesn't support all sector count
783 * request. 957 * restrictions, so we need to be prepared for too big
958 * requests.
959 */
960 if (brq->data.blocks > card->host->max_blk_count)
961 brq->data.blocks = card->host->max_blk_count;
962
963 /*
964 * After a read error, we redo the request one sector at a time
965 * in order to accurately determine which sectors can be read
966 * successfully.
967 */
968 if (disable_multi && brq->data.blocks > 1)
969 brq->data.blocks = 1;
970
971 if (brq->data.blocks > 1 || do_rel_wr) {
972 /* SPI multiblock writes terminate using a special
973 * token, not a STOP_TRANSMISSION request.
784 */ 974 */
785 if (brq.data.blocks != blk_rq_sectors(req)) { 975 if (!mmc_host_is_spi(card->host) ||
786 int i, data_size = brq.data.blocks << 9; 976 rq_data_dir(req) == READ)
787 struct scatterlist *sg; 977 brq->mrq.stop = &brq->stop;
788 978 readcmd = MMC_READ_MULTIPLE_BLOCK;
789 for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) { 979 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
790 data_size -= sg->length; 980 } else {
791 if (data_size <= 0) { 981 brq->mrq.stop = NULL;
792 sg->length += data_size; 982 readcmd = MMC_READ_SINGLE_BLOCK;
793 i++; 983 writecmd = MMC_WRITE_BLOCK;
794 break; 984 }
795 } 985 if (rq_data_dir(req) == READ) {
796 } 986 brq->cmd.opcode = readcmd;
797 brq.data.sg_len = i; 987 brq->data.flags |= MMC_DATA_READ;
798 } 988 } else {
989 brq->cmd.opcode = writecmd;
990 brq->data.flags |= MMC_DATA_WRITE;
991 }
799 992
800 mmc_queue_bounce_pre(mq); 993 if (do_rel_wr)
994 mmc_apply_rel_rw(brq, card, req);
801 995
802 mmc_wait_for_req(card->host, &brq.mrq); 996 /*
997 * Pre-defined multi-block transfers are preferable to
998 * open ended-ones (and necessary for reliable writes).
999 * However, it is not sufficient to just send CMD23,
1000 * and avoid the final CMD12, as on an error condition
1001 * CMD12 (stop) needs to be sent anyway. This, coupled
1002 * with Auto-CMD23 enhancements provided by some
1003 * hosts, means that the complexity of dealing
1004 * with this is best left to the host. If CMD23 is
1005 * supported by card and host, we'll fill sbc in and let
1006 * the host deal with handling it correctly. This means
1007 * that for hosts that don't expose MMC_CAP_CMD23, no
1008 * change of behavior will be observed.
1009 *
1010 * N.B: Some MMC cards experience perf degradation.
1011 * We'll avoid using CMD23-bounded multiblock writes for
1012 * these, while retaining features like reliable writes.
1013 */
803 1014
804 mmc_queue_bounce_post(mq); 1015 if ((md->flags & MMC_BLK_CMD23) &&
1016 mmc_op_multi(brq->cmd.opcode) &&
1017 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
1018 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1019 brq->sbc.arg = brq->data.blocks |
1020 (do_rel_wr ? (1 << 31) : 0);
1021 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1022 brq->mrq.sbc = &brq->sbc;
1023 }
805 1024
806 /* 1025 mmc_set_data_timeout(&brq->data, card);
807 * Check for errors here, but don't jump to cmd_err
808 * until later as we need to wait for the card to leave
809 * programming mode even when things go wrong.
810 */
811 if (brq.sbc.error || brq.cmd.error ||
812 brq.data.error || brq.stop.error) {
813 if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
814 /* Redo read one sector at a time */
815 printk(KERN_WARNING "%s: retrying using single "
816 "block read\n", req->rq_disk->disk_name);
817 disable_multi = 1;
818 continue;
819 }
820 status = get_card_status(card, req);
821 }
822 1026
823 if (brq.sbc.error) { 1027 brq->data.sg = mqrq->sg;
824 printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT " 1028 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
825 "command, response %#x, card status %#x\n",
826 req->rq_disk->disk_name, brq.sbc.error,
827 brq.sbc.resp[0], status);
828 }
829 1029
830 if (brq.cmd.error) { 1030 /*
831 printk(KERN_ERR "%s: error %d sending read/write " 1031 * Adjust the sg list so it is the same size as the
832 "command, response %#x, card status %#x\n", 1032 * request.
833 req->rq_disk->disk_name, brq.cmd.error, 1033 */
834 brq.cmd.resp[0], status); 1034 if (brq->data.blocks != blk_rq_sectors(req)) {
1035 int i, data_size = brq->data.blocks << 9;
1036 struct scatterlist *sg;
1037
1038 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1039 data_size -= sg->length;
1040 if (data_size <= 0) {
1041 sg->length += data_size;
1042 i++;
1043 break;
1044 }
835 } 1045 }
1046 brq->data.sg_len = i;
1047 }
836 1048
837 if (brq.data.error) { 1049 mqrq->mmc_active.mrq = &brq->mrq;
838 if (brq.data.error == -ETIMEDOUT && brq.mrq.stop) 1050 mqrq->mmc_active.err_check = mmc_blk_err_check;
839 /* 'Stop' response contains card status */
840 status = brq.mrq.stop->resp[0];
841 printk(KERN_ERR "%s: error %d transferring data,"
842 " sector %u, nr %u, card status %#x\n",
843 req->rq_disk->disk_name, brq.data.error,
844 (unsigned)blk_rq_pos(req),
845 (unsigned)blk_rq_sectors(req), status);
846 }
847 1051
848 if (brq.stop.error) { 1052 mmc_queue_bounce_pre(mqrq);
849 printk(KERN_ERR "%s: error %d sending stop command, " 1053}
850 "response %#x, card status %#x\n",
851 req->rq_disk->disk_name, brq.stop.error,
852 brq.stop.resp[0], status);
853 }
854 1054
855 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { 1055static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
856 do { 1056{
857 int err; 1057 struct mmc_blk_data *md = mq->data;
858 1058 struct mmc_card *card = md->queue.card;
859 cmd.opcode = MMC_SEND_STATUS; 1059 struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
860 cmd.arg = card->rca << 16; 1060 int ret = 1, disable_multi = 0, retry = 0;
861 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 1061 enum mmc_blk_status status;
862 err = mmc_wait_for_cmd(card->host, &cmd, 5); 1062 struct mmc_queue_req *mq_rq;
863 if (err) { 1063 struct request *req;
864 printk(KERN_ERR "%s: error %d requesting status\n", 1064 struct mmc_async_req *areq;
865 req->rq_disk->disk_name, err); 1065
866 goto cmd_err; 1066 if (!rqc && !mq->mqrq_prev->req)
867 } 1067 return 0;
868 /*
869 * Some cards mishandle the status bits,
870 * so make sure to check both the busy
871 * indication and the card state.
872 */
873 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
874 (R1_CURRENT_STATE(cmd.resp[0]) == 7));
875
876#if 0
877 if (cmd.resp[0] & ~0x00000900)
878 printk(KERN_ERR "%s: status = %08x\n",
879 req->rq_disk->disk_name, cmd.resp[0]);
880 if (mmc_decode_status(cmd.resp))
881 goto cmd_err;
882#endif
883 }
884 1068
885 if (brq.cmd.error || brq.stop.error || brq.data.error) { 1069 do {
886 if (rq_data_dir(req) == READ) { 1070 if (rqc) {
1071 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1072 areq = &mq->mqrq_cur->mmc_active;
1073 } else
1074 areq = NULL;
1075 areq = mmc_start_req(card->host, areq, (int *) &status);
1076 if (!areq)
1077 return 0;
1078
1079 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
1080 brq = &mq_rq->brq;
1081 req = mq_rq->req;
1082 mmc_queue_bounce_post(mq_rq);
1083
1084 switch (status) {
1085 case MMC_BLK_SUCCESS:
1086 case MMC_BLK_PARTIAL:
1087 /*
1088 * A block was successfully transferred.
1089 */
1090 spin_lock_irq(&md->lock);
1091 ret = __blk_end_request(req, 0,
1092 brq->data.bytes_xfered);
1093 spin_unlock_irq(&md->lock);
1094 if (status == MMC_BLK_SUCCESS && ret) {
887 /* 1095 /*
888 * After an error, we redo I/O one sector at a 1096 * The blk_end_request has returned non zero
889 * time, so we only reach here after trying to 1097 * even though all data is transfered and no
890 * read a single sector. 1098 * erros returned by host.
1099 * If this happen it's a bug.
891 */ 1100 */
892 spin_lock_irq(&md->lock); 1101 printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n",
893 ret = __blk_end_request(req, -EIO, brq.data.blksz); 1102 __func__, blk_rq_bytes(req),
894 spin_unlock_irq(&md->lock); 1103 brq->data.bytes_xfered);
895 continue; 1104 rqc = NULL;
1105 goto cmd_abort;
896 } 1106 }
1107 break;
1108 case MMC_BLK_CMD_ERR:
897 goto cmd_err; 1109 goto cmd_err;
1110 case MMC_BLK_RETRY_SINGLE:
1111 disable_multi = 1;
1112 break;
1113 case MMC_BLK_RETRY:
1114 if (retry++ < 5)
1115 break;
1116 case MMC_BLK_ABORT:
1117 goto cmd_abort;
1118 case MMC_BLK_DATA_ERR:
1119 /*
1120 * After an error, we redo I/O one sector at a
1121 * time, so we only reach here after trying to
1122 * read a single sector.
1123 */
1124 spin_lock_irq(&md->lock);
1125 ret = __blk_end_request(req, -EIO,
1126 brq->data.blksz);
1127 spin_unlock_irq(&md->lock);
1128 if (!ret)
1129 goto start_new_req;
1130 break;
898 } 1131 }
899 1132
900 /* 1133 if (ret) {
901 * A block was successfully transferred. 1134 /*
902 */ 1135 * In case of a none complete request
903 spin_lock_irq(&md->lock); 1136 * prepare it again and resend.
904 ret = __blk_end_request(req, 0, brq.data.bytes_xfered); 1137 */
905 spin_unlock_irq(&md->lock); 1138 mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
1139 mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
1140 }
906 } while (ret); 1141 } while (ret);
907 1142
1143 if (brq->cmd.resp[0] & R1_URGENT_BKOPS)
1144 mmc_card_set_need_bkops(card);
1145
908 return 1; 1146 return 1;
909 1147
910 cmd_err: 1148 cmd_err:
@@ -927,44 +1165,76 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
927 } 1165 }
928 } else { 1166 } else {
929 spin_lock_irq(&md->lock); 1167 spin_lock_irq(&md->lock);
930 ret = __blk_end_request(req, 0, brq.data.bytes_xfered); 1168 ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
931 spin_unlock_irq(&md->lock); 1169 spin_unlock_irq(&md->lock);
932 } 1170 }
933 1171
1172 cmd_abort:
934 spin_lock_irq(&md->lock); 1173 spin_lock_irq(&md->lock);
935 while (ret) 1174 while (ret)
936 ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); 1175 ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
937 spin_unlock_irq(&md->lock); 1176 spin_unlock_irq(&md->lock);
938 1177
1178 start_new_req:
1179 if (rqc) {
1180 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1181 mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
1182 }
1183
939 return 0; 1184 return 0;
940} 1185}
941 1186
1187static int
1188mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card);
1189
942static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) 1190static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
943{ 1191{
944 int ret; 1192 int ret;
945 struct mmc_blk_data *md = mq->data; 1193 struct mmc_blk_data *md = mq->data;
946 struct mmc_card *card = md->queue.card; 1194 struct mmc_card *card = md->queue.card;
947 1195
948 mmc_claim_host(card->host); 1196#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
1197 if (mmc_bus_needs_resume(card->host)) {
1198 mmc_resume_bus(card->host);
1199 mmc_blk_set_blksize(md, card);
1200 }
1201#endif
1202
1203 if (req && !mq->mqrq_prev->req)
1204 /* claim host only for the first request */
1205 mmc_claim_host(card->host);
1206
949 ret = mmc_blk_part_switch(card, md); 1207 ret = mmc_blk_part_switch(card, md);
950 if (ret) { 1208 if (ret) {
951 ret = 0; 1209 ret = 0;
952 goto out; 1210 goto out;
953 } 1211 }
954 1212
955 if (req->cmd_flags & REQ_DISCARD) { 1213 if (req && req->cmd_flags & REQ_DISCARD) {
1214 /* complete ongoing async transfer before issuing discard */
1215 if (card->host->areq)
1216 mmc_blk_issue_rw_rq(mq, NULL);
956 if (req->cmd_flags & REQ_SECURE) 1217 if (req->cmd_flags & REQ_SECURE)
957 ret = mmc_blk_issue_secdiscard_rq(mq, req); 1218 ret = mmc_blk_issue_secdiscard_rq(mq, req);
958 else 1219 else
959 ret = mmc_blk_issue_discard_rq(mq, req); 1220 ret = mmc_blk_issue_discard_rq(mq, req);
960 } else if (req->cmd_flags & REQ_FLUSH) { 1221 } else if (req && req->cmd_flags & REQ_FLUSH) {
1222 /* complete ongoing async transfer before issuing flush */
1223 if (card->host->areq)
1224 mmc_blk_issue_rw_rq(mq, NULL);
961 ret = mmc_blk_issue_flush(mq, req); 1225 ret = mmc_blk_issue_flush(mq, req);
962 } else { 1226 } else {
1227 /* Abort any current bk ops of eMMC card by issuing HPI */
1228 if (mmc_card_mmc(mq->card) && mmc_card_doing_bkops(mq->card))
1229 mmc_interrupt_hpi(mq->card);
1230
963 ret = mmc_blk_issue_rw_rq(mq, req); 1231 ret = mmc_blk_issue_rw_rq(mq, req);
964 } 1232 }
965 1233
966out: 1234out:
967 mmc_release_host(card->host); 1235 if (!req)
1236 /* release host only when there are no more requests */
1237 mmc_release_host(card->host);
968 return ret; 1238 return ret;
969} 1239}
970 1240
@@ -1038,6 +1308,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
1038 md->disk->queue = md->queue.queue; 1308 md->disk->queue = md->queue.queue;
1039 md->disk->driverfs_dev = parent; 1309 md->disk->driverfs_dev = parent;
1040 set_disk_ro(md->disk, md->read_only || default_ro); 1310 set_disk_ro(md->disk, md->read_only || default_ro);
1311 md->disk->flags = GENHD_FL_EXT_DEVT;
1041 1312
1042 /* 1313 /*
1043 * As discussed on lkml, GENHD_FL_REMOVABLE should: 1314 * As discussed on lkml, GENHD_FL_REMOVABLE should:
@@ -1277,6 +1548,9 @@ static int mmc_blk_probe(struct mmc_card *card)
1277 mmc_set_drvdata(card, md); 1548 mmc_set_drvdata(card, md);
1278 mmc_fixup_device(card, blk_fixups); 1549 mmc_fixup_device(card, blk_fixups);
1279 1550
1551#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
1552 mmc_set_bus_resume_policy(card->host, 1);
1553#endif
1280 if (mmc_add_disk(md)) 1554 if (mmc_add_disk(md))
1281 goto out; 1555 goto out;
1282 1556
@@ -1302,6 +1576,9 @@ static void mmc_blk_remove(struct mmc_card *card)
1302 mmc_release_host(card->host); 1576 mmc_release_host(card->host);
1303 mmc_blk_remove_req(md); 1577 mmc_blk_remove_req(md);
1304 mmc_set_drvdata(card, NULL); 1578 mmc_set_drvdata(card, NULL);
1579#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
1580 mmc_set_bus_resume_policy(card->host, 0);
1581#endif
1305} 1582}
1306 1583
1307#ifdef CONFIG_PM 1584#ifdef CONFIG_PM
@@ -1325,7 +1602,9 @@ static int mmc_blk_resume(struct mmc_card *card)
1325 struct mmc_blk_data *md = mmc_get_drvdata(card); 1602 struct mmc_blk_data *md = mmc_get_drvdata(card);
1326 1603
1327 if (md) { 1604 if (md) {
1605#ifndef CONFIG_MMC_BLOCK_DEFERRED_RESUME
1328 mmc_blk_set_blksize(md, card); 1606 mmc_blk_set_blksize(md, card);
1607#endif
1329 1608
1330 /* 1609 /*
1331 * Resume involves the card going into idle state, 1610 * Resume involves the card going into idle state,
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 233cdfae92f..440b97d9e44 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -148,6 +148,117 @@ struct mmc_test_card {
148 struct mmc_test_general_result *gr; 148 struct mmc_test_general_result *gr;
149}; 149};
150 150
151enum mmc_test_prep_media {
152 MMC_TEST_PREP_NONE = 0,
153 MMC_TEST_PREP_WRITE_FULL = 1 << 0,
154 MMC_TEST_PREP_ERASE = 1 << 1,
155};
156
157struct mmc_test_multiple_rw {
158 unsigned int *sg_len;
159 unsigned int *bs;
160 unsigned int len;
161 unsigned int size;
162 bool do_write;
163 bool do_nonblock_req;
164 enum mmc_test_prep_media prepare;
165};
166
167struct mmc_test_async_req {
168 struct mmc_async_req areq;
169 struct mmc_test_card *test;
170};
171
172struct mmc_test_parameter {
173 const char *name;
174 long value;
175 long (*exec)(struct mmc_test_card *);
176 const char *input;
177};
178
179static long mmc_test_set_testcase(struct mmc_test_card *test);
180static long mmc_test_set_clock(struct mmc_test_card *test);
181static long mmc_test_set_bus_width(struct mmc_test_card *test);
182static long mmc_test_set_timing(struct mmc_test_card *test);
183
184
185static struct mmc_test_parameter mmc_test_parameter[] = {
186 {
187 .name = "Testcase Number",
188 .value = 1,
189 .exec = mmc_test_set_testcase,
190 .input = "-n",
191 },
192 {
193 .name = "Clock Rate",
194 .value = -1,
195 .exec = mmc_test_set_clock,
196 .input = "-c",
197 },
198 {
199 .name = "Bus Width",
200 .value = -1,
201 .exec = mmc_test_set_bus_width,
202 .input = "-b",
203 },
204 {
205 .name = "Timing",
206 .value = -1,
207 .exec = mmc_test_set_timing,
208 .input = "-t",
209 },
210};
211
212static long mmc_test_set_testcase(struct mmc_test_card *test)
213{
214 return 0;
215}
216
217static long mmc_test_set_clock(struct mmc_test_card *test)
218{
219 long clock = mmc_test_parameter[1].value;
220 if (-1 == clock)
221 return test->card->host->ios.clock;
222 WARN_ON(clock < test->card->host->f_min);
223 if (clock > test->card->host->f_max)
224 clock = test->card->host->f_max;
225
226 test->card->host->ios.clock = clock;
227
228 return test->card->host->ios.clock;
229}
230
231static long mmc_test_set_bus_width(struct mmc_test_card *test)
232{
233 long bus_width = mmc_test_parameter[2].value;
234 if (-1 == bus_width)
235 return test->card->host->ios.bus_width;
236
237 test->card->host->ios.bus_width = bus_width;
238
239 return test->card->host->ios.bus_width = bus_width;
240}
241
242static long mmc_test_set_timing(struct mmc_test_card *test)
243{
244 long timing = mmc_test_parameter[3].value;
245 if (-1 == timing)
246 return test->card->host->ios.timing;
247 test->card->host->ios.timing = timing;
248
249 return test->card->host->ios.timing;
250}
251
252static void mmc_test_set_parameters(struct mmc_test_card *test)
253{
254 int i;
255 for (i = 0; i < ARRAY_SIZE(mmc_test_parameter); i++) {
256 printk(KERN_INFO "Parameter[%s] set to [%ld]\n",
257 mmc_test_parameter[i].name,
258 mmc_test_parameter[i].exec(test));
259 }
260}
261
151/*******************************************************************/ 262/*******************************************************************/
152/* General helper functions */ 263/* General helper functions */
153/*******************************************************************/ 264/*******************************************************************/
@@ -203,7 +314,7 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
203static int mmc_test_busy(struct mmc_command *cmd) 314static int mmc_test_busy(struct mmc_command *cmd)
204{ 315{
205 return !(cmd->resp[0] & R1_READY_FOR_DATA) || 316 return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
206 (R1_CURRENT_STATE(cmd->resp[0]) == 7); 317 (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
207} 318}
208 319
209/* 320/*
@@ -367,21 +478,26 @@ out_free:
367 * Map memory into a scatterlist. Optionally allow the same memory to be 478 * Map memory into a scatterlist. Optionally allow the same memory to be
368 * mapped more than once. 479 * mapped more than once.
369 */ 480 */
370static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz, 481static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
371 struct scatterlist *sglist, int repeat, 482 struct scatterlist *sglist, int repeat,
372 unsigned int max_segs, unsigned int max_seg_sz, 483 unsigned int max_segs, unsigned int max_seg_sz,
373 unsigned int *sg_len) 484 unsigned int *sg_len, int min_sg_len)
374{ 485{
375 struct scatterlist *sg = NULL; 486 struct scatterlist *sg = NULL;
376 unsigned int i; 487 unsigned int i;
488 unsigned long sz = size;
377 489
378 sg_init_table(sglist, max_segs); 490 sg_init_table(sglist, max_segs);
491 if (min_sg_len > max_segs)
492 min_sg_len = max_segs;
379 493
380 *sg_len = 0; 494 *sg_len = 0;
381 do { 495 do {
382 for (i = 0; i < mem->cnt; i++) { 496 for (i = 0; i < mem->cnt; i++) {
383 unsigned long len = PAGE_SIZE << mem->arr[i].order; 497 unsigned long len = PAGE_SIZE << mem->arr[i].order;
384 498
499 if (min_sg_len && (size / min_sg_len < len))
500 len = ALIGN(size / min_sg_len, 512);
385 if (len > sz) 501 if (len > sz)
386 len = sz; 502 len = sz;
387 if (len > max_seg_sz) 503 if (len > max_seg_sz)
@@ -554,11 +670,12 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
554 670
555 printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " 671 printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
556 "%lu.%09lu seconds (%u kB/s, %u KiB/s, " 672 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
557 "%u.%02u IOPS)\n", 673 "%u.%02u IOPS, sg_len %d)\n",
558 mmc_hostname(test->card->host), count, sectors, count, 674 mmc_hostname(test->card->host), count, sectors, count,
559 sectors >> 1, (sectors & 1 ? ".5" : ""), 675 sectors >> 1, (sectors & 1 ? ".5" : ""),
560 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec, 676 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
561 rate / 1000, rate / 1024, iops / 100, iops % 100); 677 rate / 1000, rate / 1024, iops / 100, iops % 100,
678 test->area.sg_len);
562 679
563 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops); 680 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
564} 681}
@@ -661,7 +778,7 @@ static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
661 * Checks that a normal transfer didn't have any errors 778 * Checks that a normal transfer didn't have any errors
662 */ 779 */
663static int mmc_test_check_result(struct mmc_test_card *test, 780static int mmc_test_check_result(struct mmc_test_card *test,
664 struct mmc_request *mrq) 781 struct mmc_request *mrq)
665{ 782{
666 int ret; 783 int ret;
667 784
@@ -685,6 +802,17 @@ static int mmc_test_check_result(struct mmc_test_card *test,
685 return ret; 802 return ret;
686} 803}
687 804
805static int mmc_test_check_result_async(struct mmc_card *card,
806 struct mmc_async_req *areq)
807{
808 struct mmc_test_async_req *test_async =
809 container_of(areq, struct mmc_test_async_req, areq);
810
811 mmc_test_wait_busy(test_async->test);
812
813 return mmc_test_check_result(test_async->test, areq->mrq);
814}
815
688/* 816/*
689 * Checks that a "short transfer" behaved as expected 817 * Checks that a "short transfer" behaved as expected
690 */ 818 */
@@ -720,6 +848,85 @@ static int mmc_test_check_broken_result(struct mmc_test_card *test,
720} 848}
721 849
722/* 850/*
851 * Tests nonblock transfer with certain parameters
852 */
853static void mmc_test_nonblock_reset(struct mmc_request *mrq,
854 struct mmc_command *cmd,
855 struct mmc_command *stop,
856 struct mmc_data *data)
857{
858 memset(mrq, 0, sizeof(struct mmc_request));
859 memset(cmd, 0, sizeof(struct mmc_command));
860 memset(data, 0, sizeof(struct mmc_data));
861 memset(stop, 0, sizeof(struct mmc_command));
862
863 mrq->cmd = cmd;
864 mrq->data = data;
865 mrq->stop = stop;
866}
867static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
868 struct scatterlist *sg, unsigned sg_len,
869 unsigned dev_addr, unsigned blocks,
870 unsigned blksz, int write, int count)
871{
872 struct mmc_request mrq1;
873 struct mmc_command cmd1;
874 struct mmc_command stop1;
875 struct mmc_data data1;
876
877 struct mmc_request mrq2;
878 struct mmc_command cmd2;
879 struct mmc_command stop2;
880 struct mmc_data data2;
881
882 struct mmc_test_async_req test_areq[2];
883 struct mmc_async_req *done_areq;
884 struct mmc_async_req *cur_areq = &test_areq[0].areq;
885 struct mmc_async_req *other_areq = &test_areq[1].areq;
886 int i;
887 int ret;
888
889 test_areq[0].test = test;
890 test_areq[1].test = test;
891
892 mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
893 mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);
894
895 cur_areq->mrq = &mrq1;
896 cur_areq->err_check = mmc_test_check_result_async;
897 other_areq->mrq = &mrq2;
898 other_areq->err_check = mmc_test_check_result_async;
899
900 for (i = 0; i < count; i++) {
901 mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
902 blocks, blksz, write);
903 done_areq = mmc_start_req(test->card->host, cur_areq, &ret);
904
905 if (ret || (!done_areq && i > 0))
906 goto err;
907
908 if (done_areq) {
909 if (done_areq->mrq == &mrq2)
910 mmc_test_nonblock_reset(&mrq2, &cmd2,
911 &stop2, &data2);
912 else
913 mmc_test_nonblock_reset(&mrq1, &cmd1,
914 &stop1, &data1);
915 }
916 done_areq = cur_areq;
917 cur_areq = other_areq;
918 other_areq = done_areq;
919 dev_addr += blocks;
920 }
921
922 done_areq = mmc_start_req(test->card->host, NULL, &ret);
923
924 return ret;
925err:
926 return ret;
927}
928
929/*
723 * Tests a basic transfer with certain parameters 930 * Tests a basic transfer with certain parameters
724 */ 931 */
725static int mmc_test_simple_transfer(struct mmc_test_card *test, 932static int mmc_test_simple_transfer(struct mmc_test_card *test,
@@ -1302,7 +1509,7 @@ static int mmc_test_no_highmem(struct mmc_test_card *test)
1302 * Map sz bytes so that it can be transferred. 1509 * Map sz bytes so that it can be transferred.
1303 */ 1510 */
1304static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz, 1511static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1305 int max_scatter) 1512 int max_scatter, int min_sg_len)
1306{ 1513{
1307 struct mmc_test_area *t = &test->area; 1514 struct mmc_test_area *t = &test->area;
1308 int err; 1515 int err;
@@ -1315,7 +1522,7 @@ static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1315 &t->sg_len); 1522 &t->sg_len);
1316 } else { 1523 } else {
1317 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs, 1524 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1318 t->max_seg_sz, &t->sg_len); 1525 t->max_seg_sz, &t->sg_len, min_sg_len);
1319 } 1526 }
1320 if (err) 1527 if (err)
1321 printk(KERN_INFO "%s: Failed to map sg list\n", 1528 printk(KERN_INFO "%s: Failed to map sg list\n",
@@ -1336,14 +1543,17 @@ static int mmc_test_area_transfer(struct mmc_test_card *test,
1336} 1543}
1337 1544
1338/* 1545/*
1339 * Map and transfer bytes. 1546 * Map and transfer bytes for multiple transfers.
1340 */ 1547 */
1341static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz, 1548static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
1342 unsigned int dev_addr, int write, int max_scatter, 1549 unsigned int dev_addr, int write,
1343 int timed) 1550 int max_scatter, int timed, int count,
1551 bool nonblock, int min_sg_len)
1344{ 1552{
1345 struct timespec ts1, ts2; 1553 struct timespec ts1, ts2;
1346 int ret; 1554 int ret = 0;
1555 int i;
1556 struct mmc_test_area *t = &test->area;
1347 1557
1348 /* 1558 /*
1349 * In the case of a maximally scattered transfer, the maximum transfer 1559 * In the case of a maximally scattered transfer, the maximum transfer
@@ -1361,14 +1571,21 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1361 sz = max_tfr; 1571 sz = max_tfr;
1362 } 1572 }
1363 1573
1364 ret = mmc_test_area_map(test, sz, max_scatter); 1574 ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
1365 if (ret) 1575 if (ret)
1366 return ret; 1576 return ret;
1367 1577
1368 if (timed) 1578 if (timed)
1369 getnstimeofday(&ts1); 1579 getnstimeofday(&ts1);
1580 if (nonblock)
1581 ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
1582 dev_addr, t->blocks, 512, write, count);
1583 else
1584 for (i = 0; i < count && ret == 0; i++) {
1585 ret = mmc_test_area_transfer(test, dev_addr, write);
1586 dev_addr += sz >> 9;
1587 }
1370 1588
1371 ret = mmc_test_area_transfer(test, dev_addr, write);
1372 if (ret) 1589 if (ret)
1373 return ret; 1590 return ret;
1374 1591
@@ -1376,11 +1593,19 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1376 getnstimeofday(&ts2); 1593 getnstimeofday(&ts2);
1377 1594
1378 if (timed) 1595 if (timed)
1379 mmc_test_print_rate(test, sz, &ts1, &ts2); 1596 mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
1380 1597
1381 return 0; 1598 return 0;
1382} 1599}
1383 1600
1601static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1602 unsigned int dev_addr, int write, int max_scatter,
1603 int timed)
1604{
1605 return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
1606 timed, 1, false, 0);
1607}
1608
1384/* 1609/*
1385 * Write the test area entirely. 1610 * Write the test area entirely.
1386 */ 1611 */
@@ -1954,6 +2179,245 @@ static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
1954 return mmc_test_large_seq_perf(test, 1); 2179 return mmc_test_large_seq_perf(test, 1);
1955} 2180}
1956 2181
2182static int mmc_test_rw_multiple(struct mmc_test_card *test,
2183 struct mmc_test_multiple_rw *tdata,
2184 unsigned int reqsize, unsigned int size,
2185 int min_sg_len)
2186{
2187 unsigned int dev_addr;
2188 struct mmc_test_area *t = &test->area;
2189 int ret = 0;
2190
2191 /* Set up test area */
2192 if (size > mmc_test_capacity(test->card) / 2 * 512)
2193 size = mmc_test_capacity(test->card) / 2 * 512;
2194 if (reqsize > t->max_tfr)
2195 reqsize = t->max_tfr;
2196 dev_addr = mmc_test_capacity(test->card) / 4;
2197 if ((dev_addr & 0xffff0000))
2198 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
2199 else
2200 dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
2201 if (!dev_addr)
2202 goto err;
2203
2204 if (reqsize > size)
2205 return 0;
2206
2207 /* prepare test area */
2208 if (mmc_can_erase(test->card) &&
2209 tdata->prepare & MMC_TEST_PREP_ERASE) {
2210 ret = mmc_erase(test->card, dev_addr,
2211 size / 512, MMC_SECURE_ERASE_ARG);
2212 if (ret)
2213 ret = mmc_erase(test->card, dev_addr,
2214 size / 512, MMC_ERASE_ARG);
2215 if (ret)
2216 goto err;
2217 }
2218
2219 /* Run test */
2220 ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
2221 tdata->do_write, 0, 1, size / reqsize,
2222 tdata->do_nonblock_req, min_sg_len);
2223 if (ret)
2224 goto err;
2225
2226 return ret;
2227 err:
2228 printk(KERN_INFO "[%s] error\n", __func__);
2229 return ret;
2230}
2231
2232static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
2233 struct mmc_test_multiple_rw *rw)
2234{
2235 int ret = 0;
2236 int i;
2237 void *pre_req = test->card->host->ops->pre_req;
2238 void *post_req = test->card->host->ops->post_req;
2239
2240 if (rw->do_nonblock_req &&
2241 ((!pre_req && post_req) || (pre_req && !post_req))) {
2242 printk(KERN_INFO "error: only one of pre/post is defined\n");
2243 return -EINVAL;
2244 }
2245
2246 for (i = 0 ; i < rw->len && ret == 0; i++) {
2247 ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
2248 if (ret)
2249 break;
2250 }
2251 return ret;
2252}
2253
2254static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
2255 struct mmc_test_multiple_rw *rw)
2256{
2257 int ret = 0;
2258 int i;
2259
2260 for (i = 0 ; i < rw->len && ret == 0; i++) {
2261 ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
2262 rw->sg_len[i]);
2263 if (ret)
2264 break;
2265 }
2266 return ret;
2267}
2268
2269/*
2270 * Multiple blocking write 4k to 4 MB chunks
2271 */
2272static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
2273{
2274 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2275 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2276 struct mmc_test_multiple_rw test_data = {
2277 .bs = bs,
2278 .size = TEST_AREA_MAX_SIZE,
2279 .len = ARRAY_SIZE(bs),
2280 .do_write = true,
2281 .do_nonblock_req = false,
2282 .prepare = MMC_TEST_PREP_ERASE,
2283 };
2284
2285 return mmc_test_rw_multiple_size(test, &test_data);
2286};
2287
2288/*
2289 * Multiple non-blocking write 4k to 4 MB chunks
2290 */
2291static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
2292{
2293 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2294 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2295 struct mmc_test_multiple_rw test_data = {
2296 .bs = bs,
2297 .size = TEST_AREA_MAX_SIZE,
2298 .len = ARRAY_SIZE(bs),
2299 .do_write = true,
2300 .do_nonblock_req = true,
2301 .prepare = MMC_TEST_PREP_ERASE,
2302 };
2303
2304 return mmc_test_rw_multiple_size(test, &test_data);
2305}
2306
2307/*
2308 * Multiple blocking read 4k to 4 MB chunks
2309 */
2310static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
2311{
2312 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2313 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2314 struct mmc_test_multiple_rw test_data = {
2315 .bs = bs,
2316 .size = TEST_AREA_MAX_SIZE,
2317 .len = ARRAY_SIZE(bs),
2318 .do_write = false,
2319 .do_nonblock_req = false,
2320 .prepare = MMC_TEST_PREP_NONE,
2321 };
2322
2323 return mmc_test_rw_multiple_size(test, &test_data);
2324}
2325
2326/*
2327 * Multiple non-blocking read 4k to 4 MB chunks
2328 */
2329static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
2330{
2331 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2332 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2333 struct mmc_test_multiple_rw test_data = {
2334 .bs = bs,
2335 .size = TEST_AREA_MAX_SIZE,
2336 .len = ARRAY_SIZE(bs),
2337 .do_write = false,
2338 .do_nonblock_req = true,
2339 .prepare = MMC_TEST_PREP_NONE,
2340 };
2341
2342 return mmc_test_rw_multiple_size(test, &test_data);
2343}
2344
2345/*
2346 * Multiple blocking write 1 to 512 sg elements
2347 */
2348static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
2349{
2350 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2351 1 << 7, 1 << 8, 1 << 9};
2352 struct mmc_test_multiple_rw test_data = {
2353 .sg_len = sg_len,
2354 .size = TEST_AREA_MAX_SIZE,
2355 .len = ARRAY_SIZE(sg_len),
2356 .do_write = true,
2357 .do_nonblock_req = false,
2358 .prepare = MMC_TEST_PREP_ERASE,
2359 };
2360
2361 return mmc_test_rw_multiple_sg_len(test, &test_data);
2362};
2363
2364/*
2365 * Multiple non-blocking write 1 to 512 sg elements
2366 */
2367static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
2368{
2369 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2370 1 << 7, 1 << 8, 1 << 9};
2371 struct mmc_test_multiple_rw test_data = {
2372 .sg_len = sg_len,
2373 .size = TEST_AREA_MAX_SIZE,
2374 .len = ARRAY_SIZE(sg_len),
2375 .do_write = true,
2376 .do_nonblock_req = true,
2377 .prepare = MMC_TEST_PREP_ERASE,
2378 };
2379
2380 return mmc_test_rw_multiple_sg_len(test, &test_data);
2381}
2382
2383/*
2384 * Multiple blocking read 1 to 512 sg elements
2385 */
2386static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
2387{
2388 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2389 1 << 7, 1 << 8, 1 << 9};
2390 struct mmc_test_multiple_rw test_data = {
2391 .sg_len = sg_len,
2392 .size = TEST_AREA_MAX_SIZE,
2393 .len = ARRAY_SIZE(sg_len),
2394 .do_write = false,
2395 .do_nonblock_req = false,
2396 .prepare = MMC_TEST_PREP_NONE,
2397 };
2398
2399 return mmc_test_rw_multiple_sg_len(test, &test_data);
2400}
2401
2402/*
2403 * Multiple non-blocking read 1 to 512 sg elements
2404 */
2405static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2406{
2407 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2408 1 << 7, 1 << 8, 1 << 9};
2409 struct mmc_test_multiple_rw test_data = {
2410 .sg_len = sg_len,
2411 .size = TEST_AREA_MAX_SIZE,
2412 .len = ARRAY_SIZE(sg_len),
2413 .do_write = false,
2414 .do_nonblock_req = true,
2415 .prepare = MMC_TEST_PREP_NONE,
2416 };
2417
2418 return mmc_test_rw_multiple_sg_len(test, &test_data);
2419}
2420
1957static const struct mmc_test_case mmc_test_cases[] = { 2421static const struct mmc_test_case mmc_test_cases[] = {
1958 { 2422 {
1959 .name = "Basic write (no data verification)", 2423 .name = "Basic write (no data verification)",
@@ -2221,6 +2685,61 @@ static const struct mmc_test_case mmc_test_cases[] = {
2221 .cleanup = mmc_test_area_cleanup, 2685 .cleanup = mmc_test_area_cleanup,
2222 }, 2686 },
2223 2687
2688 {
2689 .name = "Write performance with blocking req 4k to 4MB",
2690 .prepare = mmc_test_area_prepare,
2691 .run = mmc_test_profile_mult_write_blocking_perf,
2692 .cleanup = mmc_test_area_cleanup,
2693 },
2694
2695 {
2696 .name = "Write performance with non-blocking req 4k to 4MB",
2697 .prepare = mmc_test_area_prepare,
2698 .run = mmc_test_profile_mult_write_nonblock_perf,
2699 .cleanup = mmc_test_area_cleanup,
2700 },
2701
2702 {
2703 .name = "Read performance with blocking req 4k to 4MB",
2704 .prepare = mmc_test_area_prepare,
2705 .run = mmc_test_profile_mult_read_blocking_perf,
2706 .cleanup = mmc_test_area_cleanup,
2707 },
2708
2709 {
2710 .name = "Read performance with non-blocking req 4k to 4MB",
2711 .prepare = mmc_test_area_prepare,
2712 .run = mmc_test_profile_mult_read_nonblock_perf,
2713 .cleanup = mmc_test_area_cleanup,
2714 },
2715
2716 {
2717 .name = "Write performance blocking req 1 to 512 sg elems",
2718 .prepare = mmc_test_area_prepare,
2719 .run = mmc_test_profile_sglen_wr_blocking_perf,
2720 .cleanup = mmc_test_area_cleanup,
2721 },
2722
2723 {
2724 .name = "Write performance non-blocking req 1 to 512 sg elems",
2725 .prepare = mmc_test_area_prepare,
2726 .run = mmc_test_profile_sglen_wr_nonblock_perf,
2727 .cleanup = mmc_test_area_cleanup,
2728 },
2729
2730 {
2731 .name = "Read performance blocking req 1 to 512 sg elems",
2732 .prepare = mmc_test_area_prepare,
2733 .run = mmc_test_profile_sglen_r_blocking_perf,
2734 .cleanup = mmc_test_area_cleanup,
2735 },
2736
2737 {
2738 .name = "Read performance non-blocking req 1 to 512 sg elems",
2739 .prepare = mmc_test_area_prepare,
2740 .run = mmc_test_profile_sglen_r_nonblock_perf,
2741 .cleanup = mmc_test_area_cleanup,
2742 },
2224}; 2743};
2225 2744
2226static DEFINE_MUTEX(mmc_test_lock); 2745static DEFINE_MUTEX(mmc_test_lock);
@@ -2236,6 +2755,8 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
2236 2755
2237 mmc_claim_host(test->card->host); 2756 mmc_claim_host(test->card->host);
2238 2757
2758 mmc_test_set_parameters(test);
2759
2239 for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) { 2760 for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
2240 struct mmc_test_general_result *gr; 2761 struct mmc_test_general_result *gr;
2241 2762
@@ -2348,6 +2869,23 @@ static void mmc_test_free_result(struct mmc_card *card)
2348 2869
2349static LIST_HEAD(mmc_test_file_test); 2870static LIST_HEAD(mmc_test_file_test);
2350 2871
2872static void mmc_test_usage(struct seq_file *sf)
2873{
2874 int i = 0;
2875
2876 seq_printf(sf, "\nHow to run test:"
2877 "\necho <testcase> [[param1 value1].... ] > test"
2878 "\nExample:: echo 1 -b 4 -c 2500000 -t 2"
2879 "\n\nSupported parameters in sequence\n");
2880
2881 for (i = 0; i < ARRAY_SIZE(mmc_test_parameter); i++) {
2882 seq_printf(sf, "Parameter%d Name:[%s] option:[%s]\n",
2883 i + 1, mmc_test_parameter[i].name,
2884 mmc_test_parameter[i].input);
2885 }
2886 seq_printf(sf, "\'-1\' passed to take default value\n\n\n");
2887}
2888
2351static int mtf_test_show(struct seq_file *sf, void *data) 2889static int mtf_test_show(struct seq_file *sf, void *data)
2352{ 2890{
2353 struct mmc_card *card = (struct mmc_card *)sf->private; 2891 struct mmc_card *card = (struct mmc_card *)sf->private;
@@ -2382,24 +2920,92 @@ static int mtf_test_open(struct inode *inode, struct file *file)
2382 return single_open(file, mtf_test_show, inode->i_private); 2920 return single_open(file, mtf_test_show, inode->i_private);
2383} 2921}
2384 2922
2923static int mmc_test_extract_parameters(char *data_buf)
2924{
2925 char *running = NULL;
2926 char *token = NULL;
2927 const char delimiters[] = " ";
2928 long value;
2929 int i;
2930 int set = 0;
2931
2932 running = data_buf;
2933
2934 /*Example:
2935 * echo <testcasenumber> [[param1 value1] [param1 value1]] > test
2936 * $] echo 1 > test | Execute testcase 1
2937 * $] echo 1 -c 2500000 | execute tesecase 1 and set clock to 2500000
2938 * $] echo 1 -b 4 -c 2500000 -t 2 |
2939 * execute tesecase 1, set clock to 2500000, set bus_width 4,
2940 * and set timing to 2
2941 */
2942
2943 while ((token = strsep(&running, delimiters))) {
2944 if (strict_strtol(token, 10, &value)) {
2945 /* [Param1 value1] combination
2946 * Compare with available param list
2947 */
2948 for (i = 0; i < ARRAY_SIZE(mmc_test_parameter); i++) {
2949 if (!strcmp(mmc_test_parameter[i].input,
2950 token)) {
2951 /* Valid Option, extract following
2952 * value and save it
2953 */
2954 token = strsep(&running, delimiters);
2955 if (strict_strtol(token, 10,
2956 &(mmc_test_parameter[i].value))) {
2957
2958 printk(KERN_ERR "wrong parameter value\n");
2959 return -EINVAL;
2960 } else {
2961 break;
2962 }
2963 }
2964 }
2965 if (i == ARRAY_SIZE(mmc_test_parameter)) {
2966 printk(KERN_ERR "uknown mmc_test option\n");
2967 return -EINVAL;
2968 }
2969 } else {
2970 /* Testcase number */
2971 if (!set) {
2972 mmc_test_parameter[0].value = value;
2973 set = 1;
2974 } else {
2975 printk(KERN_ERR "invalid options");
2976 return -EINVAL;
2977 }
2978 }
2979 }
2980 return 0;
2981}
2982
2385static ssize_t mtf_test_write(struct file *file, const char __user *buf, 2983static ssize_t mtf_test_write(struct file *file, const char __user *buf,
2386 size_t count, loff_t *pos) 2984 size_t count, loff_t *pos)
2387{ 2985{
2388 struct seq_file *sf = (struct seq_file *)file->private_data; 2986 struct seq_file *sf = (struct seq_file *)file->private_data;
2389 struct mmc_card *card = (struct mmc_card *)sf->private; 2987 struct mmc_card *card = (struct mmc_card *)sf->private;
2390 struct mmc_test_card *test; 2988 struct mmc_test_card *test;
2391 char lbuf[12]; 2989 char *data_buf = NULL;
2392 long testcase; 2990 long testcase;
2393 2991
2394 if (count >= sizeof(lbuf)) 2992 data_buf = kzalloc(count, GFP_KERNEL);
2395 return -EINVAL; 2993 if (data_buf == NULL)
2994 return -ENOMEM;
2396 2995
2397 if (copy_from_user(lbuf, buf, count)) 2996 if (copy_from_user(data_buf, buf, count)) {
2997 kfree(data_buf);
2998 return -EFAULT;
2999 }
3000 data_buf[strlen(data_buf) - 1] = '\0';
3001 if (mmc_test_extract_parameters(data_buf)) {
3002 mmc_test_usage(sf);
2398 return -EFAULT; 3003 return -EFAULT;
2399 lbuf[count] = '\0'; 3004 }
2400 3005
2401 if (strict_strtol(lbuf, 10, &testcase)) 3006 kfree(data_buf);
2402 return -EINVAL; 3007
3008 testcase = mmc_test_parameter[0].value;
2403 3009
2404 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL); 3010 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
2405 if (!test) 3011 if (!test)
@@ -2445,7 +3051,33 @@ static const struct file_operations mmc_test_fops_test = {
2445 .release = single_release, 3051 .release = single_release,
2446}; 3052};
2447 3053
2448static void mmc_test_free_file_test(struct mmc_card *card) 3054static int mtf_testlist_show(struct seq_file *sf, void *data)
3055{
3056 int i;
3057
3058 mutex_lock(&mmc_test_lock);
3059
3060 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
3061 seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
3062
3063 mutex_unlock(&mmc_test_lock);
3064
3065 return 0;
3066}
3067
3068static int mtf_testlist_open(struct inode *inode, struct file *file)
3069{
3070 return single_open(file, mtf_testlist_show, inode->i_private);
3071}
3072
3073static const struct file_operations mmc_test_fops_testlist = {
3074 .open = mtf_testlist_open,
3075 .read = seq_read,
3076 .llseek = seq_lseek,
3077 .release = single_release,
3078};
3079
3080static void mmc_test_free_dbgfs_file(struct mmc_card *card)
2449{ 3081{
2450 struct mmc_test_dbgfs_file *df, *dfs; 3082 struct mmc_test_dbgfs_file *df, *dfs;
2451 3083
@@ -2462,23 +3094,21 @@ static void mmc_test_free_file_test(struct mmc_card *card)
2462 mutex_unlock(&mmc_test_lock); 3094 mutex_unlock(&mmc_test_lock);
2463} 3095}
2464 3096
2465static int mmc_test_register_file_test(struct mmc_card *card) 3097static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
3098 const char *name, mode_t mode, const struct file_operations *fops)
2466{ 3099{
2467 struct dentry *file = NULL; 3100 struct dentry *file = NULL;
2468 struct mmc_test_dbgfs_file *df; 3101 struct mmc_test_dbgfs_file *df;
2469 int ret = 0;
2470
2471 mutex_lock(&mmc_test_lock);
2472 3102
2473 if (card->debugfs_root) 3103 if (card->debugfs_root)
2474 file = debugfs_create_file("test", S_IWUSR | S_IRUGO, 3104 file = debugfs_create_file(name, mode, card->debugfs_root,
2475 card->debugfs_root, card, &mmc_test_fops_test); 3105 card, fops);
2476 3106
2477 if (IS_ERR_OR_NULL(file)) { 3107 if (IS_ERR_OR_NULL(file)) {
2478 dev_err(&card->dev, 3108 dev_err(&card->dev,
2479 "Can't create file. Perhaps debugfs is disabled.\n"); 3109 "Can't create %s. Perhaps debugfs is disabled.\n",
2480 ret = -ENODEV; 3110 name);
2481 goto err; 3111 return -ENODEV;
2482 } 3112 }
2483 3113
2484 df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL); 3114 df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
@@ -2486,14 +3116,31 @@ static int mmc_test_register_file_test(struct mmc_card *card)
2486 debugfs_remove(file); 3116 debugfs_remove(file);
2487 dev_err(&card->dev, 3117 dev_err(&card->dev,
2488 "Can't allocate memory for internal usage.\n"); 3118 "Can't allocate memory for internal usage.\n");
2489 ret = -ENOMEM; 3119 return -ENOMEM;
2490 goto err;
2491 } 3120 }
2492 3121
2493 df->card = card; 3122 df->card = card;
2494 df->file = file; 3123 df->file = file;
2495 3124
2496 list_add(&df->link, &mmc_test_file_test); 3125 list_add(&df->link, &mmc_test_file_test);
3126 return 0;
3127}
3128
3129static int mmc_test_register_dbgfs_file(struct mmc_card *card)
3130{
3131 int ret;
3132
3133 mutex_lock(&mmc_test_lock);
3134
3135 ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
3136 &mmc_test_fops_test);
3137 if (ret)
3138 goto err;
3139
3140 ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
3141 &mmc_test_fops_testlist);
3142 if (ret)
3143 goto err;
2497 3144
2498err: 3145err:
2499 mutex_unlock(&mmc_test_lock); 3146 mutex_unlock(&mmc_test_lock);
@@ -2508,7 +3155,7 @@ static int mmc_test_probe(struct mmc_card *card)
2508 if (!mmc_card_mmc(card) && !mmc_card_sd(card)) 3155 if (!mmc_card_mmc(card) && !mmc_card_sd(card))
2509 return -ENODEV; 3156 return -ENODEV;
2510 3157
2511 ret = mmc_test_register_file_test(card); 3158 ret = mmc_test_register_dbgfs_file(card);
2512 if (ret) 3159 if (ret)
2513 return ret; 3160 return ret;
2514 3161
@@ -2520,7 +3167,7 @@ static int mmc_test_probe(struct mmc_card *card)
2520static void mmc_test_remove(struct mmc_card *card) 3167static void mmc_test_remove(struct mmc_card *card)
2521{ 3168{
2522 mmc_test_free_result(card); 3169 mmc_test_free_result(card);
2523 mmc_test_free_file_test(card); 3170 mmc_test_free_dbgfs_file(card);
2524} 3171}
2525 3172
2526static struct mmc_driver mmc_driver = { 3173static struct mmc_driver mmc_driver = {
@@ -2540,7 +3187,7 @@ static void __exit mmc_test_exit(void)
2540{ 3187{
2541 /* Clear stalled data if card is still plugged */ 3188 /* Clear stalled data if card is still plugged */
2542 mmc_test_free_result(NULL); 3189 mmc_test_free_result(NULL);
2543 mmc_test_free_file_test(NULL); 3190 mmc_test_free_dbgfs_file(NULL);
2544 3191
2545 mmc_unregister_driver(&mmc_driver); 3192 mmc_unregister_driver(&mmc_driver);
2546} 3193}
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 6413afa318d..5db38cbcea6 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -52,14 +52,24 @@ static int mmc_queue_thread(void *d)
52 down(&mq->thread_sem); 52 down(&mq->thread_sem);
53 do { 53 do {
54 struct request *req = NULL; 54 struct request *req = NULL;
55 struct mmc_queue_req *tmp;
55 56
56 spin_lock_irq(q->queue_lock); 57 spin_lock_irq(q->queue_lock);
57 set_current_state(TASK_INTERRUPTIBLE); 58 set_current_state(TASK_INTERRUPTIBLE);
58 req = blk_fetch_request(q); 59 req = blk_fetch_request(q);
59 mq->req = req; 60 mq->mqrq_cur->req = req;
60 spin_unlock_irq(q->queue_lock); 61 spin_unlock_irq(q->queue_lock);
61 62
62 if (!req) { 63 if (req || mq->mqrq_prev->req) {
64 set_current_state(TASK_RUNNING);
65 mq->issue_fn(mq, req);
66 } else {
67 /*
68 * Since the queue is empty, start synchronous
69 * background ops if there is a request for it.
70 */
71 if (mmc_card_need_bkops(mq->card))
72 mmc_bkops_start(mq->card, true);
63 if (kthread_should_stop()) { 73 if (kthread_should_stop()) {
64 set_current_state(TASK_RUNNING); 74 set_current_state(TASK_RUNNING);
65 break; 75 break;
@@ -67,11 +77,14 @@ static int mmc_queue_thread(void *d)
67 up(&mq->thread_sem); 77 up(&mq->thread_sem);
68 schedule(); 78 schedule();
69 down(&mq->thread_sem); 79 down(&mq->thread_sem);
70 continue;
71 } 80 }
72 set_current_state(TASK_RUNNING);
73 81
74 mq->issue_fn(mq, req); 82 /* Current request becomes previous request and vice versa. */
83 mq->mqrq_prev->brq.mrq.data = NULL;
84 mq->mqrq_prev->req = NULL;
85 tmp = mq->mqrq_prev;
86 mq->mqrq_prev = mq->mqrq_cur;
87 mq->mqrq_cur = tmp;
75 } while (1); 88 } while (1);
76 up(&mq->thread_sem); 89 up(&mq->thread_sem);
77 90
@@ -97,10 +110,46 @@ static void mmc_request(struct request_queue *q)
97 return; 110 return;
98 } 111 }
99 112
100 if (!mq->req) 113 if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
101 wake_up_process(mq->thread); 114 wake_up_process(mq->thread);
102} 115}
103 116
117struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
118{
119 struct scatterlist *sg;
120
121 sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
122 if (!sg)
123 *err = -ENOMEM;
124 else {
125 *err = 0;
126 sg_init_table(sg, sg_len);
127 }
128
129 return sg;
130}
131
132static void mmc_queue_setup_discard(struct request_queue *q,
133 struct mmc_card *card)
134{
135 unsigned max_discard;
136
137 max_discard = mmc_calc_max_discard(card);
138 if (!max_discard)
139 return;
140
141 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
142 q->limits.max_discard_sectors = max_discard;
143 if (card->erased_byte == 0)
144 q->limits.discard_zeroes_data = 1;
145 q->limits.discard_granularity = card->pref_erase << 9;
146 /* granularity must not be greater than max. discard */
147 if (card->pref_erase > max_discard)
148 q->limits.discard_granularity = 0;
149 if (mmc_can_secure_erase_trim(card))
150 queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
151}
152
104/** 153/**
105 * mmc_init_queue - initialise a queue structure. 154 * mmc_init_queue - initialise a queue structure.
106 * @mq: mmc queue 155 * @mq: mmc queue
@@ -116,6 +165,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
116 struct mmc_host *host = card->host; 165 struct mmc_host *host = card->host;
117 u64 limit = BLK_BOUNCE_HIGH; 166 u64 limit = BLK_BOUNCE_HIGH;
118 int ret; 167 int ret;
168 struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
169 struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
119 170
120 if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) 171 if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
121 limit = *mmc_dev(host)->dma_mask; 172 limit = *mmc_dev(host)->dma_mask;
@@ -125,21 +176,16 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
125 if (!mq->queue) 176 if (!mq->queue)
126 return -ENOMEM; 177 return -ENOMEM;
127 178
179 memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
180 memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
181 mq->mqrq_cur = mqrq_cur;
182 mq->mqrq_prev = mqrq_prev;
128 mq->queue->queuedata = mq; 183 mq->queue->queuedata = mq;
129 mq->req = NULL;
130 184
131 blk_queue_prep_rq(mq->queue, mmc_prep_request); 185 blk_queue_prep_rq(mq->queue, mmc_prep_request);
132 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); 186 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
133 if (mmc_can_erase(card)) { 187 if (mmc_can_erase(card))
134 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue); 188 mmc_queue_setup_discard(mq->queue, card);
135 mq->queue->limits.max_discard_sectors = UINT_MAX;
136 if (card->erased_byte == 0)
137 mq->queue->limits.discard_zeroes_data = 1;
138 mq->queue->limits.discard_granularity = card->pref_erase << 9;
139 if (mmc_can_secure_erase_trim(card))
140 queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
141 mq->queue);
142 }
143 189
144#ifdef CONFIG_MMC_BLOCK_BOUNCE 190#ifdef CONFIG_MMC_BLOCK_BOUNCE
145 if (host->max_segs == 1) { 191 if (host->max_segs == 1) {
@@ -155,53 +201,64 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
155 bouncesz = host->max_blk_count * 512; 201 bouncesz = host->max_blk_count * 512;
156 202
157 if (bouncesz > 512) { 203 if (bouncesz > 512) {
158 mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); 204 mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
159 if (!mq->bounce_buf) { 205 if (!mqrq_cur->bounce_buf) {
206 printk(KERN_WARNING "%s: unable to "
207 "allocate bounce cur buffer\n",
208 mmc_card_name(card));
209 }
210 mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
211 if (!mqrq_prev->bounce_buf) {
160 printk(KERN_WARNING "%s: unable to " 212 printk(KERN_WARNING "%s: unable to "
161 "allocate bounce buffer\n", 213 "allocate bounce prev buffer\n",
162 mmc_card_name(card)); 214 mmc_card_name(card));
215 kfree(mqrq_cur->bounce_buf);
216 mqrq_cur->bounce_buf = NULL;
163 } 217 }
164 } 218 }
165 219
166 if (mq->bounce_buf) { 220 if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
167 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY); 221 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
168 blk_queue_max_hw_sectors(mq->queue, bouncesz / 512); 222 blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
169 blk_queue_max_segments(mq->queue, bouncesz / 512); 223 blk_queue_max_segments(mq->queue, bouncesz / 512);
170 blk_queue_max_segment_size(mq->queue, bouncesz); 224 blk_queue_max_segment_size(mq->queue, bouncesz);
171 225
172 mq->sg = kmalloc(sizeof(struct scatterlist), 226 mqrq_cur->sg = mmc_alloc_sg(1, &ret);
173 GFP_KERNEL); 227 if (ret)
174 if (!mq->sg) {
175 ret = -ENOMEM;
176 goto cleanup_queue; 228 goto cleanup_queue;
177 }
178 sg_init_table(mq->sg, 1);
179 229
180 mq->bounce_sg = kmalloc(sizeof(struct scatterlist) * 230 mqrq_cur->bounce_sg =
181 bouncesz / 512, GFP_KERNEL); 231 mmc_alloc_sg(bouncesz / 512, &ret);
182 if (!mq->bounce_sg) { 232 if (ret)
183 ret = -ENOMEM; 233 goto cleanup_queue;
234
235 mqrq_prev->sg = mmc_alloc_sg(1, &ret);
236 if (ret)
237 goto cleanup_queue;
238
239 mqrq_prev->bounce_sg =
240 mmc_alloc_sg(bouncesz / 512, &ret);
241 if (ret)
184 goto cleanup_queue; 242 goto cleanup_queue;
185 }
186 sg_init_table(mq->bounce_sg, bouncesz / 512);
187 } 243 }
188 } 244 }
189#endif 245#endif
190 246
191 if (!mq->bounce_buf) { 247 if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
192 blk_queue_bounce_limit(mq->queue, limit); 248 blk_queue_bounce_limit(mq->queue, limit);
193 blk_queue_max_hw_sectors(mq->queue, 249 blk_queue_max_hw_sectors(mq->queue,
194 min(host->max_blk_count, host->max_req_size / 512)); 250 min(host->max_blk_count, host->max_req_size / 512));
195 blk_queue_max_segments(mq->queue, host->max_segs); 251 blk_queue_max_segments(mq->queue, host->max_segs);
196 blk_queue_max_segment_size(mq->queue, host->max_seg_size); 252 blk_queue_max_segment_size(mq->queue, host->max_seg_size);
197 253
198 mq->sg = kmalloc(sizeof(struct scatterlist) * 254 mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
199 host->max_segs, GFP_KERNEL); 255 if (ret)
200 if (!mq->sg) { 256 goto cleanup_queue;
201 ret = -ENOMEM; 257
258
259 mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
260 if (ret)
202 goto cleanup_queue; 261 goto cleanup_queue;
203 }
204 sg_init_table(mq->sg, host->max_segs);
205 } 262 }
206 263
207 sema_init(&mq->thread_sem, 1); 264 sema_init(&mq->thread_sem, 1);
@@ -216,16 +273,22 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
216 273
217 return 0; 274 return 0;
218 free_bounce_sg: 275 free_bounce_sg:
219 if (mq->bounce_sg) 276 kfree(mqrq_cur->bounce_sg);
220 kfree(mq->bounce_sg); 277 mqrq_cur->bounce_sg = NULL;
221 mq->bounce_sg = NULL; 278 kfree(mqrq_prev->bounce_sg);
279 mqrq_prev->bounce_sg = NULL;
280
222 cleanup_queue: 281 cleanup_queue:
223 if (mq->sg) 282 kfree(mqrq_cur->sg);
224 kfree(mq->sg); 283 mqrq_cur->sg = NULL;
225 mq->sg = NULL; 284 kfree(mqrq_cur->bounce_buf);
226 if (mq->bounce_buf) 285 mqrq_cur->bounce_buf = NULL;
227 kfree(mq->bounce_buf); 286
228 mq->bounce_buf = NULL; 287 kfree(mqrq_prev->sg);
288 mqrq_prev->sg = NULL;
289 kfree(mqrq_prev->bounce_buf);
290 mqrq_prev->bounce_buf = NULL;
291
229 blk_cleanup_queue(mq->queue); 292 blk_cleanup_queue(mq->queue);
230 return ret; 293 return ret;
231} 294}
@@ -234,6 +297,8 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
234{ 297{
235 struct request_queue *q = mq->queue; 298 struct request_queue *q = mq->queue;
236 unsigned long flags; 299 unsigned long flags;
300 struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
301 struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
237 302
238 /* Make sure the queue isn't suspended, as that will deadlock */ 303 /* Make sure the queue isn't suspended, as that will deadlock */
239 mmc_queue_resume(mq); 304 mmc_queue_resume(mq);
@@ -247,16 +312,23 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
247 blk_start_queue(q); 312 blk_start_queue(q);
248 spin_unlock_irqrestore(q->queue_lock, flags); 313 spin_unlock_irqrestore(q->queue_lock, flags);
249 314
250 if (mq->bounce_sg) 315 kfree(mqrq_cur->bounce_sg);
251 kfree(mq->bounce_sg); 316 mqrq_cur->bounce_sg = NULL;
252 mq->bounce_sg = NULL;
253 317
254 kfree(mq->sg); 318 kfree(mqrq_cur->sg);
255 mq->sg = NULL; 319 mqrq_cur->sg = NULL;
256 320
257 if (mq->bounce_buf) 321 kfree(mqrq_cur->bounce_buf);
258 kfree(mq->bounce_buf); 322 mqrq_cur->bounce_buf = NULL;
259 mq->bounce_buf = NULL; 323
324 kfree(mqrq_prev->bounce_sg);
325 mqrq_prev->bounce_sg = NULL;
326
327 kfree(mqrq_prev->sg);
328 mqrq_prev->sg = NULL;
329
330 kfree(mqrq_prev->bounce_buf);
331 mqrq_prev->bounce_buf = NULL;
260 332
261 mq->card = NULL; 333 mq->card = NULL;
262} 334}
@@ -309,27 +381,27 @@ void mmc_queue_resume(struct mmc_queue *mq)
309/* 381/*
310 * Prepare the sg list(s) to be handed of to the host driver 382 * Prepare the sg list(s) to be handed of to the host driver
311 */ 383 */
312unsigned int mmc_queue_map_sg(struct mmc_queue *mq) 384unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
313{ 385{
314 unsigned int sg_len; 386 unsigned int sg_len;
315 size_t buflen; 387 size_t buflen;
316 struct scatterlist *sg; 388 struct scatterlist *sg;
317 int i; 389 int i;
318 390
319 if (!mq->bounce_buf) 391 if (!mqrq->bounce_buf)
320 return blk_rq_map_sg(mq->queue, mq->req, mq->sg); 392 return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
321 393
322 BUG_ON(!mq->bounce_sg); 394 BUG_ON(!mqrq->bounce_sg);
323 395
324 sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg); 396 sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
325 397
326 mq->bounce_sg_len = sg_len; 398 mqrq->bounce_sg_len = sg_len;
327 399
328 buflen = 0; 400 buflen = 0;
329 for_each_sg(mq->bounce_sg, sg, sg_len, i) 401 for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
330 buflen += sg->length; 402 buflen += sg->length;
331 403
332 sg_init_one(mq->sg, mq->bounce_buf, buflen); 404 sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
333 405
334 return 1; 406 return 1;
335} 407}
@@ -338,31 +410,30 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
338 * If writing, bounce the data to the buffer before the request 410 * If writing, bounce the data to the buffer before the request
339 * is sent to the host driver 411 * is sent to the host driver
340 */ 412 */
341void mmc_queue_bounce_pre(struct mmc_queue *mq) 413void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
342{ 414{
343 if (!mq->bounce_buf) 415 if (!mqrq->bounce_buf)
344 return; 416 return;
345 417
346 if (rq_data_dir(mq->req) != WRITE) 418 if (rq_data_dir(mqrq->req) != WRITE)
347 return; 419 return;
348 420
349 sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len, 421 sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
350 mq->bounce_buf, mq->sg[0].length); 422 mqrq->bounce_buf, mqrq->sg[0].length);
351} 423}
352 424
353/* 425/*
354 * If reading, bounce the data from the buffer after the request 426 * If reading, bounce the data from the buffer after the request
355 * has been handled by the host driver 427 * has been handled by the host driver
356 */ 428 */
357void mmc_queue_bounce_post(struct mmc_queue *mq) 429void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
358{ 430{
359 if (!mq->bounce_buf) 431 if (!mqrq->bounce_buf)
360 return; 432 return;
361 433
362 if (rq_data_dir(mq->req) != READ) 434 if (rq_data_dir(mqrq->req) != READ)
363 return; 435 return;
364 436
365 sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len, 437 sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
366 mq->bounce_buf, mq->sg[0].length); 438 mqrq->bounce_buf, mqrq->sg[0].length);
367} 439}
368
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 6223ef8dc9c..d2a1eb4b9f9 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -4,19 +4,35 @@
4struct request; 4struct request;
5struct task_struct; 5struct task_struct;
6 6
7struct mmc_blk_request {
8 struct mmc_request mrq;
9 struct mmc_command sbc;
10 struct mmc_command cmd;
11 struct mmc_command stop;
12 struct mmc_data data;
13};
14
15struct mmc_queue_req {
16 struct request *req;
17 struct mmc_blk_request brq;
18 struct scatterlist *sg;
19 char *bounce_buf;
20 struct scatterlist *bounce_sg;
21 unsigned int bounce_sg_len;
22 struct mmc_async_req mmc_active;
23};
24
7struct mmc_queue { 25struct mmc_queue {
8 struct mmc_card *card; 26 struct mmc_card *card;
9 struct task_struct *thread; 27 struct task_struct *thread;
10 struct semaphore thread_sem; 28 struct semaphore thread_sem;
11 unsigned int flags; 29 unsigned int flags;
12 struct request *req;
13 int (*issue_fn)(struct mmc_queue *, struct request *); 30 int (*issue_fn)(struct mmc_queue *, struct request *);
14 void *data; 31 void *data;
15 struct request_queue *queue; 32 struct request_queue *queue;
16 struct scatterlist *sg; 33 struct mmc_queue_req mqrq[2];
17 char *bounce_buf; 34 struct mmc_queue_req *mqrq_cur;
18 struct scatterlist *bounce_sg; 35 struct mmc_queue_req *mqrq_prev;
19 unsigned int bounce_sg_len;
20}; 36};
21 37
22extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *, 38extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -25,8 +41,9 @@ extern void mmc_cleanup_queue(struct mmc_queue *);
25extern void mmc_queue_suspend(struct mmc_queue *); 41extern void mmc_queue_suspend(struct mmc_queue *);
26extern void mmc_queue_resume(struct mmc_queue *); 42extern void mmc_queue_resume(struct mmc_queue *);
27 43
28extern unsigned int mmc_queue_map_sg(struct mmc_queue *); 44extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
29extern void mmc_queue_bounce_pre(struct mmc_queue *); 45 struct mmc_queue_req *);
30extern void mmc_queue_bounce_post(struct mmc_queue *); 46extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
47extern void mmc_queue_bounce_post(struct mmc_queue_req *);
31 48
32#endif 49#endif
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index ef103871517..85c2e1acd15 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -27,3 +27,20 @@ config MMC_CLKGATE
27 support handling this in order for it to be of any use. 27 support handling this in order for it to be of any use.
28 28
29 If unsure, say N. 29 If unsure, say N.
30
31config MMC_EMBEDDED_SDIO
32 boolean "MMC embedded SDIO device support (EXPERIMENTAL)"
33 depends on EXPERIMENTAL
34 help
35 If you say Y here, support will be added for embedded SDIO
36 devices which do not contain the necessary enumeration
37 support in hardware to be properly detected.
38
39config MMC_PARANOID_SD_INIT
40 bool "Enable paranoid SD card initialization (EXPERIMENTAL)"
41 depends on EXPERIMENTAL
42 help
43 If you say Y here, the MMC layer will be extra paranoid
44 about re-trying SD init requests. This can be a useful
45 work-around for buggy controllers and hardware. Enable
46 if you are experiencing issues with SD detection.
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 393d817ed04..f4bdbe6982c 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -25,6 +25,10 @@
25 25
26#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv) 26#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv)
27 27
28#ifdef CONFIG_MMC_TEST
29static struct mmc_driver *mmc_test_drv;
30#endif
31
28static ssize_t mmc_type_show(struct device *dev, 32static ssize_t mmc_type_show(struct device *dev,
29 struct device_attribute *attr, char *buf) 33 struct device_attribute *attr, char *buf)
30{ 34{
@@ -107,6 +111,13 @@ static int mmc_bus_probe(struct device *dev)
107 struct mmc_driver *drv = to_mmc_driver(dev->driver); 111 struct mmc_driver *drv = to_mmc_driver(dev->driver);
108 struct mmc_card *card = mmc_dev_to_card(dev); 112 struct mmc_card *card = mmc_dev_to_card(dev);
109 113
114#ifdef CONFIG_MMC_TEST
115 /*
116 * Hack: Explicitly invoking mmc_test probe to co-exist with mmcblk driver.
117 */
118 mmc_test_drv->probe(card);
119#endif
120
110 return drv->probe(card); 121 return drv->probe(card);
111} 122}
112 123
@@ -120,18 +131,19 @@ static int mmc_bus_remove(struct device *dev)
120 return 0; 131 return 0;
121} 132}
122 133
123static int mmc_bus_suspend(struct device *dev, pm_message_t state) 134static int mmc_bus_pm_suspend(struct device *dev)
124{ 135{
125 struct mmc_driver *drv = to_mmc_driver(dev->driver); 136 struct mmc_driver *drv = to_mmc_driver(dev->driver);
126 struct mmc_card *card = mmc_dev_to_card(dev); 137 struct mmc_card *card = mmc_dev_to_card(dev);
127 int ret = 0; 138 int ret = 0;
139 pm_message_t state = { PM_EVENT_SUSPEND };
128 140
129 if (dev->driver && drv->suspend) 141 if (dev->driver && drv->suspend)
130 ret = drv->suspend(card, state); 142 ret = drv->suspend(card, state);
131 return ret; 143 return ret;
132} 144}
133 145
134static int mmc_bus_resume(struct device *dev) 146static int mmc_bus_pm_resume(struct device *dev)
135{ 147{
136 struct mmc_driver *drv = to_mmc_driver(dev->driver); 148 struct mmc_driver *drv = to_mmc_driver(dev->driver);
137 struct mmc_card *card = mmc_dev_to_card(dev); 149 struct mmc_card *card = mmc_dev_to_card(dev);
@@ -143,7 +155,6 @@ static int mmc_bus_resume(struct device *dev)
143} 155}
144 156
145#ifdef CONFIG_PM_RUNTIME 157#ifdef CONFIG_PM_RUNTIME
146
147static int mmc_runtime_suspend(struct device *dev) 158static int mmc_runtime_suspend(struct device *dev)
148{ 159{
149 struct mmc_card *card = mmc_dev_to_card(dev); 160 struct mmc_card *card = mmc_dev_to_card(dev);
@@ -162,21 +173,13 @@ static int mmc_runtime_idle(struct device *dev)
162{ 173{
163 return pm_runtime_suspend(dev); 174 return pm_runtime_suspend(dev);
164} 175}
176#endif /* CONFIG_PM_RUNTIME */
165 177
166static const struct dev_pm_ops mmc_bus_pm_ops = { 178static const struct dev_pm_ops mmc_bus_pm_ops = {
167 .runtime_suspend = mmc_runtime_suspend, 179 SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_pm_suspend, mmc_bus_pm_resume)
168 .runtime_resume = mmc_runtime_resume, 180 SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume, mmc_runtime_idle)
169 .runtime_idle = mmc_runtime_idle,
170}; 181};
171 182
172#define MMC_PM_OPS_PTR (&mmc_bus_pm_ops)
173
174#else /* !CONFIG_PM_RUNTIME */
175
176#define MMC_PM_OPS_PTR NULL
177
178#endif /* !CONFIG_PM_RUNTIME */
179
180static struct bus_type mmc_bus_type = { 183static struct bus_type mmc_bus_type = {
181 .name = "mmc", 184 .name = "mmc",
182 .dev_attrs = mmc_dev_attrs, 185 .dev_attrs = mmc_dev_attrs,
@@ -184,9 +187,7 @@ static struct bus_type mmc_bus_type = {
184 .uevent = mmc_bus_uevent, 187 .uevent = mmc_bus_uevent,
185 .probe = mmc_bus_probe, 188 .probe = mmc_bus_probe,
186 .remove = mmc_bus_remove, 189 .remove = mmc_bus_remove,
187 .suspend = mmc_bus_suspend, 190 .pm = &mmc_bus_pm_ops,
188 .resume = mmc_bus_resume,
189 .pm = MMC_PM_OPS_PTR,
190}; 191};
191 192
192int mmc_register_bus(void) 193int mmc_register_bus(void)
@@ -206,6 +207,10 @@ void mmc_unregister_bus(void)
206int mmc_register_driver(struct mmc_driver *drv) 207int mmc_register_driver(struct mmc_driver *drv)
207{ 208{
208 drv->drv.bus = &mmc_bus_type; 209 drv->drv.bus = &mmc_bus_type;
210#ifdef CONFIG_MMC_TEST
211 if (!strcmp(drv->drv.name, "mmc_test"))
212 mmc_test_drv = drv;
213#endif
209 return driver_register(&drv->drv); 214 return driver_register(&drv->drv);
210} 215}
211 216
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 7843efe2235..2a288e936a8 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -5,6 +5,7 @@
5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved. 5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 6 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. 7 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8 * Copyright (c) 2012 NVIDIA Corporation, All Rights Reserved.
8 * 9 *
9 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
@@ -23,6 +24,8 @@
23#include <linux/log2.h> 24#include <linux/log2.h>
24#include <linux/regulator/consumer.h> 25#include <linux/regulator/consumer.h>
25#include <linux/pm_runtime.h> 26#include <linux/pm_runtime.h>
27#include <linux/suspend.h>
28#include <linux/wakelock.h>
26 29
27#include <linux/mmc/card.h> 30#include <linux/mmc/card.h>
28#include <linux/mmc/host.h> 31#include <linux/mmc/host.h>
@@ -106,6 +109,11 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
106 109
107 cmd->retries--; 110 cmd->retries--;
108 cmd->error = 0; 111 cmd->error = 0;
112 if (mrq->data) {
113 mrq->data->error = 0;
114 if (mrq->stop)
115 mrq->stop->error = 0;
116 }
109 host->ops->request(host, mrq); 117 host->ops->request(host, mrq);
110 } else { 118 } else {
111 led_trigger_event(host->led, LED_OFF); 119 led_trigger_event(host->led, LED_OFF);
@@ -132,7 +140,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
132 if (mrq->done) 140 if (mrq->done)
133 mrq->done(mrq); 141 mrq->done(mrq);
134 142
135 mmc_host_clk_gate(host); 143 mmc_host_clk_release(host);
136 } 144 }
137} 145}
138 146
@@ -191,17 +199,117 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
191 mrq->stop->mrq = mrq; 199 mrq->stop->mrq = mrq;
192 } 200 }
193 } 201 }
194 mmc_host_clk_ungate(host); 202 mmc_host_clk_hold(host);
195 led_trigger_event(host->led, LED_FULL); 203 led_trigger_event(host->led, LED_FULL);
196 host->ops->request(host, mrq); 204 host->ops->request(host, mrq);
197} 205}
198 206
199static void mmc_wait_done(struct mmc_request *mrq) 207static void mmc_wait_done(struct mmc_request *mrq)
200{ 208{
201 complete(mrq->done_data); 209 complete(&mrq->completion);
210}
211
212static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
213{
214 init_completion(&mrq->completion);
215 mrq->done = mmc_wait_done;
216 mmc_start_request(host, mrq);
217}
218
219static void mmc_wait_for_req_done(struct mmc_host *host,
220 struct mmc_request *mrq)
221{
222 wait_for_completion(&mrq->completion);
223}
224
225/**
226 * mmc_pre_req - Prepare for a new request
227 * @host: MMC host to prepare command
228 * @mrq: MMC request to prepare for
229 * @is_first_req: true if there is no previous started request
230 * that may run in parellel to this call, otherwise false
231 *
232 * mmc_pre_req() is called in prior to mmc_start_req() to let
233 * host prepare for the new request. Preparation of a request may be
234 * performed while another request is running on the host.
235 */
236static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
237 bool is_first_req)
238{
239 if (host->ops->pre_req)
240 host->ops->pre_req(host, mrq, is_first_req);
241}
242
243/**
244 * mmc_post_req - Post process a completed request
245 * @host: MMC host to post process command
246 * @mrq: MMC request to post process for
247 * @err: Error, if non zero, clean up any resources made in pre_req
248 *
249 * Let the host post process a completed request. Post processing of
250 * a request may be performed while another reuqest is running.
251 */
252static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
253 int err)
254{
255 if (host->ops->post_req)
256 host->ops->post_req(host, mrq, err);
202} 257}
203 258
204/** 259/**
260 * mmc_start_req - start a non-blocking request
261 * @host: MMC host to start command
262 * @areq: async request to start
263 * @error: out parameter returns 0 for success, otherwise non zero
264 *
265 * Start a new MMC custom command request for a host.
266 * If there is on ongoing async request wait for completion
267 * of that request and start the new one and return.
268 * Does not wait for the new request to complete.
269 *
270 * Returns the completed request, NULL in case of none completed.
271 * Wait for the an ongoing request (previoulsy started) to complete and
272 * return the completed request. If there is no ongoing request, NULL
273 * is returned without waiting. NULL is not an error condition.
274 */
275struct mmc_async_req *mmc_start_req(struct mmc_host *host,
276 struct mmc_async_req *areq, int *error)
277{
278 int err = 0;
279 struct mmc_async_req *data = host->areq;
280
281 /* Prepare a new request */
282 if (areq)
283 mmc_pre_req(host, areq->mrq, !host->areq);
284
285 if (host->areq) {
286 mmc_wait_for_req_done(host, host->areq->mrq);
287 err = host->areq->err_check(host->card, host->areq);
288 if (err) {
289 mmc_post_req(host, host->areq->mrq, 0);
290 if (areq)
291 mmc_post_req(host, areq->mrq, -EINVAL);
292
293 host->areq = NULL;
294 goto out;
295 }
296 }
297
298 if (areq)
299 __mmc_start_req(host, areq->mrq);
300
301 if (host->areq)
302 mmc_post_req(host, host->areq->mrq, 0);
303
304 host->areq = areq;
305 out:
306 if (error)
307 *error = err;
308 return data;
309}
310EXPORT_SYMBOL(mmc_start_req);
311
312/**
205 * mmc_wait_for_req - start a request and wait for completion 313 * mmc_wait_for_req - start a request and wait for completion
206 * @host: MMC host to start command 314 * @host: MMC host to start command
207 * @mrq: MMC request to start 315 * @mrq: MMC request to start
@@ -212,17 +320,114 @@ static void mmc_wait_done(struct mmc_request *mrq)
212 */ 320 */
213void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) 321void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
214{ 322{
215 DECLARE_COMPLETION_ONSTACK(complete); 323 __mmc_start_req(host, mrq);
324 mmc_wait_for_req_done(host, mrq);
325}
326EXPORT_SYMBOL(mmc_wait_for_req);
216 327
217 mrq->done_data = &complete; 328/**
218 mrq->done = mmc_wait_done; 329 * mmc_bkops_start - Issue start for mmc background ops
330 * @card: the MMC card associated with bkops
331 * @is_synchronous: is the backops synchronous
332 *
333 * Issued background ops without the busy wait.
334 */
335int mmc_bkops_start(struct mmc_card *card, bool is_synchronous)
336{
337 int err;
338 unsigned long flags;
219 339
220 mmc_start_request(host, mrq); 340 BUG_ON(!card);
341
342 if (!card->ext_csd.bk_ops_en || mmc_card_doing_bkops(card))
343 return 1;
344
345 mmc_claim_host(card->host);
346 err = mmc_send_bk_ops_cmd(card, is_synchronous);
347 if (err)
348 pr_err("%s: abort bk ops (%d error)\n",
349 mmc_hostname(card->host), err);
221 350
222 wait_for_completion(&complete); 351 /*
352 * Incase of asynchronous backops, set card state
353 * to doing bk ops to ensure that HPI is issued before
354 * handling any new request in the queue.
355 */
356 spin_lock_irqsave(&card->host->lock, flags);
357 mmc_card_clr_need_bkops(card);
358 if (!is_synchronous)
359 mmc_card_set_doing_bkops(card);
360 spin_unlock_irqrestore(&card->host->lock, flags);
361
362 mmc_release_host(card->host);
363
364 return err;
223} 365}
366EXPORT_SYMBOL(mmc_bkops_start);
224 367
225EXPORT_SYMBOL(mmc_wait_for_req); 368/**
369 * mmc_interrupt_hpi - Issue for High priority Interrupt
370 * @card: the MMC card associated with the HPI transfer
371 *
372 * Issued High Priority Interrupt, and check for card status
373 * util out-of prg-state.
374 */
375int mmc_interrupt_hpi(struct mmc_card *card)
376{
377 int err;
378 u32 status;
379 unsigned long flags;
380
381 BUG_ON(!card);
382
383 if (!mmc_card_mmc(card))
384 return 1;
385
386 if (!card->ext_csd.hpi_en) {
387 pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
388 return 1;
389 }
390
391 mmc_claim_host(card->host);
392 err = mmc_send_status(card, &status);
393 if (err) {
394 pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
395 goto out;
396 }
397
398 /*
399 * If the card status is in PRG-state, we can send the HPI command.
400 */
401 if (R1_CURRENT_STATE(status) == R1_STATE_PRG) {
402 do {
403 /*
404 * We don't know when the HPI command will finish
405 * processing, so we need to resend HPI until out
406 * of prg-state, and keep checking the card status
407 * with SEND_STATUS. If a timeout error occurs when
408 * sending the HPI command, we are already out of
409 * prg-state.
410 */
411 err = mmc_send_hpi_cmd(card, &status);
412 if (err)
413 pr_debug("%s: abort HPI (%d error)\n",
414 mmc_hostname(card->host), err);
415
416 err = mmc_send_status(card, &status);
417 if (err)
418 break;
419 } while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
420 } else
421 pr_debug("%s: Left prg-state\n", mmc_hostname(card->host));
422
423out:
424 spin_lock_irqsave(&card->host->lock, flags);
425 mmc_card_clr_doing_bkops(card);
426 spin_unlock_irqrestore(&card->host->lock, flags);
427 mmc_release_host(card->host);
428 return err;
429}
430EXPORT_SYMBOL(mmc_interrupt_hpi);
226 431
227/** 432/**
228 * mmc_wait_for_cmd - start a command and wait for completion 433 * mmc_wait_for_cmd - start a command and wait for completion
@@ -634,15 +839,17 @@ static inline void mmc_set_ios(struct mmc_host *host)
634 */ 839 */
635void mmc_set_chip_select(struct mmc_host *host, int mode) 840void mmc_set_chip_select(struct mmc_host *host, int mode)
636{ 841{
842 mmc_host_clk_hold(host);
637 host->ios.chip_select = mode; 843 host->ios.chip_select = mode;
638 mmc_set_ios(host); 844 mmc_set_ios(host);
845 mmc_host_clk_release(host);
639} 846}
640 847
641/* 848/*
642 * Sets the host clock to the highest possible frequency that 849 * Sets the host clock to the highest possible frequency that
643 * is below "hz". 850 * is below "hz".
644 */ 851 */
645void mmc_set_clock(struct mmc_host *host, unsigned int hz) 852static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
646{ 853{
647 WARN_ON(hz < host->f_min); 854 WARN_ON(hz < host->f_min);
648 855
@@ -653,6 +860,13 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
653 mmc_set_ios(host); 860 mmc_set_ios(host);
654} 861}
655 862
863void mmc_set_clock(struct mmc_host *host, unsigned int hz)
864{
865 mmc_host_clk_hold(host);
866 __mmc_set_clock(host, hz);
867 mmc_host_clk_release(host);
868}
869
656#ifdef CONFIG_MMC_CLKGATE 870#ifdef CONFIG_MMC_CLKGATE
657/* 871/*
658 * This gates the clock by setting it to 0 Hz. 872 * This gates the clock by setting it to 0 Hz.
@@ -685,7 +899,7 @@ void mmc_ungate_clock(struct mmc_host *host)
685 if (host->clk_old) { 899 if (host->clk_old) {
686 BUG_ON(host->ios.clock); 900 BUG_ON(host->ios.clock);
687 /* This call will also set host->clk_gated to false */ 901 /* This call will also set host->clk_gated to false */
688 mmc_set_clock(host, host->clk_old); 902 __mmc_set_clock(host, host->clk_old);
689 } 903 }
690} 904}
691 905
@@ -713,8 +927,10 @@ void mmc_set_ungated(struct mmc_host *host)
713 */ 927 */
714void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) 928void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
715{ 929{
930 mmc_host_clk_hold(host);
716 host->ios.bus_mode = mode; 931 host->ios.bus_mode = mode;
717 mmc_set_ios(host); 932 mmc_set_ios(host);
933 mmc_host_clk_release(host);
718} 934}
719 935
720/* 936/*
@@ -722,8 +938,10 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
722 */ 938 */
723void mmc_set_bus_width(struct mmc_host *host, unsigned int width) 939void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
724{ 940{
941 mmc_host_clk_hold(host);
725 host->ios.bus_width = width; 942 host->ios.bus_width = width;
726 mmc_set_ios(host); 943 mmc_set_ios(host);
944 mmc_host_clk_release(host);
727} 945}
728 946
729/** 947/**
@@ -921,8 +1139,10 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
921 1139
922 ocr &= 3 << bit; 1140 ocr &= 3 << bit;
923 1141
1142 mmc_host_clk_hold(host);
924 host->ios.vdd = bit; 1143 host->ios.vdd = bit;
925 mmc_set_ios(host); 1144 mmc_set_ios(host);
1145 mmc_host_clk_release(host);
926 } else { 1146 } else {
927 pr_warning("%s: host doesn't support card's voltages\n", 1147 pr_warning("%s: host doesn't support card's voltages\n",
928 mmc_hostname(host)); 1148 mmc_hostname(host));
@@ -969,8 +1189,10 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
969 */ 1189 */
970void mmc_set_timing(struct mmc_host *host, unsigned int timing) 1190void mmc_set_timing(struct mmc_host *host, unsigned int timing)
971{ 1191{
1192 mmc_host_clk_hold(host);
972 host->ios.timing = timing; 1193 host->ios.timing = timing;
973 mmc_set_ios(host); 1194 mmc_set_ios(host);
1195 mmc_host_clk_release(host);
974} 1196}
975 1197
976/* 1198/*
@@ -978,8 +1200,10 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
978 */ 1200 */
979void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) 1201void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
980{ 1202{
1203 mmc_host_clk_hold(host);
981 host->ios.drv_type = drv_type; 1204 host->ios.drv_type = drv_type;
982 mmc_set_ios(host); 1205 mmc_set_ios(host);
1206 mmc_host_clk_release(host);
983} 1207}
984 1208
985/* 1209/*
@@ -997,6 +1221,8 @@ static void mmc_power_up(struct mmc_host *host)
997{ 1221{
998 int bit; 1222 int bit;
999 1223
1224 mmc_host_clk_hold(host);
1225
1000 /* If ocr is set, we use it */ 1226 /* If ocr is set, we use it */
1001 if (host->ocr) 1227 if (host->ocr)
1002 bit = ffs(host->ocr) - 1; 1228 bit = ffs(host->ocr) - 1;
@@ -1032,10 +1258,14 @@ static void mmc_power_up(struct mmc_host *host)
1032 * time required to reach a stable voltage. 1258 * time required to reach a stable voltage.
1033 */ 1259 */
1034 mmc_delay(10); 1260 mmc_delay(10);
1261
1262 mmc_host_clk_release(host);
1035} 1263}
1036 1264
1037static void mmc_power_off(struct mmc_host *host) 1265void mmc_power_off(struct mmc_host *host)
1038{ 1266{
1267 mmc_host_clk_hold(host);
1268
1039 host->ios.clock = 0; 1269 host->ios.clock = 0;
1040 host->ios.vdd = 0; 1270 host->ios.vdd = 0;
1041 1271
@@ -1053,6 +1283,8 @@ static void mmc_power_off(struct mmc_host *host)
1053 host->ios.bus_width = MMC_BUS_WIDTH_1; 1283 host->ios.bus_width = MMC_BUS_WIDTH_1;
1054 host->ios.timing = MMC_TIMING_LEGACY; 1284 host->ios.timing = MMC_TIMING_LEGACY;
1055 mmc_set_ios(host); 1285 mmc_set_ios(host);
1286
1287 mmc_host_clk_release(host);
1056} 1288}
1057 1289
1058/* 1290/*
@@ -1094,6 +1326,36 @@ static inline void mmc_bus_put(struct mmc_host *host)
1094 spin_unlock_irqrestore(&host->lock, flags); 1326 spin_unlock_irqrestore(&host->lock, flags);
1095} 1327}
1096 1328
1329int mmc_resume_bus(struct mmc_host *host)
1330{
1331 unsigned long flags;
1332
1333 if (!mmc_bus_needs_resume(host))
1334 return -EINVAL;
1335
1336 printk("%s: Starting deferred resume\n", mmc_hostname(host));
1337 spin_lock_irqsave(&host->lock, flags);
1338 host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
1339 host->rescan_disable = 0;
1340 spin_unlock_irqrestore(&host->lock, flags);
1341
1342 mmc_bus_get(host);
1343 if (host->bus_ops && !host->bus_dead) {
1344 mmc_power_up(host);
1345 BUG_ON(!host->bus_ops->resume);
1346 host->bus_ops->resume(host);
1347 }
1348
1349 if (host->bus_ops->detect && !host->bus_dead)
1350 host->bus_ops->detect(host);
1351
1352 mmc_bus_put(host);
1353 printk("%s: Deferred resume completed\n", mmc_hostname(host));
1354 return 0;
1355}
1356
1357EXPORT_SYMBOL(mmc_resume_bus);
1358
1097/* 1359/*
1098 * Assign a mmc bus handler to a host. Only one bus handler may control a 1360 * Assign a mmc bus handler to a host. Only one bus handler may control a
1099 * host at any given time. 1361 * host at any given time.
@@ -1120,8 +1382,7 @@ void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1120} 1382}
1121 1383
1122/* 1384/*
1123 * Remove the current bus handler from a host. Assumes that there are 1385 * Remove the current bus handler from a host.
1124 * no interesting cards left, so the bus is powered down.
1125 */ 1386 */
1126void mmc_detach_bus(struct mmc_host *host) 1387void mmc_detach_bus(struct mmc_host *host)
1127{ 1388{
@@ -1138,8 +1399,6 @@ void mmc_detach_bus(struct mmc_host *host)
1138 1399
1139 spin_unlock_irqrestore(&host->lock, flags); 1400 spin_unlock_irqrestore(&host->lock, flags);
1140 1401
1141 mmc_power_off(host);
1142
1143 mmc_bus_put(host); 1402 mmc_bus_put(host);
1144} 1403}
1145 1404
@@ -1162,6 +1421,7 @@ void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1162 spin_unlock_irqrestore(&host->lock, flags); 1421 spin_unlock_irqrestore(&host->lock, flags);
1163#endif 1422#endif
1164 1423
1424 wake_lock(&host->detect_wake_lock);
1165 mmc_schedule_delayed_work(&host->detect, delay); 1425 mmc_schedule_delayed_work(&host->detect, delay);
1166} 1426}
1167 1427
@@ -1408,7 +1668,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1408 goto out; 1668 goto out;
1409 } 1669 }
1410 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || 1670 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
1411 R1_CURRENT_STATE(cmd.resp[0]) == 7); 1671 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
1412out: 1672out:
1413 return err; 1673 return err;
1414} 1674}
@@ -1516,6 +1776,82 @@ int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
1516} 1776}
1517EXPORT_SYMBOL(mmc_erase_group_aligned); 1777EXPORT_SYMBOL(mmc_erase_group_aligned);
1518 1778
1779static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
1780 unsigned int arg)
1781{
1782 struct mmc_host *host = card->host;
1783 unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
1784 unsigned int last_timeout = 0;
1785
1786 if (card->erase_shift)
1787 max_qty = UINT_MAX >> card->erase_shift;
1788 else if (mmc_card_sd(card))
1789 max_qty = UINT_MAX;
1790 else
1791 max_qty = UINT_MAX / card->erase_size;
1792
1793 /* Find the largest qty with an OK timeout */
1794 do {
1795 y = 0;
1796 for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
1797 timeout = mmc_erase_timeout(card, arg, qty + x);
1798 if (timeout > host->max_discard_to)
1799 break;
1800 if (timeout < last_timeout)
1801 break;
1802 last_timeout = timeout;
1803 y = x;
1804 }
1805 qty += y;
1806 } while (y);
1807
1808 if (!qty)
1809 return 0;
1810
1811 if (qty == 1)
1812 return 1;
1813
1814 /* Convert qty to sectors */
1815 if (card->erase_shift)
1816 max_discard = --qty << card->erase_shift;
1817 else if (mmc_card_sd(card))
1818 max_discard = qty;
1819 else
1820 max_discard = --qty * card->erase_size;
1821
1822 return max_discard;
1823}
1824
1825unsigned int mmc_calc_max_discard(struct mmc_card *card)
1826{
1827 struct mmc_host *host = card->host;
1828 unsigned int max_discard, max_trim;
1829
1830 if (!host->max_discard_to)
1831 return UINT_MAX;
1832
1833 /*
1834 * Without erase_group_def set, MMC erase timeout depends on clock
1835 * frequence which can change. In that case, the best choice is
1836 * just the preferred erase size.
1837 */
1838 if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
1839 return card->pref_erase;
1840
1841 max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
1842 if (mmc_can_trim(card)) {
1843 max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
1844 if (max_trim < max_discard)
1845 max_discard = max_trim;
1846 } else if (max_discard < card->erase_size) {
1847 max_discard = 0;
1848 }
1849 pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
1850 mmc_hostname(host), max_discard, host->max_discard_to);
1851 return max_discard;
1852}
1853EXPORT_SYMBOL(mmc_calc_max_discard);
1854
1519int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen) 1855int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
1520{ 1856{
1521 struct mmc_command cmd = {0}; 1857 struct mmc_command cmd = {0};
@@ -1568,6 +1904,7 @@ void mmc_rescan(struct work_struct *work)
1568 struct mmc_host *host = 1904 struct mmc_host *host =
1569 container_of(work, struct mmc_host, detect.work); 1905 container_of(work, struct mmc_host, detect.work);
1570 int i; 1906 int i;
1907 bool extend_wakelock = false;
1571 1908
1572 if (host->rescan_disable) 1909 if (host->rescan_disable)
1573 return; 1910 return;
@@ -1582,6 +1919,12 @@ void mmc_rescan(struct work_struct *work)
1582 && !(host->caps & MMC_CAP_NONREMOVABLE)) 1919 && !(host->caps & MMC_CAP_NONREMOVABLE))
1583 host->bus_ops->detect(host); 1920 host->bus_ops->detect(host);
1584 1921
1922 /* If the card was removed the bus will be marked
1923 * as dead - extend the wakelock so userspace
1924 * can respond */
1925 if (host->bus_dead)
1926 extend_wakelock = 1;
1927
1585 /* 1928 /*
1586 * Let mmc_bus_put() free the bus/bus_ops if we've found that 1929 * Let mmc_bus_put() free the bus/bus_ops if we've found that
1587 * the card is no longer present. 1930 * the card is no longer present.
@@ -1606,16 +1949,24 @@ void mmc_rescan(struct work_struct *work)
1606 1949
1607 mmc_claim_host(host); 1950 mmc_claim_host(host);
1608 for (i = 0; i < ARRAY_SIZE(freqs); i++) { 1951 for (i = 0; i < ARRAY_SIZE(freqs); i++) {
1609 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) 1952 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) {
1953 extend_wakelock = true;
1610 break; 1954 break;
1955 }
1611 if (freqs[i] <= host->f_min) 1956 if (freqs[i] <= host->f_min)
1612 break; 1957 break;
1613 } 1958 }
1614 mmc_release_host(host); 1959 mmc_release_host(host);
1615 1960
1616 out: 1961 out:
1617 if (host->caps & MMC_CAP_NEEDS_POLL) 1962 if (extend_wakelock)
1963 wake_lock_timeout(&host->detect_wake_lock, HZ / 2);
1964 else
1965 wake_unlock(&host->detect_wake_lock);
1966 if (host->caps & MMC_CAP_NEEDS_POLL) {
1967 wake_lock(&host->detect_wake_lock);
1618 mmc_schedule_delayed_work(&host->detect, HZ); 1968 mmc_schedule_delayed_work(&host->detect, HZ);
1969 }
1619} 1970}
1620 1971
1621void mmc_start_host(struct mmc_host *host) 1972void mmc_start_host(struct mmc_host *host)
@@ -1635,7 +1986,8 @@ void mmc_stop_host(struct mmc_host *host)
1635 1986
1636 if (host->caps & MMC_CAP_DISABLE) 1987 if (host->caps & MMC_CAP_DISABLE)
1637 cancel_delayed_work(&host->disable); 1988 cancel_delayed_work(&host->disable);
1638 cancel_delayed_work_sync(&host->detect); 1989 if (cancel_delayed_work_sync(&host->detect))
1990 wake_unlock(&host->detect_wake_lock);
1639 mmc_flush_scheduled_work(); 1991 mmc_flush_scheduled_work();
1640 1992
1641 /* clear pm flags now and let card drivers set them as needed */ 1993 /* clear pm flags now and let card drivers set them as needed */
@@ -1648,6 +2000,7 @@ void mmc_stop_host(struct mmc_host *host)
1648 2000
1649 mmc_claim_host(host); 2001 mmc_claim_host(host);
1650 mmc_detach_bus(host); 2002 mmc_detach_bus(host);
2003 mmc_power_off(host);
1651 mmc_release_host(host); 2004 mmc_release_host(host);
1652 mmc_bus_put(host); 2005 mmc_bus_put(host);
1653 return; 2006 return;
@@ -1663,6 +2016,10 @@ int mmc_power_save_host(struct mmc_host *host)
1663{ 2016{
1664 int ret = 0; 2017 int ret = 0;
1665 2018
2019#ifdef CONFIG_MMC_DEBUG
2020 pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
2021#endif
2022
1666 mmc_bus_get(host); 2023 mmc_bus_get(host);
1667 2024
1668 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { 2025 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
@@ -1685,6 +2042,10 @@ int mmc_power_restore_host(struct mmc_host *host)
1685{ 2042{
1686 int ret; 2043 int ret;
1687 2044
2045#ifdef CONFIG_MMC_DEBUG
2046 pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
2047#endif
2048
1688 mmc_bus_get(host); 2049 mmc_bus_get(host);
1689 2050
1690 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { 2051 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
@@ -1751,9 +2112,17 @@ int mmc_suspend_host(struct mmc_host *host)
1751{ 2112{
1752 int err = 0; 2113 int err = 0;
1753 2114
2115 if (mmc_bus_needs_resume(host))
2116 return 0;
2117
2118 if (mmc_card_mmc(host->card) && mmc_card_doing_bkops(host->card))
2119 mmc_interrupt_hpi(host->card);
2120 mmc_card_clr_need_bkops(host->card);
2121
1754 if (host->caps & MMC_CAP_DISABLE) 2122 if (host->caps & MMC_CAP_DISABLE)
1755 cancel_delayed_work(&host->disable); 2123 cancel_delayed_work(&host->disable);
1756 cancel_delayed_work(&host->detect); 2124 if (cancel_delayed_work(&host->detect))
2125 wake_unlock(&host->detect_wake_lock);
1757 mmc_flush_scheduled_work(); 2126 mmc_flush_scheduled_work();
1758 2127
1759 mmc_bus_get(host); 2128 mmc_bus_get(host);
@@ -1769,6 +2138,7 @@ int mmc_suspend_host(struct mmc_host *host)
1769 host->bus_ops->remove(host); 2138 host->bus_ops->remove(host);
1770 mmc_claim_host(host); 2139 mmc_claim_host(host);
1771 mmc_detach_bus(host); 2140 mmc_detach_bus(host);
2141 mmc_power_off(host);
1772 mmc_release_host(host); 2142 mmc_release_host(host);
1773 host->pm_flags = 0; 2143 host->pm_flags = 0;
1774 err = 0; 2144 err = 0;
@@ -1793,6 +2163,12 @@ int mmc_resume_host(struct mmc_host *host)
1793 int err = 0; 2163 int err = 0;
1794 2164
1795 mmc_bus_get(host); 2165 mmc_bus_get(host);
2166 if (mmc_bus_manual_resume(host)) {
2167 host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
2168 mmc_bus_put(host);
2169 return 0;
2170 }
2171
1796 if (host->bus_ops && !host->bus_dead) { 2172 if (host->bus_ops && !host->bus_dead) {
1797 if (!mmc_card_keep_power(host)) { 2173 if (!mmc_card_keep_power(host)) {
1798 mmc_power_up(host); 2174 mmc_power_up(host);
@@ -1843,9 +2219,14 @@ int mmc_pm_notify(struct notifier_block *notify_block,
1843 case PM_SUSPEND_PREPARE: 2219 case PM_SUSPEND_PREPARE:
1844 2220
1845 spin_lock_irqsave(&host->lock, flags); 2221 spin_lock_irqsave(&host->lock, flags);
2222 if (mmc_bus_needs_resume(host)) {
2223 spin_unlock_irqrestore(&host->lock, flags);
2224 break;
2225 }
1846 host->rescan_disable = 1; 2226 host->rescan_disable = 1;
1847 spin_unlock_irqrestore(&host->lock, flags); 2227 spin_unlock_irqrestore(&host->lock, flags);
1848 cancel_delayed_work_sync(&host->detect); 2228 if (cancel_delayed_work_sync(&host->detect))
2229 wake_unlock(&host->detect_wake_lock);
1849 2230
1850 if (!host->bus_ops || host->bus_ops->suspend) 2231 if (!host->bus_ops || host->bus_ops->suspend)
1851 break; 2232 break;
@@ -1856,6 +2237,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
1856 host->bus_ops->remove(host); 2237 host->bus_ops->remove(host);
1857 2238
1858 mmc_detach_bus(host); 2239 mmc_detach_bus(host);
2240 mmc_power_off(host);
1859 mmc_release_host(host); 2241 mmc_release_host(host);
1860 host->pm_flags = 0; 2242 host->pm_flags = 0;
1861 break; 2243 break;
@@ -1865,6 +2247,10 @@ int mmc_pm_notify(struct notifier_block *notify_block,
1865 case PM_POST_RESTORE: 2247 case PM_POST_RESTORE:
1866 2248
1867 spin_lock_irqsave(&host->lock, flags); 2249 spin_lock_irqsave(&host->lock, flags);
2250 if (mmc_bus_manual_resume(host)) {
2251 spin_unlock_irqrestore(&host->lock, flags);
2252 break;
2253 }
1868 host->rescan_disable = 0; 2254 host->rescan_disable = 0;
1869 spin_unlock_irqrestore(&host->lock, flags); 2255 spin_unlock_irqrestore(&host->lock, flags);
1870 mmc_detect_change(host, 0); 2256 mmc_detect_change(host, 0);
@@ -1875,6 +2261,22 @@ int mmc_pm_notify(struct notifier_block *notify_block,
1875} 2261}
1876#endif 2262#endif
1877 2263
2264#ifdef CONFIG_MMC_EMBEDDED_SDIO
2265void mmc_set_embedded_sdio_data(struct mmc_host *host,
2266 struct sdio_cis *cis,
2267 struct sdio_cccr *cccr,
2268 struct sdio_embedded_func *funcs,
2269 int num_funcs)
2270{
2271 host->embedded_sdio_data.cis = cis;
2272 host->embedded_sdio_data.cccr = cccr;
2273 host->embedded_sdio_data.funcs = funcs;
2274 host->embedded_sdio_data.num_funcs = num_funcs;
2275}
2276
2277EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
2278#endif
2279
1878static int __init mmc_init(void) 2280static int __init mmc_init(void)
1879{ 2281{
1880 int ret; 2282 int ret;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index d9411ed2a39..14664f1fb16 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -43,6 +43,7 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage,
43 bool cmd11); 43 bool cmd11);
44void mmc_set_timing(struct mmc_host *host, unsigned int timing); 44void mmc_set_timing(struct mmc_host *host, unsigned int timing);
45void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type); 45void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
46void mmc_power_off(struct mmc_host *host);
46 47
47static inline void mmc_delay(unsigned int ms) 48static inline void mmc_delay(unsigned int ms)
48{ 49{
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index b29d3e8fd3a..e09f0a7eb65 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -119,14 +119,14 @@ static void mmc_host_clk_gate_work(struct work_struct *work)
119} 119}
120 120
121/** 121/**
122 * mmc_host_clk_ungate - ungate hardware MCI clocks 122 * mmc_host_clk_hold - ungate hardware MCI clocks
123 * @host: host to ungate. 123 * @host: host to ungate.
124 * 124 *
125 * Makes sure the host ios.clock is restored to a non-zero value 125 * Makes sure the host ios.clock is restored to a non-zero value
126 * past this call. Increase clock reference count and ungate clock 126 * past this call. Increase clock reference count and ungate clock
127 * if we're the first user. 127 * if we're the first user.
128 */ 128 */
129void mmc_host_clk_ungate(struct mmc_host *host) 129void mmc_host_clk_hold(struct mmc_host *host)
130{ 130{
131 unsigned long flags; 131 unsigned long flags;
132 132
@@ -164,14 +164,14 @@ static bool mmc_host_may_gate_card(struct mmc_card *card)
164} 164}
165 165
166/** 166/**
167 * mmc_host_clk_gate - gate off hardware MCI clocks 167 * mmc_host_clk_release - gate off hardware MCI clocks
168 * @host: host to gate. 168 * @host: host to gate.
169 * 169 *
170 * Calls the host driver with ios.clock set to zero as often as possible 170 * Calls the host driver with ios.clock set to zero as often as possible
171 * in order to gate off hardware MCI clocks. Decrease clock reference 171 * in order to gate off hardware MCI clocks. Decrease clock reference
172 * count and schedule disabling of clock. 172 * count and schedule disabling of clock.
173 */ 173 */
174void mmc_host_clk_gate(struct mmc_host *host) 174void mmc_host_clk_release(struct mmc_host *host)
175{ 175{
176 unsigned long flags; 176 unsigned long flags;
177 177
@@ -179,7 +179,7 @@ void mmc_host_clk_gate(struct mmc_host *host)
179 host->clk_requests--; 179 host->clk_requests--;
180 if (mmc_host_may_gate_card(host->card) && 180 if (mmc_host_may_gate_card(host->card) &&
181 !host->clk_requests) 181 !host->clk_requests)
182 schedule_work(&host->clk_gate_work); 182 queue_work(system_nrt_wq, &host->clk_gate_work);
183 spin_unlock_irqrestore(&host->clk_lock, flags); 183 spin_unlock_irqrestore(&host->clk_lock, flags);
184} 184}
185 185
@@ -231,7 +231,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
231 if (cancel_work_sync(&host->clk_gate_work)) 231 if (cancel_work_sync(&host->clk_gate_work))
232 mmc_host_clk_gate_delayed(host); 232 mmc_host_clk_gate_delayed(host);
233 if (host->clk_gated) 233 if (host->clk_gated)
234 mmc_host_clk_ungate(host); 234 mmc_host_clk_hold(host);
235 /* There should be only one user now */ 235 /* There should be only one user now */
236 WARN_ON(host->clk_requests > 1); 236 WARN_ON(host->clk_requests > 1);
237} 237}
@@ -284,6 +284,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
284 284
285 spin_lock_init(&host->lock); 285 spin_lock_init(&host->lock);
286 init_waitqueue_head(&host->wq); 286 init_waitqueue_head(&host->wq);
287 wake_lock_init(&host->detect_wake_lock, WAKE_LOCK_SUSPEND,
288 kasprintf(GFP_KERNEL, "%s_detect", mmc_hostname(host)));
287 INIT_DELAYED_WORK(&host->detect, mmc_rescan); 289 INIT_DELAYED_WORK(&host->detect, mmc_rescan);
288 INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable); 290 INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
289#ifdef CONFIG_PM 291#ifdef CONFIG_PM
@@ -336,7 +338,8 @@ int mmc_add_host(struct mmc_host *host)
336#endif 338#endif
337 339
338 mmc_start_host(host); 340 mmc_start_host(host);
339 register_pm_notifier(&host->pm_notify); 341 if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
342 register_pm_notifier(&host->pm_notify);
340 343
341 return 0; 344 return 0;
342} 345}
@@ -353,7 +356,9 @@ EXPORT_SYMBOL(mmc_add_host);
353 */ 356 */
354void mmc_remove_host(struct mmc_host *host) 357void mmc_remove_host(struct mmc_host *host)
355{ 358{
356 unregister_pm_notifier(&host->pm_notify); 359 if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
360 unregister_pm_notifier(&host->pm_notify);
361
357 mmc_stop_host(host); 362 mmc_stop_host(host);
358 363
359#ifdef CONFIG_DEBUG_FS 364#ifdef CONFIG_DEBUG_FS
@@ -380,6 +385,7 @@ void mmc_free_host(struct mmc_host *host)
380 spin_lock(&mmc_host_lock); 385 spin_lock(&mmc_host_lock);
381 idr_remove(&mmc_host_idr, host->index); 386 idr_remove(&mmc_host_idr, host->index);
382 spin_unlock(&mmc_host_lock); 387 spin_unlock(&mmc_host_lock);
388 wake_lock_destroy(&host->detect_wake_lock);
383 389
384 put_device(&host->class_dev); 390 put_device(&host->class_dev);
385} 391}
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index de199f91192..fb8a5cd2e4a 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -16,16 +16,16 @@ int mmc_register_host_class(void);
16void mmc_unregister_host_class(void); 16void mmc_unregister_host_class(void);
17 17
18#ifdef CONFIG_MMC_CLKGATE 18#ifdef CONFIG_MMC_CLKGATE
19void mmc_host_clk_ungate(struct mmc_host *host); 19void mmc_host_clk_hold(struct mmc_host *host);
20void mmc_host_clk_gate(struct mmc_host *host); 20void mmc_host_clk_release(struct mmc_host *host);
21unsigned int mmc_host_clk_rate(struct mmc_host *host); 21unsigned int mmc_host_clk_rate(struct mmc_host *host);
22 22
23#else 23#else
24static inline void mmc_host_clk_ungate(struct mmc_host *host) 24static inline void mmc_host_clk_hold(struct mmc_host *host)
25{ 25{
26} 26}
27 27
28static inline void mmc_host_clk_gate(struct mmc_host *host) 28static inline void mmc_host_clk_release(struct mmc_host *host)
29{ 29{
30} 30}
31 31
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index aa7d1d79b8c..69fb2275845 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -4,6 +4,7 @@
4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved. 4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5 * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. 5 * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
6 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. 6 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
7 * Copyright (c) 2012 NVIDIA Corporation, All Rights Reserved.
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
@@ -259,7 +260,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
259 } 260 }
260 261
261 card->ext_csd.rev = ext_csd[EXT_CSD_REV]; 262 card->ext_csd.rev = ext_csd[EXT_CSD_REV];
262 if (card->ext_csd.rev > 5) { 263 if (card->ext_csd.rev > 6) {
263 printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n", 264 printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
264 mmc_hostname(card->host), card->ext_csd.rev); 265 mmc_hostname(card->host), card->ext_csd.rev);
265 err = -EINVAL; 266 err = -EINVAL;
@@ -359,6 +360,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
359 * card has the Enhanced area enabled. If so, export enhanced 360 * card has the Enhanced area enabled. If so, export enhanced
360 * area offset and size to user by adding sysfs interface. 361 * area offset and size to user by adding sysfs interface.
361 */ 362 */
363 card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
362 if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) && 364 if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
363 (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) { 365 (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
364 u8 hc_erase_grp_sz = 366 u8 hc_erase_grp_sz =
@@ -402,8 +404,28 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
402 ext_csd[EXT_CSD_TRIM_MULT]; 404 ext_csd[EXT_CSD_TRIM_MULT];
403 } 405 }
404 406
405 if (card->ext_csd.rev >= 5) 407 card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
408 if (card->ext_csd.rev >= 5) {
406 card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM]; 409 card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
410 /* check whether the eMMC card supports HPI */
411 if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
412 card->ext_csd.hpi = 1;
413 if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
414 card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
415 else
416 card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
417 /*
418 * Indicate the maximum timeout to close
419 * a command interrupted by HPI
420 */
421 card->ext_csd.out_of_int_time =
422 ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
423 }
424
425 /* Check whether the eMMC card supports background ops */
426 if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)
427 card->ext_csd.bk_ops = 1;
428 }
407 429
408 if (ext_csd[EXT_CSD_ERASED_MEM_CONT]) 430 if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
409 card->erased_byte = 0xFF; 431 card->erased_byte = 0xFF;
@@ -726,6 +748,40 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
726 } 748 }
727 749
728 /* 750 /*
751 * Enable HPI feature (if supported)
752 */
753 if (card->ext_csd.hpi && (card->host->caps & MMC_CAP_BKOPS)) {
754 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
755 EXT_CSD_HPI_MGMT, 1, 0);
756 if (err && err != -EBADMSG)
757 goto free_card;
758 if (err) {
759 pr_warning("%s: Enabling HPI failed\n",
760 mmc_hostname(card->host));
761 err = 0;
762 } else {
763 card->ext_csd.hpi_en = 1;
764 }
765 }
766
767 /*
768 * Enable Background ops feature (if supported)
769 */
770 if (card->ext_csd.bk_ops && (card->host->caps & MMC_CAP_BKOPS)) {
771 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
772 EXT_CSD_BKOPS_EN, 1, 0);
773 if (err && err != -EBADMSG)
774 goto free_card;
775 if (err) {
776 pr_warning("%s: Enabling BK ops failed\n",
777 mmc_hostname(card->host));
778 err = 0;
779 } else {
780 card->ext_csd.bk_ops_en = 1;
781 }
782 }
783
784 /*
729 * Compute bus speed. 785 * Compute bus speed.
730 */ 786 */
731 max_dtr = (unsigned int)-1; 787 max_dtr = (unsigned int)-1;
@@ -891,6 +947,7 @@ static void mmc_detect(struct mmc_host *host)
891 947
892 mmc_claim_host(host); 948 mmc_claim_host(host);
893 mmc_detach_bus(host); 949 mmc_detach_bus(host);
950 mmc_power_off(host);
894 mmc_release_host(host); 951 mmc_release_host(host);
895 } 952 }
896} 953}
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 845ce7c533b..330b968393d 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -407,7 +407,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
407 break; 407 break;
408 if (mmc_host_is_spi(card->host)) 408 if (mmc_host_is_spi(card->host))
409 break; 409 break;
410 } while (R1_CURRENT_STATE(status) == 7); 410 } while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
411 411
412 if (mmc_host_is_spi(card->host)) { 412 if (mmc_host_is_spi(card->host)) {
413 if (status & R1_SPI_ILLEGAL_COMMAND) 413 if (status & R1_SPI_ILLEGAL_COMMAND)
@@ -547,3 +547,73 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width)
547 err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width); 547 err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
548 return err; 548 return err;
549} 549}
550
551int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
552{
553 struct mmc_command cmd = {0};
554 unsigned int opcode;
555 unsigned int flags;
556 int err;
557
558 opcode = card->ext_csd.hpi_cmd;
559 flags = MMC_RSP_R1 | MMC_CMD_AC;
560
561 cmd.opcode = opcode;
562 cmd.arg = card->rca << 16 | 1;
563 cmd.flags = flags;
564
565 err = mmc_wait_for_cmd(card->host, &cmd, 0);
566 if (err) {
567 pr_warn("%s: error %d interrupting operation. "
568 "HPI command response %#x\n", mmc_hostname(card->host),
569 err, cmd.resp[0]);
570 return err;
571 }
572 if (status)
573 *status = cmd.resp[0];
574
575 return 0;
576}
577
578int mmc_send_bk_ops_cmd(struct mmc_card *card, bool is_synchronous)
579{
580 int err;
581 struct mmc_command cmd;
582 u32 status;
583
584 BUG_ON(!card);
585 BUG_ON(!card->host);
586
587 memset(&cmd, 0, sizeof(struct mmc_command));
588
589 cmd.opcode = MMC_SWITCH;
590 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
591 (EXT_CSD_BKOPS_EN << 16) |
592 (1 << 8) |
593 EXT_CSD_CMD_SET_NORMAL;
594 if (is_synchronous)
595 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
596 else
597 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
598
599 err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
600 if (err)
601 return err;
602
603 /* Must check status to be sure of no errors */
604 do {
605 err = mmc_send_status(card, &status);
606 if (err)
607 return err;
608 if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
609 break;
610 } while (R1_CURRENT_STATE(status) == 7);
611
612 if (status & 0xFDFFA000)
613 printk(KERN_ERR "%s: unexpected status %#x after "
614 "switch", mmc_hostname(card->host), status);
615 if (status & R1_SWITCH_ERROR)
616 return -EBADMSG;
617
618 return 0;
619}
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 9276946fa5b..d8f157dee14 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -2,6 +2,7 @@
2 * linux/drivers/mmc/core/mmc_ops.h 2 * linux/drivers/mmc/core/mmc_ops.h
3 * 3 *
4 * Copyright 2006-2007 Pierre Ossman 4 * Copyright 2006-2007 Pierre Ossman
5 * Copyright (c) 2012 NVIDIA Corporation, All Rights Reserved.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -26,6 +27,8 @@ int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
26int mmc_spi_set_crc(struct mmc_host *host, int use_crc); 27int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
27int mmc_card_sleepawake(struct mmc_host *host, int sleep); 28int mmc_card_sleepawake(struct mmc_host *host, int sleep);
28int mmc_bus_test(struct mmc_card *card, u8 bus_width); 29int mmc_bus_test(struct mmc_card *card, u8 bus_width);
30int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
31int mmc_send_bk_ops_cmd(struct mmc_card *card, bool is_synchronous);
29 32
30#endif 33#endif
31 34
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index ff2774128aa..cb2a9d4d451 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -348,11 +348,11 @@ static int mmc_read_switch(struct mmc_card *card)
348 } 348 }
349 349
350 card->sw_caps.sd3_curr_limit = status[7]; 350 card->sw_caps.sd3_curr_limit = status[7];
351 } else {
352 if (status[13] & 0x02)
353 card->sw_caps.hs_max_dtr = 50000000;
354 } 351 }
355 352
353 if (status[13] & 0x02)
354 card->sw_caps.hs_max_dtr = 50000000;
355
356out: 356out:
357 kfree(status); 357 kfree(status);
358 358
@@ -409,110 +409,141 @@ out:
409 409
410static int sd_select_driver_type(struct mmc_card *card, u8 *status) 410static int sd_select_driver_type(struct mmc_card *card, u8 *status)
411{ 411{
412 int host_drv_type = 0, card_drv_type = 0; 412 int host_drv_type = SD_DRIVER_TYPE_B;
413 int card_drv_type = SD_DRIVER_TYPE_B;
414 int drive_strength;
413 int err; 415 int err;
414 416
415 /* 417 /*
416 * If the host doesn't support any of the Driver Types A,C or D, 418 * If the host doesn't support any of the Driver Types A,C or D,
417 * default Driver Type B is used. 419 * or there is no board specific handler then default Driver
420 * Type B is used.
418 */ 421 */
419 if (!(card->host->caps & (MMC_CAP_DRIVER_TYPE_A | MMC_CAP_DRIVER_TYPE_C 422 if (!(card->host->caps & (MMC_CAP_DRIVER_TYPE_A | MMC_CAP_DRIVER_TYPE_C
420 | MMC_CAP_DRIVER_TYPE_D))) 423 | MMC_CAP_DRIVER_TYPE_D)))
421 return 0; 424 return 0;
422 425
423 if (card->host->caps & MMC_CAP_DRIVER_TYPE_A) { 426 if (!card->host->ops->select_drive_strength)
424 host_drv_type = MMC_SET_DRIVER_TYPE_A; 427 return 0;
425 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A) 428
426 card_drv_type = MMC_SET_DRIVER_TYPE_A; 429 if (card->host->caps & MMC_CAP_DRIVER_TYPE_A)
427 else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B) 430 host_drv_type |= SD_DRIVER_TYPE_A;
428 card_drv_type = MMC_SET_DRIVER_TYPE_B; 431
429 else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C) 432 if (card->host->caps & MMC_CAP_DRIVER_TYPE_C)
430 card_drv_type = MMC_SET_DRIVER_TYPE_C; 433 host_drv_type |= SD_DRIVER_TYPE_C;
431 } else if (card->host->caps & MMC_CAP_DRIVER_TYPE_C) { 434
432 host_drv_type = MMC_SET_DRIVER_TYPE_C; 435 if (card->host->caps & MMC_CAP_DRIVER_TYPE_D)
433 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C) 436 host_drv_type |= SD_DRIVER_TYPE_D;
434 card_drv_type = MMC_SET_DRIVER_TYPE_C;
435 } else if (!(card->host->caps & MMC_CAP_DRIVER_TYPE_D)) {
436 /*
437 * If we are here, that means only the default driver type
438 * B is supported by the host.
439 */
440 host_drv_type = MMC_SET_DRIVER_TYPE_B;
441 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B)
442 card_drv_type = MMC_SET_DRIVER_TYPE_B;
443 else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
444 card_drv_type = MMC_SET_DRIVER_TYPE_C;
445 }
446 437
447 err = mmc_sd_switch(card, 1, 2, card_drv_type, status); 438 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
439 card_drv_type |= SD_DRIVER_TYPE_A;
440
441 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
442 card_drv_type |= SD_DRIVER_TYPE_C;
443
444 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D)
445 card_drv_type |= SD_DRIVER_TYPE_D;
446
447 /*
448 * The drive strength that the hardware can support
449 * depends on the board design. Pass the appropriate
450 * information and let the hardware specific code
451 * return what is possible given the options
452 */
453 drive_strength = card->host->ops->select_drive_strength(
454 card->sw_caps.uhs_max_dtr,
455 host_drv_type, card_drv_type);
456
457 err = mmc_sd_switch(card, 1, 2, drive_strength, status);
448 if (err) 458 if (err)
449 return err; 459 return err;
450 460
451 if ((status[15] & 0xF) != card_drv_type) { 461 if ((status[15] & 0xF) != drive_strength) {
452 printk(KERN_WARNING "%s: Problem setting driver strength!\n", 462 printk(KERN_WARNING "%s: Problem setting drive strength!\n",
453 mmc_hostname(card->host)); 463 mmc_hostname(card->host));
454 return 0; 464 return 0;
455 } 465 }
456 466
457 mmc_set_driver_type(card->host, host_drv_type); 467 mmc_set_driver_type(card->host, drive_strength);
458 468
459 return 0; 469 return 0;
460} 470}
461 471
462static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status) 472static void sd_update_bus_speed_mode(struct mmc_card *card)
463{ 473{
464 unsigned int bus_speed = 0, timing = 0;
465 int err;
466
467 /* 474 /*
468 * If the host doesn't support any of the UHS-I modes, fallback on 475 * If the host doesn't support any of the UHS-I modes, fallback on
469 * default speed. 476 * default speed.
470 */ 477 */
471 if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | 478 if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
472 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) 479 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) {
473 return 0; 480 card->sd_bus_speed = 0;
481 return;
482 }
474 483
475 if ((card->host->caps & MMC_CAP_UHS_SDR104) && 484 if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
476 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) { 485 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
477 bus_speed = UHS_SDR104_BUS_SPEED; 486 card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
478 timing = MMC_TIMING_UHS_SDR104;
479 card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
480 } else if ((card->host->caps & MMC_CAP_UHS_DDR50) && 487 } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
481 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) { 488 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
482 bus_speed = UHS_DDR50_BUS_SPEED; 489 card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
483 timing = MMC_TIMING_UHS_DDR50;
484 card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
485 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 490 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
486 MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode & 491 MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
487 SD_MODE_UHS_SDR50)) { 492 SD_MODE_UHS_SDR50)) {
488 bus_speed = UHS_SDR50_BUS_SPEED; 493 card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
489 timing = MMC_TIMING_UHS_SDR50;
490 card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
491 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 494 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
492 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) && 495 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
493 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) { 496 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
494 bus_speed = UHS_SDR25_BUS_SPEED; 497 card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
495 timing = MMC_TIMING_UHS_SDR25;
496 card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
497 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 498 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
498 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 | 499 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
499 MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode & 500 MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
500 SD_MODE_UHS_SDR12)) { 501 SD_MODE_UHS_SDR12)) {
501 bus_speed = UHS_SDR12_BUS_SPEED; 502 card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
502 timing = MMC_TIMING_UHS_SDR12; 503 }
503 card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR; 504}
505
506static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
507{
508 int err;
509 unsigned int timing = 0;
510
511 switch (card->sd_bus_speed) {
512 case UHS_SDR104_BUS_SPEED:
513 timing = MMC_TIMING_UHS_SDR104;
514 card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
515 break;
516 case UHS_DDR50_BUS_SPEED:
517 timing = MMC_TIMING_UHS_DDR50;
518 card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
519 break;
520 case UHS_SDR50_BUS_SPEED:
521 timing = MMC_TIMING_UHS_SDR50;
522 card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
523 break;
524 case UHS_SDR25_BUS_SPEED:
525 timing = MMC_TIMING_UHS_SDR25;
526 card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
527 break;
528 case UHS_SDR12_BUS_SPEED:
529 timing = MMC_TIMING_UHS_SDR12;
530 card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
531 break;
532 default:
533 return 0;
504 } 534 }
505 535
506 card->sd_bus_speed = bus_speed; 536 err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status);
507 err = mmc_sd_switch(card, 1, 0, bus_speed, status);
508 if (err) 537 if (err)
509 return err; 538 return err;
510 539
511 if ((status[16] & 0xF) != bus_speed) 540 if ((status[16] & 0xF) != card->sd_bus_speed)
512 printk(KERN_WARNING "%s: Problem setting bus speed mode!\n", 541 printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",
513 mmc_hostname(card->host)); 542 mmc_hostname(card->host));
514 else { 543 else {
515 mmc_set_timing(card->host, timing); 544 mmc_set_timing(card->host, timing);
545 if (timing == MMC_TIMING_UHS_DDR50)
546 mmc_card_set_ddr_mode(card);
516 mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr); 547 mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
517 } 548 }
518 549
@@ -608,18 +639,24 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
608 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); 639 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
609 } 640 }
610 641
642 /*
643 * Select the bus speed mode depending on host
644 * and card capability.
645 */
646 sd_update_bus_speed_mode(card);
647
611 /* Set the driver strength for the card */ 648 /* Set the driver strength for the card */
612 err = sd_select_driver_type(card, status); 649 err = sd_select_driver_type(card, status);
613 if (err) 650 if (err)
614 goto out; 651 goto out;
615 652
616 /* Set bus speed mode of the card */ 653 /* Set current limit for the card */
617 err = sd_set_bus_speed_mode(card, status); 654 err = sd_set_current_limit(card, status);
618 if (err) 655 if (err)
619 goto out; 656 goto out;
620 657
621 /* Set current limit for the card */ 658 /* Set bus speed mode of the card */
622 err = sd_set_current_limit(card, status); 659 err = sd_set_bus_speed_mode(card, status);
623 if (err) 660 if (err)
624 goto out; 661 goto out;
625 662
@@ -764,6 +801,9 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
764 bool reinit) 801 bool reinit)
765{ 802{
766 int err; 803 int err;
804#ifdef CONFIG_MMC_PARANOID_SD_INIT
805 int retries;
806#endif
767 807
768 if (!reinit) { 808 if (!reinit) {
769 /* 809 /*
@@ -790,7 +830,26 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
790 /* 830 /*
791 * Fetch switch information from card. 831 * Fetch switch information from card.
792 */ 832 */
833#ifdef CONFIG_MMC_PARANOID_SD_INIT
834 for (retries = 1; retries <= 3; retries++) {
835 err = mmc_read_switch(card);
836 if (!err) {
837 if (retries > 1) {
838 printk(KERN_WARNING
839 "%s: recovered\n",
840 mmc_hostname(host));
841 }
842 break;
843 } else {
844 printk(KERN_WARNING
845 "%s: read switch failed (attempt %d)\n",
846 mmc_hostname(host), retries);
847 }
848 }
849#else
793 err = mmc_read_switch(card); 850 err = mmc_read_switch(card);
851#endif
852
794 if (err) 853 if (err)
795 return err; 854 return err;
796 } 855 }
@@ -989,18 +1048,36 @@ static void mmc_sd_remove(struct mmc_host *host)
989 */ 1048 */
990static void mmc_sd_detect(struct mmc_host *host) 1049static void mmc_sd_detect(struct mmc_host *host)
991{ 1050{
992 int err; 1051 int err = 0;
1052#ifdef CONFIG_MMC_PARANOID_SD_INIT
1053 int retries = 5;
1054#endif
993 1055
994 BUG_ON(!host); 1056 BUG_ON(!host);
995 BUG_ON(!host->card); 1057 BUG_ON(!host->card);
996 1058
997 mmc_claim_host(host); 1059 mmc_claim_host(host);
998 1060
999 /* 1061 /*
1000 * Just check if our card has been removed. 1062 * Just check if our card has been removed.
1001 */ 1063 */
1064#ifdef CONFIG_MMC_PARANOID_SD_INIT
1065 while(retries) {
1066 err = mmc_send_status(host->card, NULL);
1067 if (err) {
1068 retries--;
1069 udelay(5);
1070 continue;
1071 }
1072 break;
1073 }
1074 if (!retries) {
1075 printk(KERN_ERR "%s(%s): Unable to re-detect card (%d)\n",
1076 __func__, mmc_hostname(host), err);
1077 }
1078#else
1002 err = mmc_send_status(host->card, NULL); 1079 err = mmc_send_status(host->card, NULL);
1003 1080#endif
1004 mmc_release_host(host); 1081 mmc_release_host(host);
1005 1082
1006 if (err) { 1083 if (err) {
@@ -1008,6 +1085,7 @@ static void mmc_sd_detect(struct mmc_host *host)
1008 1085
1009 mmc_claim_host(host); 1086 mmc_claim_host(host);
1010 mmc_detach_bus(host); 1087 mmc_detach_bus(host);
1088 mmc_power_off(host);
1011 mmc_release_host(host); 1089 mmc_release_host(host);
1012 } 1090 }
1013} 1091}
@@ -1038,12 +1116,31 @@ static int mmc_sd_suspend(struct mmc_host *host)
1038static int mmc_sd_resume(struct mmc_host *host) 1116static int mmc_sd_resume(struct mmc_host *host)
1039{ 1117{
1040 int err; 1118 int err;
1119#ifdef CONFIG_MMC_PARANOID_SD_INIT
1120 int retries;
1121#endif
1041 1122
1042 BUG_ON(!host); 1123 BUG_ON(!host);
1043 BUG_ON(!host->card); 1124 BUG_ON(!host->card);
1044 1125
1045 mmc_claim_host(host); 1126 mmc_claim_host(host);
1127#ifdef CONFIG_MMC_PARANOID_SD_INIT
1128 retries = 5;
1129 while (retries) {
1130 err = mmc_sd_init_card(host, host->ocr, host->card);
1131
1132 if (err) {
1133 printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n",
1134 mmc_hostname(host), err, retries);
1135 mdelay(5);
1136 retries--;
1137 continue;
1138 }
1139 break;
1140 }
1141#else
1046 err = mmc_sd_init_card(host, host->ocr, host->card); 1142 err = mmc_sd_init_card(host, host->ocr, host->card);
1143#endif
1047 mmc_release_host(host); 1144 mmc_release_host(host);
1048 1145
1049 return err; 1146 return err;
@@ -1095,6 +1192,9 @@ int mmc_attach_sd(struct mmc_host *host)
1095{ 1192{
1096 int err; 1193 int err;
1097 u32 ocr; 1194 u32 ocr;
1195#ifdef CONFIG_MMC_PARANOID_SD_INIT
1196 int retries;
1197#endif
1098 1198
1099 BUG_ON(!host); 1199 BUG_ON(!host);
1100 WARN_ON(!host->claimed); 1200 WARN_ON(!host->claimed);
@@ -1159,9 +1259,27 @@ int mmc_attach_sd(struct mmc_host *host)
1159 /* 1259 /*
1160 * Detect and init the card. 1260 * Detect and init the card.
1161 */ 1261 */
1262#ifdef CONFIG_MMC_PARANOID_SD_INIT
1263 retries = 5;
1264 while (retries) {
1265 err = mmc_sd_init_card(host, host->ocr, NULL);
1266 if (err) {
1267 retries--;
1268 continue;
1269 }
1270 break;
1271 }
1272
1273 if (!retries) {
1274 printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n",
1275 mmc_hostname(host), err);
1276 goto err;
1277 }
1278#else
1162 err = mmc_sd_init_card(host, host->ocr, NULL); 1279 err = mmc_sd_init_card(host, host->ocr, NULL);
1163 if (err) 1280 if (err)
1164 goto err; 1281 goto err;
1282#endif
1165 1283
1166 mmc_release_host(host); 1284 mmc_release_host(host);
1167 err = mmc_add_card(host->card); 1285 err = mmc_add_card(host->card);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 262fff01917..3d8a5e41a48 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -27,6 +27,10 @@
27#include "sdio_ops.h" 27#include "sdio_ops.h"
28#include "sdio_cis.h" 28#include "sdio_cis.h"
29 29
30#ifdef CONFIG_MMC_EMBEDDED_SDIO
31#include <linux/mmc/sdio_ids.h>
32#endif
33
30static int sdio_read_fbr(struct sdio_func *func) 34static int sdio_read_fbr(struct sdio_func *func)
31{ 35{
32 int ret; 36 int ret;
@@ -102,6 +106,7 @@ static int sdio_read_cccr(struct mmc_card *card)
102 int ret; 106 int ret;
103 int cccr_vsn; 107 int cccr_vsn;
104 unsigned char data; 108 unsigned char data;
109 unsigned char speed;
105 110
106 memset(&card->cccr, 0, sizeof(struct sdio_cccr)); 111 memset(&card->cccr, 0, sizeof(struct sdio_cccr));
107 112
@@ -111,8 +116,8 @@ static int sdio_read_cccr(struct mmc_card *card)
111 116
112 cccr_vsn = data & 0x0f; 117 cccr_vsn = data & 0x0f;
113 118
114 if (cccr_vsn > SDIO_CCCR_REV_1_20) { 119 if (cccr_vsn > SDIO_CCCR_REV_3_00) {
115 printk(KERN_ERR "%s: unrecognised CCCR structure version %d\n", 120 pr_err("%s: unrecognised CCCR structure version %d\n",
116 mmc_hostname(card->host), cccr_vsn); 121 mmc_hostname(card->host), cccr_vsn);
117 return -EINVAL; 122 return -EINVAL;
118 } 123 }
@@ -140,12 +145,60 @@ static int sdio_read_cccr(struct mmc_card *card)
140 } 145 }
141 146
142 if (cccr_vsn >= SDIO_CCCR_REV_1_20) { 147 if (cccr_vsn >= SDIO_CCCR_REV_1_20) {
143 ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &data); 148 ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
144 if (ret) 149 if (ret)
145 goto out; 150 goto out;
146 151
147 if (data & SDIO_SPEED_SHS) 152 card->scr.sda_spec3 = 0;
148 card->cccr.high_speed = 1; 153 card->sw_caps.sd3_bus_mode = 0;
154 card->sw_caps.sd3_drv_type = 0;
155 if (cccr_vsn >= SDIO_CCCR_REV_3_00) {
156 card->scr.sda_spec3 = 1;
157 ret = mmc_io_rw_direct(card, 0, 0,
158 SDIO_CCCR_UHS, 0, &data);
159 if (ret)
160 goto out;
161
162 if (card->host->caps &
163 (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
164 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
165 MMC_CAP_UHS_DDR50)) {
166 if (data & SDIO_UHS_DDR50)
167 card->sw_caps.sd3_bus_mode
168 |= SD_MODE_UHS_DDR50;
169
170 if (data & SDIO_UHS_SDR50)
171 card->sw_caps.sd3_bus_mode
172 |= SD_MODE_UHS_SDR50;
173
174 if (data & SDIO_UHS_SDR104)
175 card->sw_caps.sd3_bus_mode
176 |= SD_MODE_UHS_SDR104;
177 }
178
179 ret = mmc_io_rw_direct(card, 0, 0,
180 SDIO_CCCR_DRIVE_STRENGTH, 0, &data);
181 if (ret)
182 goto out;
183
184 if (data & SDIO_DRIVE_SDTA)
185 card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_A;
186 if (data & SDIO_DRIVE_SDTC)
187 card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_C;
188 if (data & SDIO_DRIVE_SDTD)
189 card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_D;
190 }
191
192 /* if no uhs mode ensure we check for high speed */
193 if (!card->sw_caps.sd3_bus_mode) {
194 if (speed & SDIO_SPEED_SHS) {
195 card->cccr.high_speed = 1;
196 card->sw_caps.hs_max_dtr = 50000000;
197 } else {
198 card->cccr.high_speed = 0;
199 card->sw_caps.hs_max_dtr = 25000000;
200 }
201 }
149 } 202 }
150 203
151out: 204out:
@@ -327,6 +380,193 @@ static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)
327 return max_dtr; 380 return max_dtr;
328} 381}
329 382
383static unsigned char host_drive_to_sdio_drive(int host_strength)
384{
385 switch (host_strength) {
386 case MMC_SET_DRIVER_TYPE_A:
387 return SDIO_DTSx_SET_TYPE_A;
388 case MMC_SET_DRIVER_TYPE_B:
389 return SDIO_DTSx_SET_TYPE_B;
390 case MMC_SET_DRIVER_TYPE_C:
391 return SDIO_DTSx_SET_TYPE_C;
392 case MMC_SET_DRIVER_TYPE_D:
393 return SDIO_DTSx_SET_TYPE_D;
394 default:
395 return SDIO_DTSx_SET_TYPE_B;
396 }
397}
398
399static void sdio_select_driver_type(struct mmc_card *card)
400{
401 int host_drv_type = SD_DRIVER_TYPE_B;
402 int card_drv_type = SD_DRIVER_TYPE_B;
403 int drive_strength;
404 unsigned char card_strength;
405 int err;
406
407 /*
408 * If the host doesn't support any of the Driver Types A,C or D,
409 * or there is no board specific handler then default Driver
410 * Type B is used.
411 */
412 if (!(card->host->caps &
413 (MMC_CAP_DRIVER_TYPE_A |
414 MMC_CAP_DRIVER_TYPE_C |
415 MMC_CAP_DRIVER_TYPE_D)))
416 return;
417
418 if (!card->host->ops->select_drive_strength)
419 return;
420
421 if (card->host->caps & MMC_CAP_DRIVER_TYPE_A)
422 host_drv_type |= SD_DRIVER_TYPE_A;
423
424 if (card->host->caps & MMC_CAP_DRIVER_TYPE_C)
425 host_drv_type |= SD_DRIVER_TYPE_C;
426
427 if (card->host->caps & MMC_CAP_DRIVER_TYPE_D)
428 host_drv_type |= SD_DRIVER_TYPE_D;
429
430 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
431 card_drv_type |= SD_DRIVER_TYPE_A;
432
433 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
434 card_drv_type |= SD_DRIVER_TYPE_C;
435
436 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D)
437 card_drv_type |= SD_DRIVER_TYPE_D;
438
439 /*
440 * The drive strength that the hardware can support
441 * depends on the board design. Pass the appropriate
442 * information and let the hardware specific code
443 * return what is possible given the options
444 */
445 drive_strength = card->host->ops->select_drive_strength(
446 card->sw_caps.uhs_max_dtr,
447 host_drv_type, card_drv_type);
448
449 /* if error just use default for drive strength B */
450 err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_DRIVE_STRENGTH, 0,
451 &card_strength);
452 if (err)
453 return;
454
455 card_strength &= ~(SDIO_DRIVE_DTSx_MASK<<SDIO_DRIVE_DTSx_SHIFT);
456 card_strength |= host_drive_to_sdio_drive(drive_strength);
457
458 err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_DRIVE_STRENGTH,
459 card_strength, NULL);
460
461 /* if error default to drive strength B */
462 if (!err)
463 mmc_set_driver_type(card->host, drive_strength);
464}
465
466
467static int sdio_set_bus_speed_mode(struct mmc_card *card)
468{
469 unsigned int bus_speed, timing;
470 int err;
471 unsigned char speed;
472
473 /*
474 * If the host doesn't support any of the UHS-I modes, fallback on
475 * default speed.
476 */
477 if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
478 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
479 return 0;
480
481 bus_speed = SDIO_SPEED_SDR12;
482 timing = MMC_TIMING_UHS_SDR12;
483 if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
484 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
485 bus_speed = SDIO_SPEED_SDR104;
486 timing = MMC_TIMING_UHS_SDR104;
487 card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
488 } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
489 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
490 bus_speed = SDIO_SPEED_DDR50;
491 timing = MMC_TIMING_UHS_DDR50;
492 card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
493 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
494 MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
495 SD_MODE_UHS_SDR50)) {
496 bus_speed = SDIO_SPEED_SDR50;
497 timing = MMC_TIMING_UHS_SDR50;
498 card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
499 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
500 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
501 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
502 bus_speed = SDIO_SPEED_SDR25;
503 timing = MMC_TIMING_UHS_SDR25;
504 card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
505 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
506 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
507 MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
508 SD_MODE_UHS_SDR12)) {
509 bus_speed = SDIO_SPEED_SDR12;
510 timing = MMC_TIMING_UHS_SDR12;
511 card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
512 }
513
514 err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
515 if (err)
516 return err;
517
518 speed &= ~SDIO_SPEED_BSS_MASK;
519 speed |= bus_speed;
520 err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
521 if (err)
522 return err;
523
524 if (bus_speed) {
525 mmc_set_timing(card->host, timing);
526 mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
527 }
528
529 return 0;
530}
531
532/*
533 * UHS-I specific initialization procedure
534 */
535static int mmc_sdio_init_uhs_card(struct mmc_card *card)
536{
537 int err;
538
539 if (!card->scr.sda_spec3)
540 return 0;
541
542 /*
543 * Switch to wider bus (if supported).
544 */
545 if (card->host->caps & MMC_CAP_4_BIT_DATA) {
546 err = sdio_enable_4bit_bus(card);
547 if (err > 0) {
548 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
549 err = 0;
550 }
551 }
552
553 /* Set the driver strength for the card */
554 sdio_select_driver_type(card);
555
556 /* Set bus speed mode of the card */
557 err = sdio_set_bus_speed_mode(card);
558 if (err)
559 goto out;
560
561 /* Initialize and start re-tuning timer */
562 if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning)
563 err = card->host->ops->execute_tuning(card->host);
564
565out:
566
567 return err;
568}
569
330/* 570/*
331 * Handle the detection and initialisation of a card. 571 * Handle the detection and initialisation of a card.
332 * 572 *
@@ -394,6 +634,29 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
394 host->ops->init_card(host, card); 634 host->ops->init_card(host, card);
395 635
396 /* 636 /*
637 * If the host and card support UHS-I mode request the card
638 * to switch to 1.8V signaling level. No 1.8v signalling if
639 * UHS mode is not enabled to maintain compatibilty and some
640 * systems that claim 1.8v signalling in fact do not support
641 * it.
642 */
643 if ((ocr & R4_18V_PRESENT) &&
644 (host->caps &
645 (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
646 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
647 MMC_CAP_UHS_DDR50))) {
648 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, true);
649 if (err) {
650 ocr &= ~R4_18V_PRESENT;
651 host->ocr &= ~R4_18V_PRESENT;
652 }
653 err = 0;
654 } else {
655 ocr &= ~R4_18V_PRESENT;
656 host->ocr &= ~R4_18V_PRESENT;
657 }
658
659 /*
397 * For native busses: set card RCA and quit open drain mode. 660 * For native busses: set card RCA and quit open drain mode.
398 */ 661 */
399 if (!powered_resume && !mmc_host_is_spi(host)) { 662 if (!powered_resume && !mmc_host_is_spi(host)) {
@@ -449,19 +712,35 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
449 goto finish; 712 goto finish;
450 } 713 }
451 714
452 /* 715#ifdef CONFIG_MMC_EMBEDDED_SDIO
453 * Read the common registers. 716 if (host->embedded_sdio_data.cccr)
454 */ 717 memcpy(&card->cccr, host->embedded_sdio_data.cccr, sizeof(struct sdio_cccr));
455 err = sdio_read_cccr(card); 718 else {
456 if (err) 719#endif
457 goto remove; 720 /*
721 * Read the common registers.
722 */
723 err = sdio_read_cccr(card);
724 if (err)
725 goto remove;
726#ifdef CONFIG_MMC_EMBEDDED_SDIO
727 }
728#endif
458 729
459 /* 730#ifdef CONFIG_MMC_EMBEDDED_SDIO
460 * Read the common CIS tuples. 731 if (host->embedded_sdio_data.cis)
461 */ 732 memcpy(&card->cis, host->embedded_sdio_data.cis, sizeof(struct sdio_cis));
462 err = sdio_read_common_cis(card); 733 else {
463 if (err) 734#endif
464 goto remove; 735 /*
736 * Read the common CIS tuples.
737 */
738 err = sdio_read_common_cis(card);
739 if (err)
740 goto remove;
741#ifdef CONFIG_MMC_EMBEDDED_SDIO
742 }
743#endif
465 744
466 if (oldcard) { 745 if (oldcard) {
467 int same = (card->cis.vendor == oldcard->cis.vendor && 746 int same = (card->cis.vendor == oldcard->cis.vendor &&
@@ -494,29 +773,39 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
494 if (err) 773 if (err)
495 goto remove; 774 goto remove;
496 775
497 /* 776 /* Initialization sequence for UHS-I cards */
498 * Switch to high-speed (if supported). 777 /* Only if card supports 1.8v and UHS signaling */
499 */ 778 if ((ocr & R4_18V_PRESENT) && card->sw_caps.sd3_bus_mode) {
500 err = sdio_enable_hs(card); 779 err = mmc_sdio_init_uhs_card(card);
501 if (err > 0) 780 if (err)
502 mmc_sd_go_highspeed(card); 781 goto remove;
503 else if (err)
504 goto remove;
505 782
506 /* 783 /* Card is an ultra-high-speed card */
507 * Change to the card's maximum speed. 784 mmc_sd_card_set_uhs(card);
508 */ 785 } else {
509 mmc_set_clock(host, mmc_sdio_get_max_clock(card)); 786 /*
787 * Switch to high-speed (if supported).
788 */
789 err = sdio_enable_hs(card);
790 if (err > 0)
791 mmc_sd_go_highspeed(card);
792 else if (err)
793 goto remove;
510 794
511 /* 795 /*
512 * Switch to wider bus (if supported). 796 * Change to the card's maximum speed.
513 */ 797 */
514 err = sdio_enable_4bit_bus(card); 798 mmc_set_clock(host, mmc_sdio_get_max_clock(card));
515 if (err > 0)
516 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
517 else if (err)
518 goto remove;
519 799
800 /*
801 * Switch to wider bus (if supported).
802 */
803 err = sdio_enable_4bit_bus(card);
804 if (err > 0)
805 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
806 else if (err)
807 goto remove;
808 }
520finish: 809finish:
521 if (!oldcard) 810 if (!oldcard)
522 host->card = card; 811 host->card = card;
@@ -597,6 +886,7 @@ out:
597 886
598 mmc_claim_host(host); 887 mmc_claim_host(host);
599 mmc_detach_bus(host); 888 mmc_detach_bus(host);
889 mmc_power_off(host);
600 mmc_release_host(host); 890 mmc_release_host(host);
601 } 891 }
602} 892}
@@ -798,8 +1088,17 @@ int mmc_attach_sdio(struct mmc_host *host)
798 * Detect and init the card. 1088 * Detect and init the card.
799 */ 1089 */
800 err = mmc_sdio_init_card(host, host->ocr, NULL, 0); 1090 err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
801 if (err) 1091 if (err) {
802 goto err; 1092 if (err == -EAGAIN) {
1093 /*
1094 * Retry initialization with S18R set to 0.
1095 */
1096 host->ocr &= ~R4_18V_PRESENT;
1097 err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
1098 }
1099 if (err)
1100 goto err;
1101 }
803 card = host->card; 1102 card = host->card;
804 1103
805 /* 1104 /*
@@ -826,14 +1125,36 @@ int mmc_attach_sdio(struct mmc_host *host)
826 funcs = (ocr & 0x70000000) >> 28; 1125 funcs = (ocr & 0x70000000) >> 28;
827 card->sdio_funcs = 0; 1126 card->sdio_funcs = 0;
828 1127
1128#ifdef CONFIG_MMC_EMBEDDED_SDIO
1129 if (host->embedded_sdio_data.funcs)
1130 card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs;
1131#endif
1132
829 /* 1133 /*
830 * Initialize (but don't add) all present functions. 1134 * Initialize (but don't add) all present functions.
831 */ 1135 */
832 for (i = 0; i < funcs; i++, card->sdio_funcs++) { 1136 for (i = 0; i < funcs; i++, card->sdio_funcs++) {
833 err = sdio_init_func(host->card, i + 1); 1137#ifdef CONFIG_MMC_EMBEDDED_SDIO
834 if (err) 1138 if (host->embedded_sdio_data.funcs) {
835 goto remove; 1139 struct sdio_func *tmp;
836 1140
1141 tmp = sdio_alloc_func(host->card);
1142 if (IS_ERR(tmp))
1143 goto remove;
1144 tmp->num = (i + 1);
1145 card->sdio_func[i] = tmp;
1146 tmp->class = host->embedded_sdio_data.funcs[i].f_class;
1147 tmp->max_blksize = host->embedded_sdio_data.funcs[i].f_maxblksize;
1148 tmp->vendor = card->cis.vendor;
1149 tmp->device = card->cis.device;
1150 } else {
1151#endif
1152 err = sdio_init_func(host->card, i + 1);
1153 if (err)
1154 goto remove;
1155#ifdef CONFIG_MMC_EMBEDDED_SDIO
1156 }
1157#endif
837 /* 1158 /*
838 * Enable Runtime PM for this func (if supported) 1159 * Enable Runtime PM for this func (if supported)
839 */ 1160 */
@@ -881,3 +1202,77 @@ err:
881 return err; 1202 return err;
882} 1203}
883 1204
1205int sdio_reset_comm(struct mmc_card *card)
1206{
1207 struct mmc_host *host = card->host;
1208 u32 ocr;
1209 int err;
1210
1211 printk("%s():\n", __func__);
1212 mmc_claim_host(host);
1213
1214 mmc_go_idle(host);
1215
1216 mmc_set_clock(host, host->f_min);
1217
1218 err = mmc_send_io_op_cond(host, 0, &ocr);
1219 if (err)
1220 goto err;
1221
1222 host->ocr = mmc_select_voltage(host, ocr);
1223 if (!host->ocr) {
1224 err = -EINVAL;
1225 goto err;
1226 }
1227
1228 err = mmc_send_io_op_cond(host, host->ocr, &ocr);
1229 if (err)
1230 goto err;
1231
1232 if (mmc_host_is_spi(host)) {
1233 err = mmc_spi_set_crc(host, use_spi_crc);
1234 if (err)
1235 goto err;
1236 }
1237
1238 if (!mmc_host_is_spi(host)) {
1239 err = mmc_send_relative_addr(host, &card->rca);
1240 if (err)
1241 goto err;
1242 mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
1243 }
1244 if (!mmc_host_is_spi(host)) {
1245 err = mmc_select_card(card);
1246 if (err)
1247 goto err;
1248 }
1249
1250 /*
1251 * Switch to high-speed (if supported).
1252 */
1253 err = sdio_enable_hs(card);
1254 if (err > 0)
1255 mmc_sd_go_highspeed(card);
1256 else if (err)
1257 goto err;
1258
1259 /*
1260 * Change to the card's maximum speed.
1261 */
1262 mmc_set_clock(host, mmc_sdio_get_max_clock(card));
1263
1264 err = sdio_enable_4bit_bus(card);
1265 if (err > 0)
1266 mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
1267 else if (err)
1268 goto err;
1269
1270 mmc_release_host(host);
1271 return 0;
1272err:
1273 printk("%s: Error resetting SDIO communications (%d)\n",
1274 mmc_hostname(host), err);
1275 mmc_release_host(host);
1276 return err;
1277}
1278EXPORT_SYMBOL(sdio_reset_comm);
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index d2565df8a7f..ca58c307a12 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -23,6 +23,10 @@
23#include "sdio_cis.h" 23#include "sdio_cis.h"
24#include "sdio_bus.h" 24#include "sdio_bus.h"
25 25
26#ifdef CONFIG_MMC_EMBEDDED_SDIO
27#include <linux/mmc/host.h>
28#endif
29
26/* show configuration fields */ 30/* show configuration fields */
27#define sdio_config_attr(field, format_string) \ 31#define sdio_config_attr(field, format_string) \
28static ssize_t \ 32static ssize_t \
@@ -167,11 +171,8 @@ static int sdio_bus_remove(struct device *dev)
167 int ret = 0; 171 int ret = 0;
168 172
169 /* Make sure card is powered before invoking ->remove() */ 173 /* Make sure card is powered before invoking ->remove() */
170 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) { 174 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
171 ret = pm_runtime_get_sync(dev); 175 pm_runtime_get_sync(dev);
172 if (ret < 0)
173 goto out;
174 }
175 176
176 drv->remove(func); 177 drv->remove(func);
177 178
@@ -191,7 +192,6 @@ static int sdio_bus_remove(struct device *dev)
191 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) 192 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
192 pm_runtime_put_sync(dev); 193 pm_runtime_put_sync(dev);
193 194
194out:
195 return ret; 195 return ret;
196} 196}
197 197
@@ -260,7 +260,14 @@ static void sdio_release_func(struct device *dev)
260{ 260{
261 struct sdio_func *func = dev_to_sdio_func(dev); 261 struct sdio_func *func = dev_to_sdio_func(dev);
262 262
263 sdio_free_func_cis(func); 263#ifdef CONFIG_MMC_EMBEDDED_SDIO
264 /*
265 * If this device is embedded then we never allocated
266 * cis tables for this func
267 */
268 if (!func->card->host->embedded_sdio_data.funcs)
269#endif
270 sdio_free_func_cis(func);
264 271
265 if (func->info) 272 if (func->info)
266 kfree(func->info); 273 kfree(func->info);
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 0f687cdeb06..549a3414464 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -383,6 +383,39 @@ u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret)
383EXPORT_SYMBOL_GPL(sdio_readb); 383EXPORT_SYMBOL_GPL(sdio_readb);
384 384
385/** 385/**
386 * sdio_readb_ext - read a single byte from a SDIO function
387 * @func: SDIO function to access
388 * @addr: address to read
389 * @err_ret: optional status value from transfer
390 * @in: value to add to argument
391 *
392 * Reads a single byte from the address space of a given SDIO
393 * function. If there is a problem reading the address, 0xff
394 * is returned and @err_ret will contain the error code.
395 */
396unsigned char sdio_readb_ext(struct sdio_func *func, unsigned int addr,
397 int *err_ret, unsigned in)
398{
399 int ret;
400 unsigned char val;
401
402 BUG_ON(!func);
403
404 if (err_ret)
405 *err_ret = 0;
406
407 ret = mmc_io_rw_direct(func->card, 0, func->num, addr, (u8)in, &val);
408 if (ret) {
409 if (err_ret)
410 *err_ret = ret;
411 return 0xFF;
412 }
413
414 return val;
415}
416EXPORT_SYMBOL_GPL(sdio_readb_ext);
417
418/**
386 * sdio_writeb - write a single byte to a SDIO function 419 * sdio_writeb - write a single byte to a SDIO function
387 * @func: SDIO function to access 420 * @func: SDIO function to access
388 * @b: byte to write 421 * @b: byte to write
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 56dbf3f6ad0..8c87096531e 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -81,28 +81,32 @@ config MMC_RICOH_MMC
81 81
82 If unsure, say Y. 82 If unsure, say Y.
83 83
84config MMC_SDHCI_OF 84config MMC_SDHCI_PLTFM
85 tristate "SDHCI support on OpenFirmware platforms" 85 tristate "SDHCI platform and OF driver helper"
86 depends on MMC_SDHCI && OF 86 depends on MMC_SDHCI
87 help 87 help
88 This selects the OF support for Secure Digital Host Controller 88 This selects the common helper functions support for Secure Digital
89 Interfaces. 89 Host Controller Interface based platform and OF drivers.
90
91 If you have a controller with this interface, say Y or M here.
90 92
91 If unsure, say N. 93 If unsure, say N.
92 94
93config MMC_SDHCI_OF_ESDHC 95config MMC_SDHCI_OF_ESDHC
94 bool "SDHCI OF support for the Freescale eSDHC controller" 96 tristate "SDHCI OF support for the Freescale eSDHC controller"
95 depends on MMC_SDHCI_OF 97 depends on MMC_SDHCI_PLTFM
96 depends on PPC_OF 98 depends on PPC_OF
97 select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER 99 select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
98 help 100 help
99 This selects the Freescale eSDHC controller support. 101 This selects the Freescale eSDHC controller support.
100 102
103 If you have a controller with this interface, say Y or M here.
104
101 If unsure, say N. 105 If unsure, say N.
102 106
103config MMC_SDHCI_OF_HLWD 107config MMC_SDHCI_OF_HLWD
104 bool "SDHCI OF support for the Nintendo Wii SDHCI controllers" 108 tristate "SDHCI OF support for the Nintendo Wii SDHCI controllers"
105 depends on MMC_SDHCI_OF 109 depends on MMC_SDHCI_PLTFM
106 depends on PPC_OF 110 depends on PPC_OF
107 select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER 111 select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
108 help 112 help
@@ -110,40 +114,36 @@ config MMC_SDHCI_OF_HLWD
110 found in the "Hollywood" chipset of the Nintendo Wii video game 114 found in the "Hollywood" chipset of the Nintendo Wii video game
111 console. 115 console.
112 116
113 If unsure, say N.
114
115config MMC_SDHCI_PLTFM
116 tristate "SDHCI support on the platform specific bus"
117 depends on MMC_SDHCI
118 help
119 This selects the platform specific bus support for Secure Digital Host
120 Controller Interface.
121
122 If you have a controller with this interface, say Y or M here. 117 If you have a controller with this interface, say Y or M here.
123 118
124 If unsure, say N. 119 If unsure, say N.
125 120
126config MMC_SDHCI_CNS3XXX 121config MMC_SDHCI_CNS3XXX
127 bool "SDHCI support on the Cavium Networks CNS3xxx SoC" 122 tristate "SDHCI support on the Cavium Networks CNS3xxx SoC"
128 depends on ARCH_CNS3XXX 123 depends on ARCH_CNS3XXX
129 depends on MMC_SDHCI_PLTFM 124 depends on MMC_SDHCI_PLTFM
130 help 125 help
131 This selects the SDHCI support for CNS3xxx System-on-Chip devices. 126 This selects the SDHCI support for CNS3xxx System-on-Chip devices.
132 127
128 If you have a controller with this interface, say Y or M here.
129
133 If unsure, say N. 130 If unsure, say N.
134 131
135config MMC_SDHCI_ESDHC_IMX 132config MMC_SDHCI_ESDHC_IMX
136 bool "SDHCI platform support for the Freescale eSDHC i.MX controller" 133 tristate "SDHCI platform support for the Freescale eSDHC i.MX controller"
137 depends on MMC_SDHCI_PLTFM && (ARCH_MX25 || ARCH_MX35 || ARCH_MX5) 134 depends on ARCH_MX25 || ARCH_MX35 || ARCH_MX5
135 depends on MMC_SDHCI_PLTFM
138 select MMC_SDHCI_IO_ACCESSORS 136 select MMC_SDHCI_IO_ACCESSORS
139 help 137 help
140 This selects the Freescale eSDHC controller support on the platform 138 This selects the Freescale eSDHC controller support on the platform
141 bus, found on platforms like mx35/51. 139 bus, found on platforms like mx35/51.
142 140
141 If you have a controller with this interface, say Y or M here.
142
143 If unsure, say N. 143 If unsure, say N.
144 144
145config MMC_SDHCI_DOVE 145config MMC_SDHCI_DOVE
146 bool "SDHCI support on Marvell's Dove SoC" 146 tristate "SDHCI support on Marvell's Dove SoC"
147 depends on ARCH_DOVE 147 depends on ARCH_DOVE
148 depends on MMC_SDHCI_PLTFM 148 depends on MMC_SDHCI_PLTFM
149 select MMC_SDHCI_IO_ACCESSORS 149 select MMC_SDHCI_IO_ACCESSORS
@@ -151,11 +151,14 @@ config MMC_SDHCI_DOVE
151 This selects the Secure Digital Host Controller Interface in 151 This selects the Secure Digital Host Controller Interface in
152 Marvell's Dove SoC. 152 Marvell's Dove SoC.
153 153
154 If you have a controller with this interface, say Y or M here.
155
154 If unsure, say N. 156 If unsure, say N.
155 157
156config MMC_SDHCI_TEGRA 158config MMC_SDHCI_TEGRA
157 bool "SDHCI platform support for the Tegra SD/MMC Controller" 159 tristate "SDHCI platform support for the Tegra SD/MMC Controller"
158 depends on MMC_SDHCI_PLTFM && ARCH_TEGRA 160 depends on ARCH_TEGRA
161 depends on MMC_SDHCI_PLTFM
159 select MMC_SDHCI_IO_ACCESSORS 162 select MMC_SDHCI_IO_ACCESSORS
160 help 163 help
161 This selects the Tegra SD/MMC controller. If you have a Tegra 164 This selects the Tegra SD/MMC controller. If you have a Tegra
@@ -178,14 +181,28 @@ config MMC_SDHCI_S3C
178 181
179 If unsure, say N. 182 If unsure, say N.
180 183
181config MMC_SDHCI_PXA 184config MMC_SDHCI_PXAV3
182 tristate "Marvell PXA168/PXA910/MMP2 SD Host Controller support" 185 tristate "Marvell MMP2 SD Host Controller support (PXAV3)"
183 depends on ARCH_PXA || ARCH_MMP 186 depends on CLKDEV_LOOKUP
184 select MMC_SDHCI 187 select MMC_SDHCI
185 select MMC_SDHCI_IO_ACCESSORS 188 select MMC_SDHCI_PLTFM
189 default CPU_MMP2
190 help
191 This selects the Marvell(R) PXAV3 SD Host Controller.
192 If you have a MMP2 platform with SD Host Controller
193 and a card slot, say Y or M here.
194
195 If unsure, say N.
196
197config MMC_SDHCI_PXAV2
198 tristate "Marvell PXA9XX SD Host Controller support (PXAV2)"
199 depends on CLKDEV_LOOKUP
200 select MMC_SDHCI
201 select MMC_SDHCI_PLTFM
202 default CPU_PXA910
186 help 203 help
187 This selects the Marvell(R) PXA168/PXA910/MMP2 SD Host Controller. 204 This selects the Marvell(R) PXAV2 SD Host Controller.
188 If you have a PXA168/PXA910/MMP2 platform with SD Host Controller 205 If you have a PXA9XX platform with SD Host Controller
189 and a card slot, say Y or M here. 206 and a card slot, say Y or M here.
190 207
191 If unsure, say N. 208 If unsure, say N.
@@ -281,13 +298,12 @@ config MMC_ATMELMCI
281endchoice 298endchoice
282 299
283config MMC_ATMELMCI_DMA 300config MMC_ATMELMCI_DMA
284 bool "Atmel MCI DMA support (EXPERIMENTAL)" 301 bool "Atmel MCI DMA support"
285 depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE && EXPERIMENTAL 302 depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE
286 help 303 help
287 Say Y here to have the Atmel MCI driver use a DMA engine to 304 Say Y here to have the Atmel MCI driver use a DMA engine to
288 do data transfers and thus increase the throughput and 305 do data transfers and thus increase the throughput and
289 reduce the CPU utilization. Note that this is highly 306 reduce the CPU utilization.
290 experimental and may cause the driver to lock up.
291 307
292 If unsure, say N. 308 If unsure, say N.
293 309
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 58a5cf73d6e..f5ea51bd0ed 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -1,6 +1,7 @@
1# 1#
2# Makefile for MMC/SD host controller drivers 2# Makefile for MMC/SD host controller drivers
3# 3#
4GCOV_PROFILE_sdhci-tegra.o := y
4 5
5obj-$(CONFIG_MMC_ARMMMCI) += mmci.o 6obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
6obj-$(CONFIG_MMC_PXA) += pxamci.o 7obj-$(CONFIG_MMC_PXA) += pxamci.o
@@ -9,7 +10,8 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o
9obj-$(CONFIG_MMC_MXS) += mxs-mmc.o 10obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
10obj-$(CONFIG_MMC_SDHCI) += sdhci.o 11obj-$(CONFIG_MMC_SDHCI) += sdhci.o
11obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o 12obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
12obj-$(CONFIG_MMC_SDHCI_PXA) += sdhci-pxa.o 13obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
14obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
13obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o 15obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
14obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o 16obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
15obj-$(CONFIG_MMC_WBSD) += wbsd.o 17obj-$(CONFIG_MMC_WBSD) += wbsd.o
@@ -31,9 +33,7 @@ obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
31obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o 33obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
32obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o 34obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o
33tmio_mmc_core-y := tmio_mmc_pio.o 35tmio_mmc_core-y := tmio_mmc_pio.o
34ifneq ($(CONFIG_MMC_SDHI),n) 36tmio_mmc_core-$(subst m,y,$(CONFIG_MMC_SDHI)) += tmio_mmc_dma.o
35tmio_mmc_core-y += tmio_mmc_dma.o
36endif
37obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o 37obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o
38obj-$(CONFIG_MMC_CB710) += cb710-mmc.o 38obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
39obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o 39obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
@@ -44,17 +44,13 @@ obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
44obj-$(CONFIG_MMC_VUB300) += vub300.o 44obj-$(CONFIG_MMC_VUB300) += vub300.o
45obj-$(CONFIG_MMC_USHC) += ushc.o 45obj-$(CONFIG_MMC_USHC) += ushc.o
46 46
47obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o 47obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
48sdhci-platform-y := sdhci-pltfm.o 48obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
49sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o 49obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
50sdhci-platform-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o 50obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
51sdhci-platform-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o 51obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
52sdhci-platform-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o 52obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
53 53obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
54obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
55sdhci-of-y := sdhci-of-core.o
56sdhci-of-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
57sdhci-of-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
58 54
59ifeq ($(CONFIG_CB710_DEBUG),y) 55ifeq ($(CONFIG_CB710_DEBUG),y)
60 CFLAGS-cb710-mmc += -DDEBUG 56 CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index d3e6a962f42..a4aa3af86fe 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -77,7 +77,8 @@
77 77
78#include <mach/board.h> 78#include <mach/board.h>
79#include <mach/cpu.h> 79#include <mach/cpu.h>
80#include <mach/at91_mci.h> 80
81#include "at91_mci.h"
81 82
82#define DRIVER_NAME "at91_mci" 83#define DRIVER_NAME "at91_mci"
83 84
diff --git a/drivers/mmc/host/at91_mci.h b/drivers/mmc/host/at91_mci.h
new file mode 100644
index 00000000000..eec3a6b1c2b
--- /dev/null
+++ b/drivers/mmc/host/at91_mci.h
@@ -0,0 +1,115 @@
1/*
2 * drivers/mmc/host/at91_mci.h
3 *
4 * Copyright (C) 2005 Ivan Kokshaysky
5 * Copyright (C) SAN People
6 *
7 * MultiMedia Card Interface (MCI) registers.
8 * Based on AT91RM9200 datasheet revision F.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#ifndef AT91_MCI_H
17#define AT91_MCI_H
18
19#define AT91_MCI_CR 0x00 /* Control Register */
20#define AT91_MCI_MCIEN (1 << 0) /* Multi-Media Interface Enable */
21#define AT91_MCI_MCIDIS (1 << 1) /* Multi-Media Interface Disable */
22#define AT91_MCI_PWSEN (1 << 2) /* Power Save Mode Enable */
23#define AT91_MCI_PWSDIS (1 << 3) /* Power Save Mode Disable */
24#define AT91_MCI_SWRST (1 << 7) /* Software Reset */
25
26#define AT91_MCI_MR 0x04 /* Mode Register */
27#define AT91_MCI_CLKDIV (0xff << 0) /* Clock Divider */
28#define AT91_MCI_PWSDIV (7 << 8) /* Power Saving Divider */
29#define AT91_MCI_RDPROOF (1 << 11) /* Read Proof Enable [SAM926[03] only] */
30#define AT91_MCI_WRPROOF (1 << 12) /* Write Proof Enable [SAM926[03] only] */
31#define AT91_MCI_PDCFBYTE (1 << 13) /* PDC Force Byte Transfer [SAM926[03] only] */
32#define AT91_MCI_PDCPADV (1 << 14) /* PDC Padding Value */
33#define AT91_MCI_PDCMODE (1 << 15) /* PDC-orientated Mode */
34#define AT91_MCI_BLKLEN (0xfff << 18) /* Data Block Length */
35
36#define AT91_MCI_DTOR 0x08 /* Data Timeout Register */
37#define AT91_MCI_DTOCYC (0xf << 0) /* Data Timeout Cycle Number */
38#define AT91_MCI_DTOMUL (7 << 4) /* Data Timeout Multiplier */
39#define AT91_MCI_DTOMUL_1 (0 << 4)
40#define AT91_MCI_DTOMUL_16 (1 << 4)
41#define AT91_MCI_DTOMUL_128 (2 << 4)
42#define AT91_MCI_DTOMUL_256 (3 << 4)
43#define AT91_MCI_DTOMUL_1K (4 << 4)
44#define AT91_MCI_DTOMUL_4K (5 << 4)
45#define AT91_MCI_DTOMUL_64K (6 << 4)
46#define AT91_MCI_DTOMUL_1M (7 << 4)
47
48#define AT91_MCI_SDCR 0x0c /* SD Card Register */
49#define AT91_MCI_SDCSEL (3 << 0) /* SD Card Selector */
50#define AT91_MCI_SDCBUS (1 << 7) /* 1-bit or 4-bit bus */
51
52#define AT91_MCI_ARGR 0x10 /* Argument Register */
53
54#define AT91_MCI_CMDR 0x14 /* Command Register */
55#define AT91_MCI_CMDNB (0x3f << 0) /* Command Number */
56#define AT91_MCI_RSPTYP (3 << 6) /* Response Type */
57#define AT91_MCI_RSPTYP_NONE (0 << 6)
58#define AT91_MCI_RSPTYP_48 (1 << 6)
59#define AT91_MCI_RSPTYP_136 (2 << 6)
60#define AT91_MCI_SPCMD (7 << 8) /* Special Command */
61#define AT91_MCI_SPCMD_NONE (0 << 8)
62#define AT91_MCI_SPCMD_INIT (1 << 8)
63#define AT91_MCI_SPCMD_SYNC (2 << 8)
64#define AT91_MCI_SPCMD_ICMD (4 << 8)
65#define AT91_MCI_SPCMD_IRESP (5 << 8)
66#define AT91_MCI_OPDCMD (1 << 11) /* Open Drain Command */
67#define AT91_MCI_MAXLAT (1 << 12) /* Max Latency for Command to Response */
68#define AT91_MCI_TRCMD (3 << 16) /* Transfer Command */
69#define AT91_MCI_TRCMD_NONE (0 << 16)
70#define AT91_MCI_TRCMD_START (1 << 16)
71#define AT91_MCI_TRCMD_STOP (2 << 16)
72#define AT91_MCI_TRDIR (1 << 18) /* Transfer Direction */
73#define AT91_MCI_TRTYP (3 << 19) /* Transfer Type */
74#define AT91_MCI_TRTYP_BLOCK (0 << 19)
75#define AT91_MCI_TRTYP_MULTIPLE (1 << 19)
76#define AT91_MCI_TRTYP_STREAM (2 << 19)
77#define AT91_MCI_TRTYP_SDIO_BYTE (4 << 19)
78#define AT91_MCI_TRTYP_SDIO_BLOCK (5 << 19)
79
80#define AT91_MCI_BLKR 0x18 /* Block Register */
81#define AT91_MCI_BLKR_BCNT(n) ((0xffff & (n)) << 0) /* Block count */
82#define AT91_MCI_BLKR_BLKLEN(n) ((0xffff & (n)) << 16) /* Block length */
83
84#define AT91_MCI_RSPR(n) (0x20 + ((n) * 4)) /* Response Registers 0-3 */
85#define AT91_MCR_RDR 0x30 /* Receive Data Register */
86#define AT91_MCR_TDR 0x34 /* Transmit Data Register */
87
88#define AT91_MCI_SR 0x40 /* Status Register */
89#define AT91_MCI_CMDRDY (1 << 0) /* Command Ready */
90#define AT91_MCI_RXRDY (1 << 1) /* Receiver Ready */
91#define AT91_MCI_TXRDY (1 << 2) /* Transmit Ready */
92#define AT91_MCI_BLKE (1 << 3) /* Data Block Ended */
93#define AT91_MCI_DTIP (1 << 4) /* Data Transfer in Progress */
94#define AT91_MCI_NOTBUSY (1 << 5) /* Data Not Busy */
95#define AT91_MCI_ENDRX (1 << 6) /* End of RX Buffer */
96#define AT91_MCI_ENDTX (1 << 7) /* End fo TX Buffer */
97#define AT91_MCI_SDIOIRQA (1 << 8) /* SDIO Interrupt for Slot A */
98#define AT91_MCI_SDIOIRQB (1 << 9) /* SDIO Interrupt for Slot B */
99#define AT91_MCI_RXBUFF (1 << 14) /* RX Buffer Full */
100#define AT91_MCI_TXBUFE (1 << 15) /* TX Buffer Empty */
101#define AT91_MCI_RINDE (1 << 16) /* Response Index Error */
102#define AT91_MCI_RDIRE (1 << 17) /* Response Direction Error */
103#define AT91_MCI_RCRCE (1 << 18) /* Response CRC Error */
104#define AT91_MCI_RENDE (1 << 19) /* Response End Bit Error */
105#define AT91_MCI_RTOE (1 << 20) /* Response Time-out Error */
106#define AT91_MCI_DCRCE (1 << 21) /* Data CRC Error */
107#define AT91_MCI_DTOE (1 << 22) /* Data Time-out Error */
108#define AT91_MCI_OVRE (1 << 30) /* Overrun */
109#define AT91_MCI_UNRE (1 << 31) /* Underrun */
110
111#define AT91_MCI_IER 0x44 /* Interrupt Enable Register */
112#define AT91_MCI_IDR 0x48 /* Interrupt Disable Register */
113#define AT91_MCI_IMR 0x4c /* Interrupt Mask Register */
114
115#endif
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index aa8039f473c..fa8cae1d700 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -203,6 +203,7 @@ struct atmel_mci_slot {
203#define ATMCI_CARD_PRESENT 0 203#define ATMCI_CARD_PRESENT 0
204#define ATMCI_CARD_NEED_INIT 1 204#define ATMCI_CARD_NEED_INIT 1
205#define ATMCI_SHUTDOWN 2 205#define ATMCI_SHUTDOWN 2
206#define ATMCI_SUSPENDED 3
206 207
207 int detect_pin; 208 int detect_pin;
208 int wp_pin; 209 int wp_pin;
@@ -1878,10 +1879,72 @@ static int __exit atmci_remove(struct platform_device *pdev)
1878 return 0; 1879 return 0;
1879} 1880}
1880 1881
1882#ifdef CONFIG_PM
1883static int atmci_suspend(struct device *dev)
1884{
1885 struct atmel_mci *host = dev_get_drvdata(dev);
1886 int i;
1887
1888 for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
1889 struct atmel_mci_slot *slot = host->slot[i];
1890 int ret;
1891
1892 if (!slot)
1893 continue;
1894 ret = mmc_suspend_host(slot->mmc);
1895 if (ret < 0) {
1896 while (--i >= 0) {
1897 slot = host->slot[i];
1898 if (slot
1899 && test_bit(ATMCI_SUSPENDED, &slot->flags)) {
1900 mmc_resume_host(host->slot[i]->mmc);
1901 clear_bit(ATMCI_SUSPENDED, &slot->flags);
1902 }
1903 }
1904 return ret;
1905 } else {
1906 set_bit(ATMCI_SUSPENDED, &slot->flags);
1907 }
1908 }
1909
1910 return 0;
1911}
1912
1913static int atmci_resume(struct device *dev)
1914{
1915 struct atmel_mci *host = dev_get_drvdata(dev);
1916 int i;
1917 int ret = 0;
1918
1919 for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
1920 struct atmel_mci_slot *slot = host->slot[i];
1921 int err;
1922
1923 slot = host->slot[i];
1924 if (!slot)
1925 continue;
1926 if (!test_bit(ATMCI_SUSPENDED, &slot->flags))
1927 continue;
1928 err = mmc_resume_host(slot->mmc);
1929 if (err < 0)
1930 ret = err;
1931 else
1932 clear_bit(ATMCI_SUSPENDED, &slot->flags);
1933 }
1934
1935 return ret;
1936}
1937static SIMPLE_DEV_PM_OPS(atmci_pm, atmci_suspend, atmci_resume);
1938#define ATMCI_PM_OPS (&atmci_pm)
1939#else
1940#define ATMCI_PM_OPS NULL
1941#endif
1942
1881static struct platform_driver atmci_driver = { 1943static struct platform_driver atmci_driver = {
1882 .remove = __exit_p(atmci_remove), 1944 .remove = __exit_p(atmci_remove),
1883 .driver = { 1945 .driver = {
1884 .name = "atmel_mci", 1946 .name = "atmel_mci",
1947 .pm = ATMCI_PM_OPS,
1885 }, 1948 },
1886}; 1949};
1887 1950
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 66dcddb9c20..ff0f714b012 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -33,6 +33,7 @@
33#include <linux/mmc/dw_mmc.h> 33#include <linux/mmc/dw_mmc.h>
34#include <linux/bitops.h> 34#include <linux/bitops.h>
35#include <linux/regulator/consumer.h> 35#include <linux/regulator/consumer.h>
36#include <linux/workqueue.h>
36 37
37#include "dw_mmc.h" 38#include "dw_mmc.h"
38 39
@@ -61,7 +62,7 @@ struct idmac_desc {
61 62
62 u32 des1; /* Buffer sizes */ 63 u32 des1; /* Buffer sizes */
63#define IDMAC_SET_BUFFER1_SIZE(d, s) \ 64#define IDMAC_SET_BUFFER1_SIZE(d, s) \
64 ((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff)) 65 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
65 66
66 u32 des2; /* buffer 1 physical address */ 67 u32 des2; /* buffer 1 physical address */
67 68
@@ -100,6 +101,8 @@ struct dw_mci_slot {
100 int last_detect_state; 101 int last_detect_state;
101}; 102};
102 103
104static struct workqueue_struct *dw_mci_card_workqueue;
105
103#if defined(CONFIG_DEBUG_FS) 106#if defined(CONFIG_DEBUG_FS)
104static int dw_mci_req_show(struct seq_file *s, void *v) 107static int dw_mci_req_show(struct seq_file *s, void *v)
105{ 108{
@@ -284,7 +287,7 @@ static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
284/* DMA interface functions */ 287/* DMA interface functions */
285static void dw_mci_stop_dma(struct dw_mci *host) 288static void dw_mci_stop_dma(struct dw_mci *host)
286{ 289{
287 if (host->use_dma) { 290 if (host->using_dma) {
288 host->dma_ops->stop(host); 291 host->dma_ops->stop(host);
289 host->dma_ops->cleanup(host); 292 host->dma_ops->cleanup(host);
290 } else { 293 } else {
@@ -432,6 +435,8 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
432 unsigned int i, direction, sg_len; 435 unsigned int i, direction, sg_len;
433 u32 temp; 436 u32 temp;
434 437
438 host->using_dma = 0;
439
435 /* If we don't have a channel, we can't do DMA */ 440 /* If we don't have a channel, we can't do DMA */
436 if (!host->use_dma) 441 if (!host->use_dma)
437 return -ENODEV; 442 return -ENODEV;
@@ -451,6 +456,8 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
451 return -EINVAL; 456 return -EINVAL;
452 } 457 }
453 458
459 host->using_dma = 1;
460
454 if (data->flags & MMC_DATA_READ) 461 if (data->flags & MMC_DATA_READ)
455 direction = DMA_FROM_DEVICE; 462 direction = DMA_FROM_DEVICE;
456 else 463 else
@@ -489,14 +496,18 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
489 host->sg = NULL; 496 host->sg = NULL;
490 host->data = data; 497 host->data = data;
491 498
499 if (data->flags & MMC_DATA_READ)
500 host->dir_status = DW_MCI_RECV_STATUS;
501 else
502 host->dir_status = DW_MCI_SEND_STATUS;
503
492 if (dw_mci_submit_data_dma(host, data)) { 504 if (dw_mci_submit_data_dma(host, data)) {
493 host->sg = data->sg; 505 host->sg = data->sg;
494 host->pio_offset = 0; 506 host->pio_offset = 0;
495 if (data->flags & MMC_DATA_READ) 507 host->part_buf_start = 0;
496 host->dir_status = DW_MCI_RECV_STATUS; 508 host->part_buf_count = 0;
497 else
498 host->dir_status = DW_MCI_SEND_STATUS;
499 509
510 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
500 temp = mci_readl(host, INTMASK); 511 temp = mci_readl(host, INTMASK);
501 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR; 512 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
502 mci_writel(host, INTMASK, temp); 513 mci_writel(host, INTMASK, temp);
@@ -574,7 +585,7 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
574 } 585 }
575 586
576 /* Set the current slot bus width */ 587 /* Set the current slot bus width */
577 mci_writel(host, CTYPE, slot->ctype); 588 mci_writel(host, CTYPE, (slot->ctype << slot->id));
578} 589}
579 590
580static void dw_mci_start_request(struct dw_mci *host, 591static void dw_mci_start_request(struct dw_mci *host,
@@ -624,13 +635,13 @@ static void dw_mci_start_request(struct dw_mci *host,
624 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop); 635 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
625} 636}
626 637
638/* must be called with host->lock held */
627static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, 639static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
628 struct mmc_request *mrq) 640 struct mmc_request *mrq)
629{ 641{
630 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", 642 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
631 host->state); 643 host->state);
632 644
633 spin_lock_bh(&host->lock);
634 slot->mrq = mrq; 645 slot->mrq = mrq;
635 646
636 if (host->state == STATE_IDLE) { 647 if (host->state == STATE_IDLE) {
@@ -639,8 +650,6 @@ static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
639 } else { 650 } else {
640 list_add_tail(&slot->queue_node, &host->queue); 651 list_add_tail(&slot->queue_node, &host->queue);
641 } 652 }
642
643 spin_unlock_bh(&host->lock);
644} 653}
645 654
646static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) 655static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -650,14 +659,23 @@ static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
650 659
651 WARN_ON(slot->mrq); 660 WARN_ON(slot->mrq);
652 661
662 /*
663 * The check for card presence and queueing of the request must be
664 * atomic, otherwise the card could be removed in between and the
665 * request wouldn't fail until another card was inserted.
666 */
667 spin_lock_bh(&host->lock);
668
653 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) { 669 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
670 spin_unlock_bh(&host->lock);
654 mrq->cmd->error = -ENOMEDIUM; 671 mrq->cmd->error = -ENOMEDIUM;
655 mmc_request_done(mmc, mrq); 672 mmc_request_done(mmc, mrq);
656 return; 673 return;
657 } 674 }
658 675
659 /* We don't support multiple blocks of weird lengths. */
660 dw_mci_queue_request(host, slot, mrq); 676 dw_mci_queue_request(host, slot, mrq);
677
678 spin_unlock_bh(&host->lock);
661} 679}
662 680
663static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 681static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -681,7 +699,7 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
681 } 699 }
682 700
683 /* DDR mode set */ 701 /* DDR mode set */
684 if (ios->ddr) { 702 if (ios->timing == MMC_TIMING_UHS_DDR50) {
685 regs = mci_readl(slot->host, UHS_REG); 703 regs = mci_readl(slot->host, UHS_REG);
686 regs |= (0x1 << slot->id) << 16; 704 regs |= (0x1 << slot->id) << 16;
687 mci_writel(slot->host, UHS_REG, regs); 705 mci_writel(slot->host, UHS_REG, regs);
@@ -831,7 +849,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
831 struct mmc_command *cmd; 849 struct mmc_command *cmd;
832 enum dw_mci_state state; 850 enum dw_mci_state state;
833 enum dw_mci_state prev_state; 851 enum dw_mci_state prev_state;
834 u32 status; 852 u32 status, ctrl;
835 853
836 spin_lock(&host->lock); 854 spin_lock(&host->lock);
837 855
@@ -891,13 +909,19 @@ static void dw_mci_tasklet_func(unsigned long priv)
891 909
892 if (status & DW_MCI_DATA_ERROR_FLAGS) { 910 if (status & DW_MCI_DATA_ERROR_FLAGS) {
893 if (status & SDMMC_INT_DTO) { 911 if (status & SDMMC_INT_DTO) {
894 dev_err(&host->pdev->dev,
895 "data timeout error\n");
896 data->error = -ETIMEDOUT; 912 data->error = -ETIMEDOUT;
897 } else if (status & SDMMC_INT_DCRC) { 913 } else if (status & SDMMC_INT_DCRC) {
898 dev_err(&host->pdev->dev,
899 "data CRC error\n");
900 data->error = -EILSEQ; 914 data->error = -EILSEQ;
915 } else if (status & SDMMC_INT_EBE &&
916 host->dir_status ==
917 DW_MCI_SEND_STATUS) {
918 /*
919 * No data CRC status was returned.
920 * The number of bytes transferred will
921 * be exaggerated in PIO mode.
922 */
923 data->bytes_xfered = 0;
924 data->error = -ETIMEDOUT;
901 } else { 925 } else {
902 dev_err(&host->pdev->dev, 926 dev_err(&host->pdev->dev,
903 "data FIFO error " 927 "data FIFO error "
@@ -905,6 +929,16 @@ static void dw_mci_tasklet_func(unsigned long priv)
905 status); 929 status);
906 data->error = -EIO; 930 data->error = -EIO;
907 } 931 }
932 /*
933 * After an error, there may be data lingering
934 * in the FIFO, so reset it - doing so
935 * generates a block interrupt, hence setting
936 * the scatter-gather pointer to NULL.
937 */
938 host->sg = NULL;
939 ctrl = mci_readl(host, CTRL);
940 ctrl |= SDMMC_CTRL_FIFO_RESET;
941 mci_writel(host, CTRL, ctrl);
908 } else { 942 } else {
909 data->bytes_xfered = data->blocks * data->blksz; 943 data->bytes_xfered = data->blocks * data->blksz;
910 data->error = 0; 944 data->error = 0;
@@ -946,84 +980,278 @@ unlock:
946 980
947} 981}
948 982
949static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) 983/* push final bytes to part_buf, only use during push */
984static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
950{ 985{
951 u16 *pdata = (u16 *)buf; 986 memcpy((void *)&host->part_buf, buf, cnt);
987 host->part_buf_count = cnt;
988}
952 989
953 WARN_ON(cnt % 2 != 0); 990/* append bytes to part_buf, only use during push */
991static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
992{
993 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
994 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
995 host->part_buf_count += cnt;
996 return cnt;
997}
954 998
955 cnt = cnt >> 1; 999/* pull first bytes from part_buf, only use during pull */
956 while (cnt > 0) { 1000static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
957 mci_writew(host, DATA, *pdata++); 1001{
958 cnt--; 1002 cnt = min(cnt, (int)host->part_buf_count);
1003 if (cnt) {
1004 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1005 cnt);
1006 host->part_buf_count -= cnt;
1007 host->part_buf_start += cnt;
959 } 1008 }
1009 return cnt;
960} 1010}
961 1011
962static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) 1012/* pull final bytes from the part_buf, assuming it's just been filled */
1013static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
963{ 1014{
964 u16 *pdata = (u16 *)buf; 1015 memcpy(buf, &host->part_buf, cnt);
1016 host->part_buf_start = cnt;
1017 host->part_buf_count = (1 << host->data_shift) - cnt;
1018}
965 1019
966 WARN_ON(cnt % 2 != 0); 1020static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1021{
1022 /* try and push anything in the part_buf */
1023 if (unlikely(host->part_buf_count)) {
1024 int len = dw_mci_push_part_bytes(host, buf, cnt);
1025 buf += len;
1026 cnt -= len;
1027 if (!sg_next(host->sg) || host->part_buf_count == 2) {
1028 mci_writew(host, DATA, host->part_buf16);
1029 host->part_buf_count = 0;
1030 }
1031 }
1032#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1033 if (unlikely((unsigned long)buf & 0x1)) {
1034 while (cnt >= 2) {
1035 u16 aligned_buf[64];
1036 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1037 int items = len >> 1;
1038 int i;
1039 /* memcpy from input buffer into aligned buffer */
1040 memcpy(aligned_buf, buf, len);
1041 buf += len;
1042 cnt -= len;
1043 /* push data from aligned buffer into fifo */
1044 for (i = 0; i < items; ++i)
1045 mci_writew(host, DATA, aligned_buf[i]);
1046 }
1047 } else
1048#endif
1049 {
1050 u16 *pdata = buf;
1051 for (; cnt >= 2; cnt -= 2)
1052 mci_writew(host, DATA, *pdata++);
1053 buf = pdata;
1054 }
1055 /* put anything remaining in the part_buf */
1056 if (cnt) {
1057 dw_mci_set_part_bytes(host, buf, cnt);
1058 if (!sg_next(host->sg))
1059 mci_writew(host, DATA, host->part_buf16);
1060 }
1061}
967 1062
968 cnt = cnt >> 1; 1063static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
969 while (cnt > 0) { 1064{
970 *pdata++ = mci_readw(host, DATA); 1065#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
971 cnt--; 1066 if (unlikely((unsigned long)buf & 0x1)) {
1067 while (cnt >= 2) {
1068 /* pull data from fifo into aligned buffer */
1069 u16 aligned_buf[64];
1070 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1071 int items = len >> 1;
1072 int i;
1073 for (i = 0; i < items; ++i)
1074 aligned_buf[i] = mci_readw(host, DATA);
1075 /* memcpy from aligned buffer into output buffer */
1076 memcpy(buf, aligned_buf, len);
1077 buf += len;
1078 cnt -= len;
1079 }
1080 } else
1081#endif
1082 {
1083 u16 *pdata = buf;
1084 for (; cnt >= 2; cnt -= 2)
1085 *pdata++ = mci_readw(host, DATA);
1086 buf = pdata;
1087 }
1088 if (cnt) {
1089 host->part_buf16 = mci_readw(host, DATA);
1090 dw_mci_pull_final_bytes(host, buf, cnt);
972 } 1091 }
973} 1092}
974 1093
975static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) 1094static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
976{ 1095{
977 u32 *pdata = (u32 *)buf; 1096 /* try and push anything in the part_buf */
978 1097 if (unlikely(host->part_buf_count)) {
979 WARN_ON(cnt % 4 != 0); 1098 int len = dw_mci_push_part_bytes(host, buf, cnt);
980 WARN_ON((unsigned long)pdata & 0x3); 1099 buf += len;
981 1100 cnt -= len;
982 cnt = cnt >> 2; 1101 if (!sg_next(host->sg) || host->part_buf_count == 4) {
983 while (cnt > 0) { 1102 mci_writel(host, DATA, host->part_buf32);
984 mci_writel(host, DATA, *pdata++); 1103 host->part_buf_count = 0;
985 cnt--; 1104 }
1105 }
1106#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1107 if (unlikely((unsigned long)buf & 0x3)) {
1108 while (cnt >= 4) {
1109 u32 aligned_buf[32];
1110 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1111 int items = len >> 2;
1112 int i;
1113 /* memcpy from input buffer into aligned buffer */
1114 memcpy(aligned_buf, buf, len);
1115 buf += len;
1116 cnt -= len;
1117 /* push data from aligned buffer into fifo */
1118 for (i = 0; i < items; ++i)
1119 mci_writel(host, DATA, aligned_buf[i]);
1120 }
1121 } else
1122#endif
1123 {
1124 u32 *pdata = buf;
1125 for (; cnt >= 4; cnt -= 4)
1126 mci_writel(host, DATA, *pdata++);
1127 buf = pdata;
1128 }
1129 /* put anything remaining in the part_buf */
1130 if (cnt) {
1131 dw_mci_set_part_bytes(host, buf, cnt);
1132 if (!sg_next(host->sg))
1133 mci_writel(host, DATA, host->part_buf32);
986 } 1134 }
987} 1135}
988 1136
989static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) 1137static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
990{ 1138{
991 u32 *pdata = (u32 *)buf; 1139#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
992 1140 if (unlikely((unsigned long)buf & 0x3)) {
993 WARN_ON(cnt % 4 != 0); 1141 while (cnt >= 4) {
994 WARN_ON((unsigned long)pdata & 0x3); 1142 /* pull data from fifo into aligned buffer */
995 1143 u32 aligned_buf[32];
996 cnt = cnt >> 2; 1144 int len = min(cnt & -4, (int)sizeof(aligned_buf));
997 while (cnt > 0) { 1145 int items = len >> 2;
998 *pdata++ = mci_readl(host, DATA); 1146 int i;
999 cnt--; 1147 for (i = 0; i < items; ++i)
1148 aligned_buf[i] = mci_readl(host, DATA);
1149 /* memcpy from aligned buffer into output buffer */
1150 memcpy(buf, aligned_buf, len);
1151 buf += len;
1152 cnt -= len;
1153 }
1154 } else
1155#endif
1156 {
1157 u32 *pdata = buf;
1158 for (; cnt >= 4; cnt -= 4)
1159 *pdata++ = mci_readl(host, DATA);
1160 buf = pdata;
1161 }
1162 if (cnt) {
1163 host->part_buf32 = mci_readl(host, DATA);
1164 dw_mci_pull_final_bytes(host, buf, cnt);
1000 } 1165 }
1001} 1166}
1002 1167
1003static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) 1168static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1004{ 1169{
1005 u64 *pdata = (u64 *)buf; 1170 /* try and push anything in the part_buf */
1006 1171 if (unlikely(host->part_buf_count)) {
1007 WARN_ON(cnt % 8 != 0); 1172 int len = dw_mci_push_part_bytes(host, buf, cnt);
1008 1173 buf += len;
1009 cnt = cnt >> 3; 1174 cnt -= len;
1010 while (cnt > 0) { 1175 if (!sg_next(host->sg) || host->part_buf_count == 8) {
1011 mci_writeq(host, DATA, *pdata++); 1176 mci_writew(host, DATA, host->part_buf);
1012 cnt--; 1177 host->part_buf_count = 0;
1178 }
1179 }
1180#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1181 if (unlikely((unsigned long)buf & 0x7)) {
1182 while (cnt >= 8) {
1183 u64 aligned_buf[16];
1184 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1185 int items = len >> 3;
1186 int i;
1187 /* memcpy from input buffer into aligned buffer */
1188 memcpy(aligned_buf, buf, len);
1189 buf += len;
1190 cnt -= len;
1191 /* push data from aligned buffer into fifo */
1192 for (i = 0; i < items; ++i)
1193 mci_writeq(host, DATA, aligned_buf[i]);
1194 }
1195 } else
1196#endif
1197 {
1198 u64 *pdata = buf;
1199 for (; cnt >= 8; cnt -= 8)
1200 mci_writeq(host, DATA, *pdata++);
1201 buf = pdata;
1202 }
1203 /* put anything remaining in the part_buf */
1204 if (cnt) {
1205 dw_mci_set_part_bytes(host, buf, cnt);
1206 if (!sg_next(host->sg))
1207 mci_writeq(host, DATA, host->part_buf);
1013 } 1208 }
1014} 1209}
1015 1210
1016static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) 1211static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1017{ 1212{
1018 u64 *pdata = (u64 *)buf; 1213#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1214 if (unlikely((unsigned long)buf & 0x7)) {
1215 while (cnt >= 8) {
1216 /* pull data from fifo into aligned buffer */
1217 u64 aligned_buf[16];
1218 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1219 int items = len >> 3;
1220 int i;
1221 for (i = 0; i < items; ++i)
1222 aligned_buf[i] = mci_readq(host, DATA);
1223 /* memcpy from aligned buffer into output buffer */
1224 memcpy(buf, aligned_buf, len);
1225 buf += len;
1226 cnt -= len;
1227 }
1228 } else
1229#endif
1230 {
1231 u64 *pdata = buf;
1232 for (; cnt >= 8; cnt -= 8)
1233 *pdata++ = mci_readq(host, DATA);
1234 buf = pdata;
1235 }
1236 if (cnt) {
1237 host->part_buf = mci_readq(host, DATA);
1238 dw_mci_pull_final_bytes(host, buf, cnt);
1239 }
1240}
1019 1241
1020 WARN_ON(cnt % 8 != 0); 1242static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1243{
1244 int len;
1021 1245
1022 cnt = cnt >> 3; 1246 /* get remaining partial bytes */
1023 while (cnt > 0) { 1247 len = dw_mci_pull_part_bytes(host, buf, cnt);
1024 *pdata++ = mci_readq(host, DATA); 1248 if (unlikely(len == cnt))
1025 cnt--; 1249 return;
1026 } 1250 buf += len;
1251 cnt -= len;
1252
1253 /* get the rest of the data */
1254 host->pull_data(host, buf, cnt);
1027} 1255}
1028 1256
1029static void dw_mci_read_data_pio(struct dw_mci *host) 1257static void dw_mci_read_data_pio(struct dw_mci *host)
@@ -1037,9 +1265,10 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
1037 unsigned int nbytes = 0, len; 1265 unsigned int nbytes = 0, len;
1038 1266
1039 do { 1267 do {
1040 len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift; 1268 len = host->part_buf_count +
1269 (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
1041 if (offset + len <= sg->length) { 1270 if (offset + len <= sg->length) {
1042 host->pull_data(host, (void *)(buf + offset), len); 1271 dw_mci_pull_data(host, (void *)(buf + offset), len);
1043 1272
1044 offset += len; 1273 offset += len;
1045 nbytes += len; 1274 nbytes += len;
@@ -1055,8 +1284,8 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
1055 } 1284 }
1056 } else { 1285 } else {
1057 unsigned int remaining = sg->length - offset; 1286 unsigned int remaining = sg->length - offset;
1058 host->pull_data(host, (void *)(buf + offset), 1287 dw_mci_pull_data(host, (void *)(buf + offset),
1059 remaining); 1288 remaining);
1060 nbytes += remaining; 1289 nbytes += remaining;
1061 1290
1062 flush_dcache_page(sg_page(sg)); 1291 flush_dcache_page(sg_page(sg));
@@ -1066,7 +1295,7 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
1066 1295
1067 offset = len - remaining; 1296 offset = len - remaining;
1068 buf = sg_virt(sg); 1297 buf = sg_virt(sg);
1069 host->pull_data(host, buf, offset); 1298 dw_mci_pull_data(host, buf, offset);
1070 nbytes += offset; 1299 nbytes += offset;
1071 } 1300 }
1072 1301
@@ -1083,7 +1312,6 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
1083 return; 1312 return;
1084 } 1313 }
1085 } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ 1314 } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
1086 len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
1087 host->pio_offset = offset; 1315 host->pio_offset = offset;
1088 data->bytes_xfered += nbytes; 1316 data->bytes_xfered += nbytes;
1089 return; 1317 return;
@@ -1105,8 +1333,9 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
1105 unsigned int nbytes = 0, len; 1333 unsigned int nbytes = 0, len;
1106 1334
1107 do { 1335 do {
1108 len = SDMMC_FIFO_SZ - 1336 len = ((host->fifo_depth -
1109 (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift); 1337 SDMMC_GET_FCNT(mci_readl(host, STATUS))) << shift)
1338 - host->part_buf_count;
1110 if (offset + len <= sg->length) { 1339 if (offset + len <= sg->length) {
1111 host->push_data(host, (void *)(buf + offset), len); 1340 host->push_data(host, (void *)(buf + offset), len);
1112 1341
@@ -1151,10 +1380,8 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
1151 return; 1380 return;
1152 } 1381 }
1153 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ 1382 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
1154
1155 host->pio_offset = offset; 1383 host->pio_offset = offset;
1156 data->bytes_xfered += nbytes; 1384 data->bytes_xfered += nbytes;
1157
1158 return; 1385 return;
1159 1386
1160done: 1387done:
@@ -1202,7 +1429,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1202 host->cmd_status = status; 1429 host->cmd_status = status;
1203 smp_wmb(); 1430 smp_wmb();
1204 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 1431 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1205 tasklet_schedule(&host->tasklet);
1206 } 1432 }
1207 1433
1208 if (pending & DW_MCI_DATA_ERROR_FLAGS) { 1434 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
@@ -1211,7 +1437,9 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1211 host->data_status = status; 1437 host->data_status = status;
1212 smp_wmb(); 1438 smp_wmb();
1213 set_bit(EVENT_DATA_ERROR, &host->pending_events); 1439 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1214 tasklet_schedule(&host->tasklet); 1440 if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC |
1441 SDMMC_INT_SBE | SDMMC_INT_EBE)))
1442 tasklet_schedule(&host->tasklet);
1215 } 1443 }
1216 1444
1217 if (pending & SDMMC_INT_DATA_OVER) { 1445 if (pending & SDMMC_INT_DATA_OVER) {
@@ -1229,13 +1457,13 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1229 1457
1230 if (pending & SDMMC_INT_RXDR) { 1458 if (pending & SDMMC_INT_RXDR) {
1231 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 1459 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1232 if (host->sg) 1460 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
1233 dw_mci_read_data_pio(host); 1461 dw_mci_read_data_pio(host);
1234 } 1462 }
1235 1463
1236 if (pending & SDMMC_INT_TXDR) { 1464 if (pending & SDMMC_INT_TXDR) {
1237 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 1465 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1238 if (host->sg) 1466 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
1239 dw_mci_write_data_pio(host); 1467 dw_mci_write_data_pio(host);
1240 } 1468 }
1241 1469
@@ -1246,7 +1474,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1246 1474
1247 if (pending & SDMMC_INT_CD) { 1475 if (pending & SDMMC_INT_CD) {
1248 mci_writel(host, RINTSTS, SDMMC_INT_CD); 1476 mci_writel(host, RINTSTS, SDMMC_INT_CD);
1249 tasklet_schedule(&host->card_tasklet); 1477 queue_work(dw_mci_card_workqueue, &host->card_work);
1250 } 1478 }
1251 1479
1252 } while (pass_count++ < 5); 1480 } while (pass_count++ < 5);
@@ -1265,9 +1493,9 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1265 return IRQ_HANDLED; 1493 return IRQ_HANDLED;
1266} 1494}
1267 1495
1268static void dw_mci_tasklet_card(unsigned long data) 1496static void dw_mci_work_routine_card(struct work_struct *work)
1269{ 1497{
1270 struct dw_mci *host = (struct dw_mci *)data; 1498 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
1271 int i; 1499 int i;
1272 1500
1273 for (i = 0; i < host->num_slots; i++) { 1501 for (i = 0; i < host->num_slots; i++) {
@@ -1279,22 +1507,21 @@ static void dw_mci_tasklet_card(unsigned long data)
1279 1507
1280 present = dw_mci_get_cd(mmc); 1508 present = dw_mci_get_cd(mmc);
1281 while (present != slot->last_detect_state) { 1509 while (present != slot->last_detect_state) {
1282 spin_lock(&host->lock);
1283
1284 dev_dbg(&slot->mmc->class_dev, "card %s\n", 1510 dev_dbg(&slot->mmc->class_dev, "card %s\n",
1285 present ? "inserted" : "removed"); 1511 present ? "inserted" : "removed");
1286 1512
1513 /* Power up slot (before spin_lock, may sleep) */
1514 if (present != 0 && host->pdata->setpower)
1515 host->pdata->setpower(slot->id, mmc->ocr_avail);
1516
1517 spin_lock_bh(&host->lock);
1518
1287 /* Card change detected */ 1519 /* Card change detected */
1288 slot->last_detect_state = present; 1520 slot->last_detect_state = present;
1289 1521
1290 /* Power up slot */ 1522 /* Mark card as present if applicable */
1291 if (present != 0) { 1523 if (present != 0)
1292 if (host->pdata->setpower)
1293 host->pdata->setpower(slot->id,
1294 mmc->ocr_avail);
1295
1296 set_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1524 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1297 }
1298 1525
1299 /* Clean up queue if present */ 1526 /* Clean up queue if present */
1300 mrq = slot->mrq; 1527 mrq = slot->mrq;
@@ -1344,8 +1571,6 @@ static void dw_mci_tasklet_card(unsigned long data)
1344 1571
1345 /* Power down slot */ 1572 /* Power down slot */
1346 if (present == 0) { 1573 if (present == 0) {
1347 if (host->pdata->setpower)
1348 host->pdata->setpower(slot->id, 0);
1349 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1574 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1350 1575
1351 /* 1576 /*
@@ -1367,7 +1592,12 @@ static void dw_mci_tasklet_card(unsigned long data)
1367 1592
1368 } 1593 }
1369 1594
1370 spin_unlock(&host->lock); 1595 spin_unlock_bh(&host->lock);
1596
1597 /* Power down slot (after spin_unlock, may sleep) */
1598 if (present == 0 && host->pdata->setpower)
1599 host->pdata->setpower(slot->id, 0);
1600
1371 present = dw_mci_get_cd(mmc); 1601 present = dw_mci_get_cd(mmc);
1372 } 1602 }
1373 1603
@@ -1416,7 +1646,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1416 mmc->caps |= MMC_CAP_4_BIT_DATA; 1646 mmc->caps |= MMC_CAP_4_BIT_DATA;
1417 1647
1418 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) 1648 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
1419 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 1649 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
1420 1650
1421#ifdef CONFIG_MMC_DW_IDMAC 1651#ifdef CONFIG_MMC_DW_IDMAC
1422 mmc->max_segs = host->ring_size; 1652 mmc->max_segs = host->ring_size;
@@ -1467,7 +1697,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1467 * Card may have been plugged in prior to boot so we 1697 * Card may have been plugged in prior to boot so we
1468 * need to run the detect tasklet 1698 * need to run the detect tasklet
1469 */ 1699 */
1470 tasklet_schedule(&host->card_tasklet); 1700 queue_work(dw_mci_card_workqueue, &host->card_work);
1471 1701
1472 return 0; 1702 return 0;
1473} 1703}
@@ -1595,7 +1825,7 @@ static int dw_mci_probe(struct platform_device *pdev)
1595 INIT_LIST_HEAD(&host->queue); 1825 INIT_LIST_HEAD(&host->queue);
1596 1826
1597 ret = -ENOMEM; 1827 ret = -ENOMEM;
1598 host->regs = ioremap(regs->start, regs->end - regs->start + 1); 1828 host->regs = ioremap(regs->start, resource_size(regs));
1599 if (!host->regs) 1829 if (!host->regs)
1600 goto err_freehost; 1830 goto err_freehost;
1601 1831
@@ -1645,8 +1875,19 @@ static int dw_mci_probe(struct platform_device *pdev)
1645 * FIFO threshold settings RxMark = fifo_size / 2 - 1, 1875 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
1646 * Tx Mark = fifo_size / 2 DMA Size = 8 1876 * Tx Mark = fifo_size / 2 DMA Size = 8
1647 */ 1877 */
1648 fifo_size = mci_readl(host, FIFOTH); 1878 if (!host->pdata->fifo_depth) {
1649 fifo_size = (fifo_size >> 16) & 0x7ff; 1879 /*
1880 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
1881 * have been overwritten by the bootloader, just like we're
1882 * about to do, so if you know the value for your hardware, you
1883 * should put it in the platform data.
1884 */
1885 fifo_size = mci_readl(host, FIFOTH);
1886 fifo_size = 1 + ((fifo_size >> 16) & 0x7ff);
1887 } else {
1888 fifo_size = host->pdata->fifo_depth;
1889 }
1890 host->fifo_depth = fifo_size;
1650 host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) | 1891 host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
1651 ((fifo_size/2) << 0)); 1892 ((fifo_size/2) << 0));
1652 mci_writel(host, FIFOTH, host->fifoth_val); 1893 mci_writel(host, FIFOTH, host->fifoth_val);
@@ -1656,12 +1897,15 @@ static int dw_mci_probe(struct platform_device *pdev)
1656 mci_writel(host, CLKSRC, 0); 1897 mci_writel(host, CLKSRC, 0);
1657 1898
1658 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); 1899 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
1659 tasklet_init(&host->card_tasklet, 1900 dw_mci_card_workqueue = alloc_workqueue("dw-mci-card",
1660 dw_mci_tasklet_card, (unsigned long)host); 1901 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
1902 if (!dw_mci_card_workqueue)
1903 goto err_dmaunmap;
1904 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
1661 1905
1662 ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host); 1906 ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host);
1663 if (ret) 1907 if (ret)
1664 goto err_dmaunmap; 1908 goto err_workqueue;
1665 1909
1666 platform_set_drvdata(pdev, host); 1910 platform_set_drvdata(pdev, host);
1667 1911
@@ -1690,7 +1934,9 @@ static int dw_mci_probe(struct platform_device *pdev)
1690 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ 1934 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
1691 1935
1692 dev_info(&pdev->dev, "DW MMC controller at irq %d, " 1936 dev_info(&pdev->dev, "DW MMC controller at irq %d, "
1693 "%d bit host data width\n", irq, width); 1937 "%d bit host data width, "
1938 "%u deep fifo\n",
1939 irq, width, fifo_size);
1694 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) 1940 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
1695 dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n"); 1941 dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n");
1696 1942
@@ -1705,6 +1951,9 @@ err_init_slot:
1705 } 1951 }
1706 free_irq(irq, host); 1952 free_irq(irq, host);
1707 1953
1954err_workqueue:
1955 destroy_workqueue(dw_mci_card_workqueue);
1956
1708err_dmaunmap: 1957err_dmaunmap:
1709 if (host->use_dma && host->dma_ops->exit) 1958 if (host->use_dma && host->dma_ops->exit)
1710 host->dma_ops->exit(host); 1959 host->dma_ops->exit(host);
@@ -1744,6 +1993,7 @@ static int __exit dw_mci_remove(struct platform_device *pdev)
1744 mci_writel(host, CLKSRC, 0); 1993 mci_writel(host, CLKSRC, 0);
1745 1994
1746 free_irq(platform_get_irq(pdev, 0), host); 1995 free_irq(platform_get_irq(pdev, 0), host);
1996 destroy_workqueue(dw_mci_card_workqueue);
1747 dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); 1997 dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
1748 1998
1749 if (host->use_dma && host->dma_ops->exit) 1999 if (host->use_dma && host->dma_ops->exit)
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 23c662af561..027d3773539 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -118,7 +118,6 @@
118#define SDMMC_CMD_INDX(n) ((n) & 0x1F) 118#define SDMMC_CMD_INDX(n) ((n) & 0x1F)
119/* Status register defines */ 119/* Status register defines */
120#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FF) 120#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FF)
121#define SDMMC_FIFO_SZ 32
122/* Internal DMAC interrupt defines */ 121/* Internal DMAC interrupt defines */
123#define SDMMC_IDMAC_INT_AI BIT(9) 122#define SDMMC_IDMAC_INT_AI BIT(9)
124#define SDMMC_IDMAC_INT_NI BIT(8) 123#define SDMMC_IDMAC_INT_NI BIT(8)
@@ -134,22 +133,22 @@
134 133
135/* Register access macros */ 134/* Register access macros */
136#define mci_readl(dev, reg) \ 135#define mci_readl(dev, reg) \
137 __raw_readl(dev->regs + SDMMC_##reg) 136 __raw_readl((dev)->regs + SDMMC_##reg)
138#define mci_writel(dev, reg, value) \ 137#define mci_writel(dev, reg, value) \
139 __raw_writel((value), dev->regs + SDMMC_##reg) 138 __raw_writel((value), (dev)->regs + SDMMC_##reg)
140 139
141/* 16-bit FIFO access macros */ 140/* 16-bit FIFO access macros */
142#define mci_readw(dev, reg) \ 141#define mci_readw(dev, reg) \
143 __raw_readw(dev->regs + SDMMC_##reg) 142 __raw_readw((dev)->regs + SDMMC_##reg)
144#define mci_writew(dev, reg, value) \ 143#define mci_writew(dev, reg, value) \
145 __raw_writew((value), dev->regs + SDMMC_##reg) 144 __raw_writew((value), (dev)->regs + SDMMC_##reg)
146 145
147/* 64-bit FIFO access macros */ 146/* 64-bit FIFO access macros */
148#ifdef readq 147#ifdef readq
149#define mci_readq(dev, reg) \ 148#define mci_readq(dev, reg) \
150 __raw_readq(dev->regs + SDMMC_##reg) 149 __raw_readq((dev)->regs + SDMMC_##reg)
151#define mci_writeq(dev, reg, value) \ 150#define mci_writeq(dev, reg, value) \
152 __raw_writeq((value), dev->regs + SDMMC_##reg) 151 __raw_writeq((value), (dev)->regs + SDMMC_##reg)
153#else 152#else
154/* 153/*
155 * Dummy readq implementation for architectures that don't define it. 154 * Dummy readq implementation for architectures that don't define it.
@@ -160,9 +159,9 @@
160 * rest of the code free from ifdefs. 159 * rest of the code free from ifdefs.
161 */ 160 */
162#define mci_readq(dev, reg) \ 161#define mci_readq(dev, reg) \
163 (*(volatile u64 __force *)(dev->regs + SDMMC_##reg)) 162 (*(volatile u64 __force *)((dev)->regs + SDMMC_##reg))
164#define mci_writeq(dev, reg, value) \ 163#define mci_writeq(dev, reg, value) \
165 (*(volatile u64 __force *)(dev->regs + SDMMC_##reg) = value) 164 (*(volatile u64 __force *)((dev)->regs + SDMMC_##reg) = (value))
166#endif 165#endif
167 166
168#endif /* _DW_MMC_H_ */ 167#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index fe140724a02..d8eac248ee4 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -226,6 +226,9 @@ static void __devinit mmci_dma_setup(struct mmci_host *host)
226 return; 226 return;
227 } 227 }
228 228
229 /* initialize pre request cookie */
230 host->next_data.cookie = 1;
231
229 /* Try to acquire a generic DMA engine slave channel */ 232 /* Try to acquire a generic DMA engine slave channel */
230 dma_cap_zero(mask); 233 dma_cap_zero(mask);
231 dma_cap_set(DMA_SLAVE, mask); 234 dma_cap_set(DMA_SLAVE, mask);
@@ -335,7 +338,8 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
335 dir = DMA_FROM_DEVICE; 338 dir = DMA_FROM_DEVICE;
336 } 339 }
337 340
338 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); 341 if (!data->host_cookie)
342 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
339 343
340 /* 344 /*
341 * Use of DMA with scatter-gather is impossible. 345 * Use of DMA with scatter-gather is impossible.
@@ -353,7 +357,8 @@ static void mmci_dma_data_error(struct mmci_host *host)
353 dmaengine_terminate_all(host->dma_current); 357 dmaengine_terminate_all(host->dma_current);
354} 358}
355 359
356static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) 360static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
361 struct mmci_host_next *next)
357{ 362{
358 struct variant_data *variant = host->variant; 363 struct variant_data *variant = host->variant;
359 struct dma_slave_config conf = { 364 struct dma_slave_config conf = {
@@ -364,13 +369,20 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
364 .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ 369 .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
365 .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ 370 .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
366 }; 371 };
367 struct mmc_data *data = host->data;
368 struct dma_chan *chan; 372 struct dma_chan *chan;
369 struct dma_device *device; 373 struct dma_device *device;
370 struct dma_async_tx_descriptor *desc; 374 struct dma_async_tx_descriptor *desc;
371 int nr_sg; 375 int nr_sg;
372 376
373 host->dma_current = NULL; 377 /* Check if next job is already prepared */
378 if (data->host_cookie && !next &&
379 host->dma_current && host->dma_desc_current)
380 return 0;
381
382 if (!next) {
383 host->dma_current = NULL;
384 host->dma_desc_current = NULL;
385 }
374 386
375 if (data->flags & MMC_DATA_READ) { 387 if (data->flags & MMC_DATA_READ) {
376 conf.direction = DMA_FROM_DEVICE; 388 conf.direction = DMA_FROM_DEVICE;
@@ -385,7 +397,7 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
385 return -EINVAL; 397 return -EINVAL;
386 398
387 /* If less than or equal to the fifo size, don't bother with DMA */ 399 /* If less than or equal to the fifo size, don't bother with DMA */
388 if (host->size <= variant->fifosize) 400 if (data->blksz * data->blocks <= variant->fifosize)
389 return -EINVAL; 401 return -EINVAL;
390 402
391 device = chan->device; 403 device = chan->device;
@@ -399,14 +411,38 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
399 if (!desc) 411 if (!desc)
400 goto unmap_exit; 412 goto unmap_exit;
401 413
402 /* Okay, go for it. */ 414 if (next) {
403 host->dma_current = chan; 415 next->dma_chan = chan;
416 next->dma_desc = desc;
417 } else {
418 host->dma_current = chan;
419 host->dma_desc_current = desc;
420 }
421
422 return 0;
404 423
424 unmap_exit:
425 if (!next)
426 dmaengine_terminate_all(chan);
427 dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
428 return -ENOMEM;
429}
430
431static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
432{
433 int ret;
434 struct mmc_data *data = host->data;
435
436 ret = mmci_dma_prep_data(host, host->data, NULL);
437 if (ret)
438 return ret;
439
440 /* Okay, go for it. */
405 dev_vdbg(mmc_dev(host->mmc), 441 dev_vdbg(mmc_dev(host->mmc),
406 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", 442 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
407 data->sg_len, data->blksz, data->blocks, data->flags); 443 data->sg_len, data->blksz, data->blocks, data->flags);
408 dmaengine_submit(desc); 444 dmaengine_submit(host->dma_desc_current);
409 dma_async_issue_pending(chan); 445 dma_async_issue_pending(host->dma_current);
410 446
411 datactrl |= MCI_DPSM_DMAENABLE; 447 datactrl |= MCI_DPSM_DMAENABLE;
412 448
@@ -421,14 +457,90 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
421 writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK, 457 writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
422 host->base + MMCIMASK0); 458 host->base + MMCIMASK0);
423 return 0; 459 return 0;
460}
424 461
425unmap_exit: 462static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
426 dmaengine_terminate_all(chan); 463{
427 dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction); 464 struct mmci_host_next *next = &host->next_data;
428 return -ENOMEM; 465
466 if (data->host_cookie && data->host_cookie != next->cookie) {
467 printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d"
468 " host->next_data.cookie %d\n",
469 __func__, data->host_cookie, host->next_data.cookie);
470 data->host_cookie = 0;
471 }
472
473 if (!data->host_cookie)
474 return;
475
476 host->dma_desc_current = next->dma_desc;
477 host->dma_current = next->dma_chan;
478
479 next->dma_desc = NULL;
480 next->dma_chan = NULL;
429} 481}
482
483static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
484 bool is_first_req)
485{
486 struct mmci_host *host = mmc_priv(mmc);
487 struct mmc_data *data = mrq->data;
488 struct mmci_host_next *nd = &host->next_data;
489
490 if (!data)
491 return;
492
493 if (data->host_cookie) {
494 data->host_cookie = 0;
495 return;
496 }
497
498 /* if config for dma */
499 if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
500 ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
501 if (mmci_dma_prep_data(host, data, nd))
502 data->host_cookie = 0;
503 else
504 data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
505 }
506}
507
508static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
509 int err)
510{
511 struct mmci_host *host = mmc_priv(mmc);
512 struct mmc_data *data = mrq->data;
513 struct dma_chan *chan;
514 enum dma_data_direction dir;
515
516 if (!data)
517 return;
518
519 if (data->flags & MMC_DATA_READ) {
520 dir = DMA_FROM_DEVICE;
521 chan = host->dma_rx_channel;
522 } else {
523 dir = DMA_TO_DEVICE;
524 chan = host->dma_tx_channel;
525 }
526
527
528 /* if config for dma */
529 if (chan) {
530 if (err)
531 dmaengine_terminate_all(chan);
532 if (err || data->host_cookie)
533 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
534 data->sg_len, dir);
535 mrq->data->host_cookie = 0;
536 }
537}
538
430#else 539#else
431/* Blank functions if the DMA engine is not available */ 540/* Blank functions if the DMA engine is not available */
541static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
542{
543}
432static inline void mmci_dma_setup(struct mmci_host *host) 544static inline void mmci_dma_setup(struct mmci_host *host)
433{ 545{
434} 546}
@@ -449,6 +561,10 @@ static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datac
449{ 561{
450 return -ENOSYS; 562 return -ENOSYS;
451} 563}
564
565#define mmci_pre_request NULL
566#define mmci_post_request NULL
567
452#endif 568#endif
453 569
454static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) 570static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
@@ -557,7 +673,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
557 unsigned int status) 673 unsigned int status)
558{ 674{
559 /* First check for errors */ 675 /* First check for errors */
560 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) { 676 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
677 MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
561 u32 remain, success; 678 u32 remain, success;
562 679
563 /* Terminate the DMA transfer */ 680 /* Terminate the DMA transfer */
@@ -636,8 +753,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
636 } 753 }
637 754
638 if (!cmd->data || cmd->error) { 755 if (!cmd->data || cmd->error) {
639 if (host->data) 756 if (host->data) {
757 /* Terminate the DMA transfer */
758 if (dma_inprogress(host))
759 mmci_dma_data_error(host);
640 mmci_stop_data(host); 760 mmci_stop_data(host);
761 }
641 mmci_request_end(host, cmd->mrq); 762 mmci_request_end(host, cmd->mrq);
642 } else if (!(cmd->data->flags & MMC_DATA_READ)) { 763 } else if (!(cmd->data->flags & MMC_DATA_READ)) {
643 mmci_start_data(host, cmd->data); 764 mmci_start_data(host, cmd->data);
@@ -837,8 +958,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
837 dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); 958 dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
838 959
839 data = host->data; 960 data = host->data;
840 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN| 961 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
841 MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data) 962 MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
963 MCI_DATABLOCKEND) && data)
842 mmci_data_irq(host, data, status); 964 mmci_data_irq(host, data, status);
843 965
844 cmd = host->cmd; 966 cmd = host->cmd;
@@ -872,6 +994,9 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
872 994
873 host->mrq = mrq; 995 host->mrq = mrq;
874 996
997 if (mrq->data)
998 mmci_get_next_data(host, mrq->data);
999
875 if (mrq->data && mrq->data->flags & MMC_DATA_READ) 1000 if (mrq->data && mrq->data->flags & MMC_DATA_READ)
876 mmci_start_data(host, mrq->data); 1001 mmci_start_data(host, mrq->data);
877 1002
@@ -986,6 +1111,8 @@ static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
986 1111
987static const struct mmc_host_ops mmci_ops = { 1112static const struct mmc_host_ops mmci_ops = {
988 .request = mmci_request, 1113 .request = mmci_request,
1114 .pre_req = mmci_pre_request,
1115 .post_req = mmci_post_request,
989 .set_ios = mmci_set_ios, 1116 .set_ios = mmci_set_ios,
990 .get_ro = mmci_get_ro, 1117 .get_ro = mmci_get_ro,
991 .get_cd = mmci_get_cd, 1118 .get_cd = mmci_get_cd,
@@ -1063,7 +1190,15 @@ static int __devinit mmci_probe(struct amba_device *dev,
1063 } 1190 }
1064 1191
1065 mmc->ops = &mmci_ops; 1192 mmc->ops = &mmci_ops;
1066 mmc->f_min = (host->mclk + 511) / 512; 1193 /*
1194 * The ARM and ST versions of the block have slightly different
1195 * clock divider equations which means that the minimum divider
1196 * differs too.
1197 */
1198 if (variant->st_clkdiv)
1199 mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
1200 else
1201 mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
1067 /* 1202 /*
1068 * If the platform data supplies a maximum operating 1203 * If the platform data supplies a maximum operating
1069 * frequency, this takes precedence. Else, we fall back 1204 * frequency, this takes precedence. Else, we fall back
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 2164e8c6476..79e4143ab9d 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -166,6 +166,12 @@ struct clk;
166struct variant_data; 166struct variant_data;
167struct dma_chan; 167struct dma_chan;
168 168
169struct mmci_host_next {
170 struct dma_async_tx_descriptor *dma_desc;
171 struct dma_chan *dma_chan;
172 s32 cookie;
173};
174
169struct mmci_host { 175struct mmci_host {
170 phys_addr_t phybase; 176 phys_addr_t phybase;
171 void __iomem *base; 177 void __iomem *base;
@@ -203,6 +209,8 @@ struct mmci_host {
203 struct dma_chan *dma_current; 209 struct dma_chan *dma_current;
204 struct dma_chan *dma_rx_channel; 210 struct dma_chan *dma_rx_channel;
205 struct dma_chan *dma_tx_channel; 211 struct dma_chan *dma_tx_channel;
212 struct dma_async_tx_descriptor *dma_desc_current;
213 struct mmci_host_next next_data;
206 214
207#define dma_inprogress(host) ((host)->dma_current) 215#define dma_inprogress(host) ((host)->dma_current)
208#else 216#else
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index cc20e025932..b87143d0aeb 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -715,13 +715,13 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
715 int burstlen, ret; 715 int burstlen, ret;
716 716
717 /* 717 /*
718 * use burstlen of 64 in 4 bit mode (--> reg value 0) 718 * use burstlen of 64 (16 words) in 4 bit mode (--> reg value 0)
719 * use burstlen of 16 in 1 bit mode (--> reg value 16) 719 * use burstlen of 16 (4 words) in 1 bit mode (--> reg value 16)
720 */ 720 */
721 if (ios->bus_width == MMC_BUS_WIDTH_4) 721 if (ios->bus_width == MMC_BUS_WIDTH_4)
722 burstlen = 64;
723 else
724 burstlen = 16; 722 burstlen = 16;
723 else
724 burstlen = 4;
725 725
726 if (mxcmci_use_dma(host) && burstlen != host->burstlen) { 726 if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
727 host->burstlen = burstlen; 727 host->burstlen = burstlen;
@@ -731,6 +731,7 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
731 "failed to config DMA channel. Falling back to PIO\n"); 731 "failed to config DMA channel. Falling back to PIO\n");
732 dma_release_channel(host->dma); 732 dma_release_channel(host->dma);
733 host->do_dma = 0; 733 host->do_dma = 0;
734 host->dma = NULL;
734 } 735 }
735 } 736 }
736 737
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 99d39a6a103..d513d47364d 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -564,40 +564,38 @@ static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
564 564
565static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate) 565static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate)
566{ 566{
567 unsigned int ssp_rate, bit_rate; 567 unsigned int ssp_clk, ssp_sck;
568 u32 div1, div2; 568 u32 clock_divide, clock_rate;
569 u32 val; 569 u32 val;
570 570
571 ssp_rate = clk_get_rate(host->clk); 571 ssp_clk = clk_get_rate(host->clk);
572 572
573 for (div1 = 2; div1 < 254; div1 += 2) { 573 for (clock_divide = 2; clock_divide <= 254; clock_divide += 2) {
574 div2 = ssp_rate / rate / div1; 574 clock_rate = DIV_ROUND_UP(ssp_clk, rate * clock_divide);
575 if (div2 < 0x100) 575 clock_rate = (clock_rate > 0) ? clock_rate - 1 : 0;
576 if (clock_rate <= 255)
576 break; 577 break;
577 } 578 }
578 579
579 if (div1 >= 254) { 580 if (clock_divide > 254) {
580 dev_err(mmc_dev(host->mmc), 581 dev_err(mmc_dev(host->mmc),
581 "%s: cannot set clock to %d\n", __func__, rate); 582 "%s: cannot set clock to %d\n", __func__, rate);
582 return; 583 return;
583 } 584 }
584 585
585 if (div2 == 0) 586 ssp_sck = ssp_clk / clock_divide / (1 + clock_rate);
586 bit_rate = ssp_rate / div1;
587 else
588 bit_rate = ssp_rate / div1 / div2;
589 587
590 val = readl(host->base + HW_SSP_TIMING); 588 val = readl(host->base + HW_SSP_TIMING);
591 val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE); 589 val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
592 val |= BF_SSP(div1, TIMING_CLOCK_DIVIDE); 590 val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE);
593 val |= BF_SSP(div2 - 1, TIMING_CLOCK_RATE); 591 val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE);
594 writel(val, host->base + HW_SSP_TIMING); 592 writel(val, host->base + HW_SSP_TIMING);
595 593
596 host->clk_rate = bit_rate; 594 host->clk_rate = ssp_sck;
597 595
598 dev_dbg(mmc_dev(host->mmc), 596 dev_dbg(mmc_dev(host->mmc),
599 "%s: div1 %d, div2 %d, ssp %d, bit %d, rate %d\n", 597 "%s: clock_divide %d, clock_rate %d, ssp_clk %d, rate_actual %d, rate_requested %d\n",
600 __func__, div1, div2, ssp_rate, bit_rate, rate); 598 __func__, clock_divide, clock_rate, ssp_clk, ssp_sck, rate);
601} 599}
602 600
603static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 601static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index dedf3dab8a3..21e4a799df4 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -17,6 +17,7 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/kernel.h>
20#include <linux/debugfs.h> 21#include <linux/debugfs.h>
21#include <linux/seq_file.h> 22#include <linux/seq_file.h>
22#include <linux/interrupt.h> 23#include <linux/interrupt.h>
@@ -33,6 +34,7 @@
33#include <linux/semaphore.h> 34#include <linux/semaphore.h>
34#include <linux/gpio.h> 35#include <linux/gpio.h>
35#include <linux/regulator/consumer.h> 36#include <linux/regulator/consumer.h>
37#include <linux/pm_runtime.h>
36#include <plat/dma.h> 38#include <plat/dma.h>
37#include <mach/hardware.h> 39#include <mach/hardware.h>
38#include <plat/board.h> 40#include <plat/board.h>
@@ -116,15 +118,13 @@
116#define OMAP_MMC4_DEVID 3 118#define OMAP_MMC4_DEVID 3
117#define OMAP_MMC5_DEVID 4 119#define OMAP_MMC5_DEVID 4
118 120
121#define MMC_AUTOSUSPEND_DELAY 100
119#define MMC_TIMEOUT_MS 20 122#define MMC_TIMEOUT_MS 20
120#define OMAP_MMC_MASTER_CLOCK 96000000 123#define OMAP_MMC_MASTER_CLOCK 96000000
124#define OMAP_MMC_MIN_CLOCK 400000
125#define OMAP_MMC_MAX_CLOCK 52000000
121#define DRIVER_NAME "omap_hsmmc" 126#define DRIVER_NAME "omap_hsmmc"
122 127
123/* Timeouts for entering power saving states on inactivity, msec */
124#define OMAP_MMC_DISABLED_TIMEOUT 100
125#define OMAP_MMC_SLEEP_TIMEOUT 1000
126#define OMAP_MMC_OFF_TIMEOUT 8000
127
128/* 128/*
129 * One controller can have multiple slots, like on some omap boards using 129 * One controller can have multiple slots, like on some omap boards using
130 * omap.c controller driver. Luckily this is not currently done on any known 130 * omap.c controller driver. Luckily this is not currently done on any known
@@ -141,6 +141,11 @@
141#define OMAP_HSMMC_WRITE(base, reg, val) \ 141#define OMAP_HSMMC_WRITE(base, reg, val) \
142 __raw_writel((val), (base) + OMAP_HSMMC_##reg) 142 __raw_writel((val), (base) + OMAP_HSMMC_##reg)
143 143
144struct omap_hsmmc_next {
145 unsigned int dma_len;
146 s32 cookie;
147};
148
144struct omap_hsmmc_host { 149struct omap_hsmmc_host {
145 struct device *dev; 150 struct device *dev;
146 struct mmc_host *mmc; 151 struct mmc_host *mmc;
@@ -148,7 +153,6 @@ struct omap_hsmmc_host {
148 struct mmc_command *cmd; 153 struct mmc_command *cmd;
149 struct mmc_data *data; 154 struct mmc_data *data;
150 struct clk *fclk; 155 struct clk *fclk;
151 struct clk *iclk;
152 struct clk *dbclk; 156 struct clk *dbclk;
153 /* 157 /*
154 * vcc == configured supply 158 * vcc == configured supply
@@ -184,6 +188,7 @@ struct omap_hsmmc_host {
184 int reqs_blocked; 188 int reqs_blocked;
185 int use_reg; 189 int use_reg;
186 int req_in_progress; 190 int req_in_progress;
191 struct omap_hsmmc_next next_data;
187 192
188 struct omap_mmc_platform_data *pdata; 193 struct omap_mmc_platform_data *pdata;
189}; 194};
@@ -548,6 +553,15 @@ static void omap_hsmmc_gpio_free(struct omap_mmc_platform_data *pdata)
548} 553}
549 554
550/* 555/*
556 * Start clock to the card
557 */
558static void omap_hsmmc_start_clock(struct omap_hsmmc_host *host)
559{
560 OMAP_HSMMC_WRITE(host->base, SYSCTL,
561 OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
562}
563
564/*
551 * Stop clock to the card 565 * Stop clock to the card
552 */ 566 */
553static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host) 567static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
@@ -584,6 +598,81 @@ static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
584 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); 598 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
585} 599}
586 600
601/* Calculate divisor for the given clock frequency */
602static u16 calc_divisor(struct mmc_ios *ios)
603{
604 u16 dsor = 0;
605
606 if (ios->clock) {
607 dsor = DIV_ROUND_UP(OMAP_MMC_MASTER_CLOCK, ios->clock);
608 if (dsor > 250)
609 dsor = 250;
610 }
611
612 return dsor;
613}
614
615static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
616{
617 struct mmc_ios *ios = &host->mmc->ios;
618 unsigned long regval;
619 unsigned long timeout;
620
621 dev_dbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock);
622
623 omap_hsmmc_stop_clock(host);
624
625 regval = OMAP_HSMMC_READ(host->base, SYSCTL);
626 regval = regval & ~(CLKD_MASK | DTO_MASK);
627 regval = regval | (calc_divisor(ios) << 6) | (DTO << 16);
628 OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
629 OMAP_HSMMC_WRITE(host->base, SYSCTL,
630 OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);
631
632 /* Wait till the ICS bit is set */
633 timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
634 while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
635 && time_before(jiffies, timeout))
636 cpu_relax();
637
638 omap_hsmmc_start_clock(host);
639}
640
641static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
642{
643 struct mmc_ios *ios = &host->mmc->ios;
644 u32 con;
645
646 con = OMAP_HSMMC_READ(host->base, CON);
647 switch (ios->bus_width) {
648 case MMC_BUS_WIDTH_8:
649 OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
650 break;
651 case MMC_BUS_WIDTH_4:
652 OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
653 OMAP_HSMMC_WRITE(host->base, HCTL,
654 OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
655 break;
656 case MMC_BUS_WIDTH_1:
657 OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
658 OMAP_HSMMC_WRITE(host->base, HCTL,
659 OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
660 break;
661 }
662}
663
664static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host)
665{
666 struct mmc_ios *ios = &host->mmc->ios;
667 u32 con;
668
669 con = OMAP_HSMMC_READ(host->base, CON);
670 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
671 OMAP_HSMMC_WRITE(host->base, CON, con | OD);
672 else
673 OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
674}
675
587#ifdef CONFIG_PM 676#ifdef CONFIG_PM
588 677
589/* 678/*
@@ -595,8 +684,7 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
595 struct mmc_ios *ios = &host->mmc->ios; 684 struct mmc_ios *ios = &host->mmc->ios;
596 struct omap_mmc_platform_data *pdata = host->pdata; 685 struct omap_mmc_platform_data *pdata = host->pdata;
597 int context_loss = 0; 686 int context_loss = 0;
598 u32 hctl, capa, con; 687 u32 hctl, capa;
599 u16 dsor = 0;
600 unsigned long timeout; 688 unsigned long timeout;
601 689
602 if (pdata->get_context_loss_count) { 690 if (pdata->get_context_loss_count) {
@@ -658,54 +746,12 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
658 if (host->power_mode == MMC_POWER_OFF) 746 if (host->power_mode == MMC_POWER_OFF)
659 goto out; 747 goto out;
660 748
661 con = OMAP_HSMMC_READ(host->base, CON); 749 omap_hsmmc_set_bus_width(host);
662 switch (ios->bus_width) {
663 case MMC_BUS_WIDTH_8:
664 OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
665 break;
666 case MMC_BUS_WIDTH_4:
667 OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
668 OMAP_HSMMC_WRITE(host->base, HCTL,
669 OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
670 break;
671 case MMC_BUS_WIDTH_1:
672 OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
673 OMAP_HSMMC_WRITE(host->base, HCTL,
674 OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
675 break;
676 }
677
678 if (ios->clock) {
679 dsor = OMAP_MMC_MASTER_CLOCK / ios->clock;
680 if (dsor < 1)
681 dsor = 1;
682
683 if (OMAP_MMC_MASTER_CLOCK / dsor > ios->clock)
684 dsor++;
685
686 if (dsor > 250)
687 dsor = 250;
688 }
689
690 OMAP_HSMMC_WRITE(host->base, SYSCTL,
691 OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);
692 OMAP_HSMMC_WRITE(host->base, SYSCTL, (dsor << 6) | (DTO << 16));
693 OMAP_HSMMC_WRITE(host->base, SYSCTL,
694 OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);
695 750
696 timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); 751 omap_hsmmc_set_clock(host);
697 while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
698 && time_before(jiffies, timeout))
699 ;
700 752
701 OMAP_HSMMC_WRITE(host->base, SYSCTL, 753 omap_hsmmc_set_bus_mode(host);
702 OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
703 754
704 con = OMAP_HSMMC_READ(host->base, CON);
705 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
706 OMAP_HSMMC_WRITE(host->base, CON, con | OD);
707 else
708 OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
709out: 755out:
710 host->context_loss = context_loss; 756 host->context_loss = context_loss;
711 757
@@ -973,14 +1019,14 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
973 * Readable error output 1019 * Readable error output
974 */ 1020 */
975#ifdef CONFIG_MMC_DEBUG 1021#ifdef CONFIG_MMC_DEBUG
976static void omap_hsmmc_report_irq(struct omap_hsmmc_host *host, u32 status) 1022static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status)
977{ 1023{
978 /* --- means reserved bit without definition at documentation */ 1024 /* --- means reserved bit without definition at documentation */
979 static const char *omap_hsmmc_status_bits[] = { 1025 static const char *omap_hsmmc_status_bits[] = {
980 "CC", "TC", "BGE", "---", "BWR", "BRR", "---", "---", "CIRQ", 1026 "CC" , "TC" , "BGE", "---", "BWR" , "BRR" , "---" , "---" ,
981 "OBI", "---", "---", "---", "---", "---", "ERRI", "CTO", "CCRC", 1027 "CIRQ", "OBI" , "---", "---", "---" , "---" , "---" , "ERRI",
982 "CEB", "CIE", "DTO", "DCRC", "DEB", "---", "ACE", "---", 1028 "CTO" , "CCRC", "CEB", "CIE", "DTO" , "DCRC", "DEB" , "---" ,
983 "---", "---", "---", "CERR", "CERR", "BADA", "---", "---", "---" 1029 "ACE" , "---" , "---", "---", "CERR", "BADA", "---" , "---"
984 }; 1030 };
985 char res[256]; 1031 char res[256];
986 char *buf = res; 1032 char *buf = res;
@@ -997,6 +1043,11 @@ static void omap_hsmmc_report_irq(struct omap_hsmmc_host *host, u32 status)
997 1043
998 dev_dbg(mmc_dev(host->mmc), "%s\n", res); 1044 dev_dbg(mmc_dev(host->mmc), "%s\n", res);
999} 1045}
1046#else
1047static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host,
1048 u32 status)
1049{
1050}
1000#endif /* CONFIG_MMC_DEBUG */ 1051#endif /* CONFIG_MMC_DEBUG */
1001 1052
1002/* 1053/*
@@ -1055,9 +1106,7 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
1055 dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); 1106 dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
1056 1107
1057 if (status & ERR) { 1108 if (status & ERR) {
1058#ifdef CONFIG_MMC_DEBUG 1109 omap_hsmmc_dbg_report_irq(host, status);
1059 omap_hsmmc_report_irq(host, status);
1060#endif
1061 if ((status & CMD_TIMEOUT) || 1110 if ((status & CMD_TIMEOUT) ||
1062 (status & CMD_CRC)) { 1111 (status & CMD_CRC)) {
1063 if (host->cmd) { 1112 if (host->cmd) {
@@ -1155,8 +1204,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
1155 int ret; 1204 int ret;
1156 1205
1157 /* Disable the clocks */ 1206 /* Disable the clocks */
1158 clk_disable(host->fclk); 1207 pm_runtime_put_sync(host->dev);
1159 clk_disable(host->iclk);
1160 if (host->got_dbclk) 1208 if (host->got_dbclk)
1161 clk_disable(host->dbclk); 1209 clk_disable(host->dbclk);
1162 1210
@@ -1167,8 +1215,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
1167 if (!ret) 1215 if (!ret)
1168 ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1, 1216 ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1,
1169 vdd); 1217 vdd);
1170 clk_enable(host->iclk); 1218 pm_runtime_get_sync(host->dev);
1171 clk_enable(host->fclk);
1172 if (host->got_dbclk) 1219 if (host->got_dbclk)
1173 clk_enable(host->dbclk); 1220 clk_enable(host->dbclk);
1174 1221
@@ -1322,7 +1369,7 @@ static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
1322static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) 1369static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1323{ 1370{
1324 struct omap_hsmmc_host *host = cb_data; 1371 struct omap_hsmmc_host *host = cb_data;
1325 struct mmc_data *data = host->mrq->data; 1372 struct mmc_data *data;
1326 int dma_ch, req_in_progress; 1373 int dma_ch, req_in_progress;
1327 1374
1328 if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) { 1375 if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
@@ -1337,6 +1384,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1337 return; 1384 return;
1338 } 1385 }
1339 1386
1387 data = host->mrq->data;
1340 host->dma_sg_idx++; 1388 host->dma_sg_idx++;
1341 if (host->dma_sg_idx < host->dma_len) { 1389 if (host->dma_sg_idx < host->dma_len) {
1342 /* Fire up the next transfer. */ 1390 /* Fire up the next transfer. */
@@ -1346,8 +1394,9 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1346 return; 1394 return;
1347 } 1395 }
1348 1396
1349 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 1397 if (!data->host_cookie)
1350 omap_hsmmc_get_dma_dir(host, data)); 1398 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
1399 omap_hsmmc_get_dma_dir(host, data));
1351 1400
1352 req_in_progress = host->req_in_progress; 1401 req_in_progress = host->req_in_progress;
1353 dma_ch = host->dma_ch; 1402 dma_ch = host->dma_ch;
@@ -1365,6 +1414,45 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1365 } 1414 }
1366} 1415}
1367 1416
1417static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
1418 struct mmc_data *data,
1419 struct omap_hsmmc_next *next)
1420{
1421 int dma_len;
1422
1423 if (!next && data->host_cookie &&
1424 data->host_cookie != host->next_data.cookie) {
1425 printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d"
1426 " host->next_data.cookie %d\n",
1427 __func__, data->host_cookie, host->next_data.cookie);
1428 data->host_cookie = 0;
1429 }
1430
1431 /* Check if next job is already prepared */
1432 if (next ||
1433 (!next && data->host_cookie != host->next_data.cookie)) {
1434 dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
1435 data->sg_len,
1436 omap_hsmmc_get_dma_dir(host, data));
1437
1438 } else {
1439 dma_len = host->next_data.dma_len;
1440 host->next_data.dma_len = 0;
1441 }
1442
1443
1444 if (dma_len == 0)
1445 return -EINVAL;
1446
1447 if (next) {
1448 next->dma_len = dma_len;
1449 data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
1450 } else
1451 host->dma_len = dma_len;
1452
1453 return 0;
1454}
1455
1368/* 1456/*
1369 * Routine to configure and start DMA for the MMC card 1457 * Routine to configure and start DMA for the MMC card
1370 */ 1458 */
@@ -1398,9 +1486,10 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
1398 mmc_hostname(host->mmc), ret); 1486 mmc_hostname(host->mmc), ret);
1399 return ret; 1487 return ret;
1400 } 1488 }
1489 ret = omap_hsmmc_pre_dma_transfer(host, data, NULL);
1490 if (ret)
1491 return ret;
1401 1492
1402 host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
1403 data->sg_len, omap_hsmmc_get_dma_dir(host, data));
1404 host->dma_ch = dma_ch; 1493 host->dma_ch = dma_ch;
1405 host->dma_sg_idx = 0; 1494 host->dma_sg_idx = 0;
1406 1495
@@ -1480,6 +1569,35 @@ omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
1480 return 0; 1569 return 0;
1481} 1570}
1482 1571
1572static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
1573 int err)
1574{
1575 struct omap_hsmmc_host *host = mmc_priv(mmc);
1576 struct mmc_data *data = mrq->data;
1577
1578 if (host->use_dma) {
1579 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
1580 omap_hsmmc_get_dma_dir(host, data));
1581 data->host_cookie = 0;
1582 }
1583}
1584
1585static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
1586 bool is_first_req)
1587{
1588 struct omap_hsmmc_host *host = mmc_priv(mmc);
1589
1590 if (mrq->data->host_cookie) {
1591 mrq->data->host_cookie = 0;
1592 return ;
1593 }
1594
1595 if (host->use_dma)
1596 if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
1597 &host->next_data))
1598 mrq->data->host_cookie = 0;
1599}
1600
1483/* 1601/*
1484 * Request function. for read/write operation 1602 * Request function. for read/write operation
1485 */ 1603 */
@@ -1528,13 +1646,9 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
1528static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1646static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1529{ 1647{
1530 struct omap_hsmmc_host *host = mmc_priv(mmc); 1648 struct omap_hsmmc_host *host = mmc_priv(mmc);
1531 u16 dsor = 0;
1532 unsigned long regval;
1533 unsigned long timeout;
1534 u32 con;
1535 int do_send_init_stream = 0; 1649 int do_send_init_stream = 0;
1536 1650
1537 mmc_host_enable(host->mmc); 1651 pm_runtime_get_sync(host->dev);
1538 1652
1539 if (ios->power_mode != host->power_mode) { 1653 if (ios->power_mode != host->power_mode) {
1540 switch (ios->power_mode) { 1654 switch (ios->power_mode) {
@@ -1557,22 +1671,7 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1557 1671
1558 /* FIXME: set registers based only on changes to ios */ 1672 /* FIXME: set registers based only on changes to ios */
1559 1673
1560 con = OMAP_HSMMC_READ(host->base, CON); 1674 omap_hsmmc_set_bus_width(host);
1561 switch (mmc->ios.bus_width) {
1562 case MMC_BUS_WIDTH_8:
1563 OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
1564 break;
1565 case MMC_BUS_WIDTH_4:
1566 OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
1567 OMAP_HSMMC_WRITE(host->base, HCTL,
1568 OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
1569 break;
1570 case MMC_BUS_WIDTH_1:
1571 OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
1572 OMAP_HSMMC_WRITE(host->base, HCTL,
1573 OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
1574 break;
1575 }
1576 1675
1577 if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { 1676 if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
1578 /* Only MMC1 can interface at 3V without some flavor 1677 /* Only MMC1 can interface at 3V without some flavor
@@ -1592,47 +1691,14 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1592 } 1691 }
1593 } 1692 }
1594 1693
1595 if (ios->clock) { 1694 omap_hsmmc_set_clock(host);
1596 dsor = OMAP_MMC_MASTER_CLOCK / ios->clock;
1597 if (dsor < 1)
1598 dsor = 1;
1599
1600 if (OMAP_MMC_MASTER_CLOCK / dsor > ios->clock)
1601 dsor++;
1602
1603 if (dsor > 250)
1604 dsor = 250;
1605 }
1606 omap_hsmmc_stop_clock(host);
1607 regval = OMAP_HSMMC_READ(host->base, SYSCTL);
1608 regval = regval & ~(CLKD_MASK);
1609 regval = regval | (dsor << 6) | (DTO << 16);
1610 OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
1611 OMAP_HSMMC_WRITE(host->base, SYSCTL,
1612 OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);
1613
1614 /* Wait till the ICS bit is set */
1615 timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
1616 while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
1617 && time_before(jiffies, timeout))
1618 msleep(1);
1619
1620 OMAP_HSMMC_WRITE(host->base, SYSCTL,
1621 OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
1622 1695
1623 if (do_send_init_stream) 1696 if (do_send_init_stream)
1624 send_init_stream(host); 1697 send_init_stream(host);
1625 1698
1626 con = OMAP_HSMMC_READ(host->base, CON); 1699 omap_hsmmc_set_bus_mode(host);
1627 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
1628 OMAP_HSMMC_WRITE(host->base, CON, con | OD);
1629 else
1630 OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
1631 1700
1632 if (host->power_mode == MMC_POWER_OFF) 1701 pm_runtime_put_autosuspend(host->dev);
1633 mmc_host_disable(host->mmc);
1634 else
1635 mmc_host_lazy_disable(host->mmc);
1636} 1702}
1637 1703
1638static int omap_hsmmc_get_cd(struct mmc_host *mmc) 1704static int omap_hsmmc_get_cd(struct mmc_host *mmc)
@@ -1688,230 +1754,12 @@ static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
1688 set_sd_bus_power(host); 1754 set_sd_bus_power(host);
1689} 1755}
1690 1756
1691/*
1692 * Dynamic power saving handling, FSM:
1693 * ENABLED -> DISABLED -> CARDSLEEP / REGSLEEP -> OFF
1694 * ^___________| | |
1695 * |______________________|______________________|
1696 *
1697 * ENABLED: mmc host is fully functional
1698 * DISABLED: fclk is off
1699 * CARDSLEEP: fclk is off, card is asleep, voltage regulator is asleep
1700 * REGSLEEP: fclk is off, voltage regulator is asleep
1701 * OFF: fclk is off, voltage regulator is off
1702 *
1703 * Transition handlers return the timeout for the next state transition
1704 * or negative error.
1705 */
1706
1707enum {ENABLED = 0, DISABLED, CARDSLEEP, REGSLEEP, OFF};
1708
1709/* Handler for [ENABLED -> DISABLED] transition */
1710static int omap_hsmmc_enabled_to_disabled(struct omap_hsmmc_host *host)
1711{
1712 omap_hsmmc_context_save(host);
1713 clk_disable(host->fclk);
1714 host->dpm_state = DISABLED;
1715
1716 dev_dbg(mmc_dev(host->mmc), "ENABLED -> DISABLED\n");
1717
1718 if (host->power_mode == MMC_POWER_OFF)
1719 return 0;
1720
1721 return OMAP_MMC_SLEEP_TIMEOUT;
1722}
1723
1724/* Handler for [DISABLED -> REGSLEEP / CARDSLEEP] transition */
1725static int omap_hsmmc_disabled_to_sleep(struct omap_hsmmc_host *host)
1726{
1727 int err, new_state;
1728
1729 if (!mmc_try_claim_host(host->mmc))
1730 return 0;
1731
1732 clk_enable(host->fclk);
1733 omap_hsmmc_context_restore(host);
1734 if (mmc_card_can_sleep(host->mmc)) {
1735 err = mmc_card_sleep(host->mmc);
1736 if (err < 0) {
1737 clk_disable(host->fclk);
1738 mmc_release_host(host->mmc);
1739 return err;
1740 }
1741 new_state = CARDSLEEP;
1742 } else {
1743 new_state = REGSLEEP;
1744 }
1745 if (mmc_slot(host).set_sleep)
1746 mmc_slot(host).set_sleep(host->dev, host->slot_id, 1, 0,
1747 new_state == CARDSLEEP);
1748 /* FIXME: turn off bus power and perhaps interrupts too */
1749 clk_disable(host->fclk);
1750 host->dpm_state = new_state;
1751
1752 mmc_release_host(host->mmc);
1753
1754 dev_dbg(mmc_dev(host->mmc), "DISABLED -> %s\n",
1755 host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
1756
1757 if (mmc_slot(host).no_off)
1758 return 0;
1759
1760 if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
1761 mmc_slot(host).card_detect ||
1762 (mmc_slot(host).get_cover_state &&
1763 mmc_slot(host).get_cover_state(host->dev, host->slot_id)))
1764 return OMAP_MMC_OFF_TIMEOUT;
1765
1766 return 0;
1767}
1768
1769/* Handler for [REGSLEEP / CARDSLEEP -> OFF] transition */
1770static int omap_hsmmc_sleep_to_off(struct omap_hsmmc_host *host)
1771{
1772 if (!mmc_try_claim_host(host->mmc))
1773 return 0;
1774
1775 if (mmc_slot(host).no_off)
1776 return 0;
1777
1778 if (!((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
1779 mmc_slot(host).card_detect ||
1780 (mmc_slot(host).get_cover_state &&
1781 mmc_slot(host).get_cover_state(host->dev, host->slot_id)))) {
1782 mmc_release_host(host->mmc);
1783 return 0;
1784 }
1785
1786 mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);
1787 host->vdd = 0;
1788 host->power_mode = MMC_POWER_OFF;
1789
1790 dev_dbg(mmc_dev(host->mmc), "%s -> OFF\n",
1791 host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
1792
1793 host->dpm_state = OFF;
1794
1795 mmc_release_host(host->mmc);
1796
1797 return 0;
1798}
1799
1800/* Handler for [DISABLED -> ENABLED] transition */
1801static int omap_hsmmc_disabled_to_enabled(struct omap_hsmmc_host *host)
1802{
1803 int err;
1804
1805 err = clk_enable(host->fclk);
1806 if (err < 0)
1807 return err;
1808
1809 omap_hsmmc_context_restore(host);
1810 host->dpm_state = ENABLED;
1811
1812 dev_dbg(mmc_dev(host->mmc), "DISABLED -> ENABLED\n");
1813
1814 return 0;
1815}
1816
1817/* Handler for [SLEEP -> ENABLED] transition */
1818static int omap_hsmmc_sleep_to_enabled(struct omap_hsmmc_host *host)
1819{
1820 if (!mmc_try_claim_host(host->mmc))
1821 return 0;
1822
1823 clk_enable(host->fclk);
1824 omap_hsmmc_context_restore(host);
1825 if (mmc_slot(host).set_sleep)
1826 mmc_slot(host).set_sleep(host->dev, host->slot_id, 0,
1827 host->vdd, host->dpm_state == CARDSLEEP);
1828 if (mmc_card_can_sleep(host->mmc))
1829 mmc_card_awake(host->mmc);
1830
1831 dev_dbg(mmc_dev(host->mmc), "%s -> ENABLED\n",
1832 host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
1833
1834 host->dpm_state = ENABLED;
1835
1836 mmc_release_host(host->mmc);
1837
1838 return 0;
1839}
1840
1841/* Handler for [OFF -> ENABLED] transition */
1842static int omap_hsmmc_off_to_enabled(struct omap_hsmmc_host *host)
1843{
1844 clk_enable(host->fclk);
1845
1846 omap_hsmmc_context_restore(host);
1847 omap_hsmmc_conf_bus_power(host);
1848 mmc_power_restore_host(host->mmc);
1849
1850 host->dpm_state = ENABLED;
1851
1852 dev_dbg(mmc_dev(host->mmc), "OFF -> ENABLED\n");
1853
1854 return 0;
1855}
1856
1857/*
1858 * Bring MMC host to ENABLED from any other PM state.
1859 */
1860static int omap_hsmmc_enable(struct mmc_host *mmc)
1861{
1862 struct omap_hsmmc_host *host = mmc_priv(mmc);
1863
1864 switch (host->dpm_state) {
1865 case DISABLED:
1866 return omap_hsmmc_disabled_to_enabled(host);
1867 case CARDSLEEP:
1868 case REGSLEEP:
1869 return omap_hsmmc_sleep_to_enabled(host);
1870 case OFF:
1871 return omap_hsmmc_off_to_enabled(host);
1872 default:
1873 dev_dbg(mmc_dev(host->mmc), "UNKNOWN state\n");
1874 return -EINVAL;
1875 }
1876}
1877
1878/*
1879 * Bring MMC host in PM state (one level deeper).
1880 */
1881static int omap_hsmmc_disable(struct mmc_host *mmc, int lazy)
1882{
1883 struct omap_hsmmc_host *host = mmc_priv(mmc);
1884
1885 switch (host->dpm_state) {
1886 case ENABLED: {
1887 int delay;
1888
1889 delay = omap_hsmmc_enabled_to_disabled(host);
1890 if (lazy || delay < 0)
1891 return delay;
1892 return 0;
1893 }
1894 case DISABLED:
1895 return omap_hsmmc_disabled_to_sleep(host);
1896 case CARDSLEEP:
1897 case REGSLEEP:
1898 return omap_hsmmc_sleep_to_off(host);
1899 default:
1900 dev_dbg(mmc_dev(host->mmc), "UNKNOWN state\n");
1901 return -EINVAL;
1902 }
1903}
1904
1905static int omap_hsmmc_enable_fclk(struct mmc_host *mmc) 1757static int omap_hsmmc_enable_fclk(struct mmc_host *mmc)
1906{ 1758{
1907 struct omap_hsmmc_host *host = mmc_priv(mmc); 1759 struct omap_hsmmc_host *host = mmc_priv(mmc);
1908 int err;
1909 1760
1910 err = clk_enable(host->fclk); 1761 pm_runtime_get_sync(host->dev);
1911 if (err) 1762
1912 return err;
1913 dev_dbg(mmc_dev(host->mmc), "mmc_fclk: enabled\n");
1914 omap_hsmmc_context_restore(host);
1915 return 0; 1763 return 0;
1916} 1764}
1917 1765
@@ -1919,26 +1767,17 @@ static int omap_hsmmc_disable_fclk(struct mmc_host *mmc, int lazy)
1919{ 1767{
1920 struct omap_hsmmc_host *host = mmc_priv(mmc); 1768 struct omap_hsmmc_host *host = mmc_priv(mmc);
1921 1769
1922 omap_hsmmc_context_save(host); 1770 pm_runtime_mark_last_busy(host->dev);
1923 clk_disable(host->fclk); 1771 pm_runtime_put_autosuspend(host->dev);
1924 dev_dbg(mmc_dev(host->mmc), "mmc_fclk: disabled\n"); 1772
1925 return 0; 1773 return 0;
1926} 1774}
1927 1775
1928static const struct mmc_host_ops omap_hsmmc_ops = { 1776static const struct mmc_host_ops omap_hsmmc_ops = {
1929 .enable = omap_hsmmc_enable_fclk, 1777 .enable = omap_hsmmc_enable_fclk,
1930 .disable = omap_hsmmc_disable_fclk, 1778 .disable = omap_hsmmc_disable_fclk,
1931 .request = omap_hsmmc_request, 1779 .post_req = omap_hsmmc_post_req,
1932 .set_ios = omap_hsmmc_set_ios, 1780 .pre_req = omap_hsmmc_pre_req,
1933 .get_cd = omap_hsmmc_get_cd,
1934 .get_ro = omap_hsmmc_get_ro,
1935 .init_card = omap_hsmmc_init_card,
1936 /* NYET -- enable_sdio_irq */
1937};
1938
1939static const struct mmc_host_ops omap_hsmmc_ps_ops = {
1940 .enable = omap_hsmmc_enable,
1941 .disable = omap_hsmmc_disable,
1942 .request = omap_hsmmc_request, 1781 .request = omap_hsmmc_request,
1943 .set_ios = omap_hsmmc_set_ios, 1782 .set_ios = omap_hsmmc_set_ios,
1944 .get_cd = omap_hsmmc_get_cd, 1783 .get_cd = omap_hsmmc_get_cd,
@@ -1968,15 +1807,12 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
1968 host->dpm_state, mmc->nesting_cnt, 1807 host->dpm_state, mmc->nesting_cnt,
1969 host->context_loss, context_loss); 1808 host->context_loss, context_loss);
1970 1809
1971 if (host->suspended || host->dpm_state == OFF) { 1810 if (host->suspended) {
1972 seq_printf(s, "host suspended, can't read registers\n"); 1811 seq_printf(s, "host suspended, can't read registers\n");
1973 return 0; 1812 return 0;
1974 } 1813 }
1975 1814
1976 if (clk_enable(host->fclk) != 0) { 1815 pm_runtime_get_sync(host->dev);
1977 seq_printf(s, "can't read the regs\n");
1978 return 0;
1979 }
1980 1816
1981 seq_printf(s, "SYSCONFIG:\t0x%08x\n", 1817 seq_printf(s, "SYSCONFIG:\t0x%08x\n",
1982 OMAP_HSMMC_READ(host->base, SYSCONFIG)); 1818 OMAP_HSMMC_READ(host->base, SYSCONFIG));
@@ -1993,7 +1829,8 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
1993 seq_printf(s, "CAPA:\t\t0x%08x\n", 1829 seq_printf(s, "CAPA:\t\t0x%08x\n",
1994 OMAP_HSMMC_READ(host->base, CAPA)); 1830 OMAP_HSMMC_READ(host->base, CAPA));
1995 1831
1996 clk_disable(host->fclk); 1832 pm_runtime_mark_last_busy(host->dev);
1833 pm_runtime_put_autosuspend(host->dev);
1997 1834
1998 return 0; 1835 return 0;
1999} 1836}
@@ -2077,14 +1914,12 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2077 host->mapbase = res->start; 1914 host->mapbase = res->start;
2078 host->base = ioremap(host->mapbase, SZ_4K); 1915 host->base = ioremap(host->mapbase, SZ_4K);
2079 host->power_mode = MMC_POWER_OFF; 1916 host->power_mode = MMC_POWER_OFF;
1917 host->next_data.cookie = 1;
2080 1918
2081 platform_set_drvdata(pdev, host); 1919 platform_set_drvdata(pdev, host);
2082 INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect); 1920 INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect);
2083 1921
2084 if (mmc_slot(host).power_saving) 1922 mmc->ops = &omap_hsmmc_ops;
2085 mmc->ops = &omap_hsmmc_ps_ops;
2086 else
2087 mmc->ops = &omap_hsmmc_ops;
2088 1923
2089 /* 1924 /*
2090 * If regulator_disable can only put vcc_aux to sleep then there is 1925 * If regulator_disable can only put vcc_aux to sleep then there is
@@ -2093,44 +1928,26 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2093 if (mmc_slot(host).vcc_aux_disable_is_sleep) 1928 if (mmc_slot(host).vcc_aux_disable_is_sleep)
2094 mmc_slot(host).no_off = 1; 1929 mmc_slot(host).no_off = 1;
2095 1930
2096 mmc->f_min = 400000; 1931 mmc->f_min = OMAP_MMC_MIN_CLOCK;
2097 mmc->f_max = 52000000; 1932 mmc->f_max = OMAP_MMC_MAX_CLOCK;
2098 1933
2099 spin_lock_init(&host->irq_lock); 1934 spin_lock_init(&host->irq_lock);
2100 1935
2101 host->iclk = clk_get(&pdev->dev, "ick");
2102 if (IS_ERR(host->iclk)) {
2103 ret = PTR_ERR(host->iclk);
2104 host->iclk = NULL;
2105 goto err1;
2106 }
2107 host->fclk = clk_get(&pdev->dev, "fck"); 1936 host->fclk = clk_get(&pdev->dev, "fck");
2108 if (IS_ERR(host->fclk)) { 1937 if (IS_ERR(host->fclk)) {
2109 ret = PTR_ERR(host->fclk); 1938 ret = PTR_ERR(host->fclk);
2110 host->fclk = NULL; 1939 host->fclk = NULL;
2111 clk_put(host->iclk);
2112 goto err1; 1940 goto err1;
2113 } 1941 }
2114 1942
2115 omap_hsmmc_context_save(host); 1943 omap_hsmmc_context_save(host);
2116 1944
2117 mmc->caps |= MMC_CAP_DISABLE; 1945 mmc->caps |= MMC_CAP_DISABLE;
2118 mmc_set_disable_delay(mmc, OMAP_MMC_DISABLED_TIMEOUT);
2119 /* we start off in DISABLED state */
2120 host->dpm_state = DISABLED;
2121 1946
2122 if (clk_enable(host->iclk) != 0) { 1947 pm_runtime_enable(host->dev);
2123 clk_put(host->iclk); 1948 pm_runtime_get_sync(host->dev);
2124 clk_put(host->fclk); 1949 pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY);
2125 goto err1; 1950 pm_runtime_use_autosuspend(host->dev);
2126 }
2127
2128 if (mmc_host_enable(host->mmc) != 0) {
2129 clk_disable(host->iclk);
2130 clk_put(host->iclk);
2131 clk_put(host->fclk);
2132 goto err1;
2133 }
2134 1951
2135 if (cpu_is_omap2430()) { 1952 if (cpu_is_omap2430()) {
2136 host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck"); 1953 host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
@@ -2240,8 +2057,6 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2240 2057
2241 omap_hsmmc_disable_irq(host); 2058 omap_hsmmc_disable_irq(host);
2242 2059
2243 mmc_host_lazy_disable(host->mmc);
2244
2245 omap_hsmmc_protect_card(host); 2060 omap_hsmmc_protect_card(host);
2246 2061
2247 mmc_add_host(mmc); 2062 mmc_add_host(mmc);
@@ -2259,6 +2074,8 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2259 } 2074 }
2260 2075
2261 omap_hsmmc_debugfs(mmc); 2076 omap_hsmmc_debugfs(mmc);
2077 pm_runtime_mark_last_busy(host->dev);
2078 pm_runtime_put_autosuspend(host->dev);
2262 2079
2263 return 0; 2080 return 0;
2264 2081
@@ -2274,10 +2091,9 @@ err_reg:
2274err_irq_cd_init: 2091err_irq_cd_init:
2275 free_irq(host->irq, host); 2092 free_irq(host->irq, host);
2276err_irq: 2093err_irq:
2277 mmc_host_disable(host->mmc); 2094 pm_runtime_mark_last_busy(host->dev);
2278 clk_disable(host->iclk); 2095 pm_runtime_put_autosuspend(host->dev);
2279 clk_put(host->fclk); 2096 clk_put(host->fclk);
2280 clk_put(host->iclk);
2281 if (host->got_dbclk) { 2097 if (host->got_dbclk) {
2282 clk_disable(host->dbclk); 2098 clk_disable(host->dbclk);
2283 clk_put(host->dbclk); 2099 clk_put(host->dbclk);
@@ -2299,7 +2115,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2299 struct resource *res; 2115 struct resource *res;
2300 2116
2301 if (host) { 2117 if (host) {
2302 mmc_host_enable(host->mmc); 2118 pm_runtime_get_sync(host->dev);
2303 mmc_remove_host(host->mmc); 2119 mmc_remove_host(host->mmc);
2304 if (host->use_reg) 2120 if (host->use_reg)
2305 omap_hsmmc_reg_put(host); 2121 omap_hsmmc_reg_put(host);
@@ -2310,10 +2126,9 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2310 free_irq(mmc_slot(host).card_detect_irq, host); 2126 free_irq(mmc_slot(host).card_detect_irq, host);
2311 flush_work_sync(&host->mmc_carddetect_work); 2127 flush_work_sync(&host->mmc_carddetect_work);
2312 2128
2313 mmc_host_disable(host->mmc); 2129 pm_runtime_put_sync(host->dev);
2314 clk_disable(host->iclk); 2130 pm_runtime_disable(host->dev);
2315 clk_put(host->fclk); 2131 clk_put(host->fclk);
2316 clk_put(host->iclk);
2317 if (host->got_dbclk) { 2132 if (host->got_dbclk) {
2318 clk_disable(host->dbclk); 2133 clk_disable(host->dbclk);
2319 clk_put(host->dbclk); 2134 clk_put(host->dbclk);
@@ -2343,6 +2158,7 @@ static int omap_hsmmc_suspend(struct device *dev)
2343 return 0; 2158 return 0;
2344 2159
2345 if (host) { 2160 if (host) {
2161 pm_runtime_get_sync(host->dev);
2346 host->suspended = 1; 2162 host->suspended = 1;
2347 if (host->pdata->suspend) { 2163 if (host->pdata->suspend) {
2348 ret = host->pdata->suspend(&pdev->dev, 2164 ret = host->pdata->suspend(&pdev->dev,
@@ -2357,13 +2173,11 @@ static int omap_hsmmc_suspend(struct device *dev)
2357 } 2173 }
2358 cancel_work_sync(&host->mmc_carddetect_work); 2174 cancel_work_sync(&host->mmc_carddetect_work);
2359 ret = mmc_suspend_host(host->mmc); 2175 ret = mmc_suspend_host(host->mmc);
2360 mmc_host_enable(host->mmc); 2176
2361 if (ret == 0) { 2177 if (ret == 0) {
2362 omap_hsmmc_disable_irq(host); 2178 omap_hsmmc_disable_irq(host);
2363 OMAP_HSMMC_WRITE(host->base, HCTL, 2179 OMAP_HSMMC_WRITE(host->base, HCTL,
2364 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); 2180 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
2365 mmc_host_disable(host->mmc);
2366 clk_disable(host->iclk);
2367 if (host->got_dbclk) 2181 if (host->got_dbclk)
2368 clk_disable(host->dbclk); 2182 clk_disable(host->dbclk);
2369 } else { 2183 } else {
@@ -2375,9 +2189,8 @@ static int omap_hsmmc_suspend(struct device *dev)
2375 dev_dbg(mmc_dev(host->mmc), 2189 dev_dbg(mmc_dev(host->mmc),
2376 "Unmask interrupt failed\n"); 2190 "Unmask interrupt failed\n");
2377 } 2191 }
2378 mmc_host_disable(host->mmc);
2379 } 2192 }
2380 2193 pm_runtime_put_sync(host->dev);
2381 } 2194 }
2382 return ret; 2195 return ret;
2383} 2196}
@@ -2393,14 +2206,7 @@ static int omap_hsmmc_resume(struct device *dev)
2393 return 0; 2206 return 0;
2394 2207
2395 if (host) { 2208 if (host) {
2396 ret = clk_enable(host->iclk); 2209 pm_runtime_get_sync(host->dev);
2397 if (ret)
2398 goto clk_en_err;
2399
2400 if (mmc_host_enable(host->mmc) != 0) {
2401 clk_disable(host->iclk);
2402 goto clk_en_err;
2403 }
2404 2210
2405 if (host->got_dbclk) 2211 if (host->got_dbclk)
2406 clk_enable(host->dbclk); 2212 clk_enable(host->dbclk);
@@ -2421,15 +2227,12 @@ static int omap_hsmmc_resume(struct device *dev)
2421 if (ret == 0) 2227 if (ret == 0)
2422 host->suspended = 0; 2228 host->suspended = 0;
2423 2229
2424 mmc_host_lazy_disable(host->mmc); 2230 pm_runtime_mark_last_busy(host->dev);
2231 pm_runtime_put_autosuspend(host->dev);
2425 } 2232 }
2426 2233
2427 return ret; 2234 return ret;
2428 2235
2429clk_en_err:
2430 dev_dbg(mmc_dev(host->mmc),
2431 "Failed to enable MMC clocks during resume\n");
2432 return ret;
2433} 2236}
2434 2237
2435#else 2238#else
@@ -2437,9 +2240,33 @@ clk_en_err:
2437#define omap_hsmmc_resume NULL 2240#define omap_hsmmc_resume NULL
2438#endif 2241#endif
2439 2242
2243static int omap_hsmmc_runtime_suspend(struct device *dev)
2244{
2245 struct omap_hsmmc_host *host;
2246
2247 host = platform_get_drvdata(to_platform_device(dev));
2248 omap_hsmmc_context_save(host);
2249 dev_dbg(mmc_dev(host->mmc), "disabled\n");
2250
2251 return 0;
2252}
2253
2254static int omap_hsmmc_runtime_resume(struct device *dev)
2255{
2256 struct omap_hsmmc_host *host;
2257
2258 host = platform_get_drvdata(to_platform_device(dev));
2259 omap_hsmmc_context_restore(host);
2260 dev_dbg(mmc_dev(host->mmc), "enabled\n");
2261
2262 return 0;
2263}
2264
2440static struct dev_pm_ops omap_hsmmc_dev_pm_ops = { 2265static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
2441 .suspend = omap_hsmmc_suspend, 2266 .suspend = omap_hsmmc_suspend,
2442 .resume = omap_hsmmc_resume, 2267 .resume = omap_hsmmc_resume,
2268 .runtime_suspend = omap_hsmmc_runtime_suspend,
2269 .runtime_resume = omap_hsmmc_runtime_resume,
2443}; 2270};
2444 2271
2445static struct platform_driver omap_hsmmc_driver = { 2272static struct platform_driver omap_hsmmc_driver = {
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index 9ebd1d7759d..4b920b7621c 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -15,9 +15,7 @@
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/mmc/host.h> 17#include <linux/mmc/host.h>
18#include <linux/mmc/sdhci-pltfm.h>
19#include <mach/cns3xxx.h> 18#include <mach/cns3xxx.h>
20#include "sdhci.h"
21#include "sdhci-pltfm.h" 19#include "sdhci-pltfm.h"
22 20
23static unsigned int sdhci_cns3xxx_get_max_clk(struct sdhci_host *host) 21static unsigned int sdhci_cns3xxx_get_max_clk(struct sdhci_host *host)
@@ -86,7 +84,7 @@ static struct sdhci_ops sdhci_cns3xxx_ops = {
86 .set_clock = sdhci_cns3xxx_set_clock, 84 .set_clock = sdhci_cns3xxx_set_clock,
87}; 85};
88 86
89struct sdhci_pltfm_data sdhci_cns3xxx_pdata = { 87static struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
90 .ops = &sdhci_cns3xxx_ops, 88 .ops = &sdhci_cns3xxx_ops,
91 .quirks = SDHCI_QUIRK_BROKEN_DMA | 89 .quirks = SDHCI_QUIRK_BROKEN_DMA |
92 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | 90 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
@@ -95,3 +93,43 @@ struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
95 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 93 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
96 SDHCI_QUIRK_NONSTANDARD_CLOCK, 94 SDHCI_QUIRK_NONSTANDARD_CLOCK,
97}; 95};
96
97static int __devinit sdhci_cns3xxx_probe(struct platform_device *pdev)
98{
99 return sdhci_pltfm_register(pdev, &sdhci_cns3xxx_pdata);
100}
101
102static int __devexit sdhci_cns3xxx_remove(struct platform_device *pdev)
103{
104 return sdhci_pltfm_unregister(pdev);
105}
106
107static struct platform_driver sdhci_cns3xxx_driver = {
108 .driver = {
109 .name = "sdhci-cns3xxx",
110 .owner = THIS_MODULE,
111 },
112 .probe = sdhci_cns3xxx_probe,
113 .remove = __devexit_p(sdhci_cns3xxx_remove),
114#ifdef CONFIG_PM
115 .suspend = sdhci_pltfm_suspend,
116 .resume = sdhci_pltfm_resume,
117#endif
118};
119
120static int __init sdhci_cns3xxx_init(void)
121{
122 return platform_driver_register(&sdhci_cns3xxx_driver);
123}
124module_init(sdhci_cns3xxx_init);
125
126static void __exit sdhci_cns3xxx_exit(void)
127{
128 platform_driver_unregister(&sdhci_cns3xxx_driver);
129}
130module_exit(sdhci_cns3xxx_exit);
131
132MODULE_DESCRIPTION("SDHCI driver for CNS3xxx");
133MODULE_AUTHOR("Scott Shu, "
134 "Anton Vorontsov <avorontsov@mvista.com>");
135MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index 2aeef4ffed8..f2d29dca442 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -22,7 +22,6 @@
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/mmc/host.h> 23#include <linux/mmc/host.h>
24 24
25#include "sdhci.h"
26#include "sdhci-pltfm.h" 25#include "sdhci-pltfm.h"
27 26
28static u16 sdhci_dove_readw(struct sdhci_host *host, int reg) 27static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
@@ -61,10 +60,50 @@ static struct sdhci_ops sdhci_dove_ops = {
61 .read_l = sdhci_dove_readl, 60 .read_l = sdhci_dove_readl,
62}; 61};
63 62
64struct sdhci_pltfm_data sdhci_dove_pdata = { 63static struct sdhci_pltfm_data sdhci_dove_pdata = {
65 .ops = &sdhci_dove_ops, 64 .ops = &sdhci_dove_ops,
66 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | 65 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
67 SDHCI_QUIRK_NO_BUSY_IRQ | 66 SDHCI_QUIRK_NO_BUSY_IRQ |
68 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 67 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
69 SDHCI_QUIRK_FORCE_DMA, 68 SDHCI_QUIRK_FORCE_DMA,
70}; 69};
70
71static int __devinit sdhci_dove_probe(struct platform_device *pdev)
72{
73 return sdhci_pltfm_register(pdev, &sdhci_dove_pdata);
74}
75
76static int __devexit sdhci_dove_remove(struct platform_device *pdev)
77{
78 return sdhci_pltfm_unregister(pdev);
79}
80
81static struct platform_driver sdhci_dove_driver = {
82 .driver = {
83 .name = "sdhci-dove",
84 .owner = THIS_MODULE,
85 },
86 .probe = sdhci_dove_probe,
87 .remove = __devexit_p(sdhci_dove_remove),
88#ifdef CONFIG_PM
89 .suspend = sdhci_pltfm_suspend,
90 .resume = sdhci_pltfm_resume,
91#endif
92};
93
94static int __init sdhci_dove_init(void)
95{
96 return platform_driver_register(&sdhci_dove_driver);
97}
98module_init(sdhci_dove_init);
99
100static void __exit sdhci_dove_exit(void)
101{
102 platform_driver_unregister(&sdhci_dove_driver);
103}
104module_exit(sdhci_dove_exit);
105
106MODULE_DESCRIPTION("SDHCI driver for Dove");
107MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>, "
108 "Mike Rapoport <mike@compulab.co.il>");
109MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index a19967d0bfc..4dc0028086a 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -16,22 +16,23 @@
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/clk.h> 17#include <linux/clk.h>
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/module.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/mmc/host.h> 21#include <linux/mmc/host.h>
21#include <linux/mmc/sdhci-pltfm.h>
22#include <linux/mmc/mmc.h> 22#include <linux/mmc/mmc.h>
23#include <linux/mmc/sdio.h> 23#include <linux/mmc/sdio.h>
24#include <mach/hardware.h> 24#include <linux/of.h>
25#include <linux/of_device.h>
26#include <linux/of_gpio.h>
25#include <mach/esdhc.h> 27#include <mach/esdhc.h>
26#include "sdhci.h"
27#include "sdhci-pltfm.h" 28#include "sdhci-pltfm.h"
28#include "sdhci-esdhc.h" 29#include "sdhci-esdhc.h"
29 30
31#define SDHCI_CTRL_D3CD 0x08
30/* VENDOR SPEC register */ 32/* VENDOR SPEC register */
31#define SDHCI_VENDOR_SPEC 0xC0 33#define SDHCI_VENDOR_SPEC 0xC0
32#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 34#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002
33 35
34#define ESDHC_FLAG_GPIO_FOR_CD_WP (1 << 0)
35/* 36/*
36 * The CMDTYPE of the CMD register (offset 0xE) should be set to 37 * The CMDTYPE of the CMD register (offset 0xE) should be set to
37 * "11" when the STOP CMD12 is issued on imx53 to abort one 38 * "11" when the STOP CMD12 is issued on imx53 to abort one
@@ -45,11 +46,68 @@
45 */ 46 */
46#define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1) 47#define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1)
47 48
49enum imx_esdhc_type {
50 IMX25_ESDHC,
51 IMX35_ESDHC,
52 IMX51_ESDHC,
53 IMX53_ESDHC,
54};
55
48struct pltfm_imx_data { 56struct pltfm_imx_data {
49 int flags; 57 int flags;
50 u32 scratchpad; 58 u32 scratchpad;
59 enum imx_esdhc_type devtype;
60 struct esdhc_platform_data boarddata;
51}; 61};
52 62
63static struct platform_device_id imx_esdhc_devtype[] = {
64 {
65 .name = "sdhci-esdhc-imx25",
66 .driver_data = IMX25_ESDHC,
67 }, {
68 .name = "sdhci-esdhc-imx35",
69 .driver_data = IMX35_ESDHC,
70 }, {
71 .name = "sdhci-esdhc-imx51",
72 .driver_data = IMX51_ESDHC,
73 }, {
74 .name = "sdhci-esdhc-imx53",
75 .driver_data = IMX53_ESDHC,
76 }, {
77 /* sentinel */
78 }
79};
80MODULE_DEVICE_TABLE(platform, imx_esdhc_devtype);
81
82static const struct of_device_id imx_esdhc_dt_ids[] = {
83 { .compatible = "fsl,imx25-esdhc", .data = &imx_esdhc_devtype[IMX25_ESDHC], },
84 { .compatible = "fsl,imx35-esdhc", .data = &imx_esdhc_devtype[IMX35_ESDHC], },
85 { .compatible = "fsl,imx51-esdhc", .data = &imx_esdhc_devtype[IMX51_ESDHC], },
86 { .compatible = "fsl,imx53-esdhc", .data = &imx_esdhc_devtype[IMX53_ESDHC], },
87 { /* sentinel */ }
88};
89MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);
90
91static inline int is_imx25_esdhc(struct pltfm_imx_data *data)
92{
93 return data->devtype == IMX25_ESDHC;
94}
95
96static inline int is_imx35_esdhc(struct pltfm_imx_data *data)
97{
98 return data->devtype == IMX35_ESDHC;
99}
100
101static inline int is_imx51_esdhc(struct pltfm_imx_data *data)
102{
103 return data->devtype == IMX51_ESDHC;
104}
105
106static inline int is_imx53_esdhc(struct pltfm_imx_data *data)
107{
108 return data->devtype == IMX53_ESDHC;
109}
110
53static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) 111static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg)
54{ 112{
55 void __iomem *base = host->ioaddr + (reg & ~0x3); 113 void __iomem *base = host->ioaddr + (reg & ~0x3);
@@ -62,19 +120,16 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
62{ 120{
63 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 121 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
64 struct pltfm_imx_data *imx_data = pltfm_host->priv; 122 struct pltfm_imx_data *imx_data = pltfm_host->priv;
123 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
65 124
66 /* fake CARD_PRESENT flag on mx25/35 */ 125 /* fake CARD_PRESENT flag */
67 u32 val = readl(host->ioaddr + reg); 126 u32 val = readl(host->ioaddr + reg);
68 127
69 if (unlikely((reg == SDHCI_PRESENT_STATE) 128 if (unlikely((reg == SDHCI_PRESENT_STATE)
70 && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) { 129 && gpio_is_valid(boarddata->cd_gpio))) {
71 struct esdhc_platform_data *boarddata = 130 if (gpio_get_value(boarddata->cd_gpio))
72 host->mmc->parent->platform_data;
73
74 if (boarddata && gpio_is_valid(boarddata->cd_gpio)
75 && gpio_get_value(boarddata->cd_gpio))
76 /* no card, if a valid gpio says so... */ 131 /* no card, if a valid gpio says so... */
77 val &= SDHCI_CARD_PRESENT; 132 val &= ~SDHCI_CARD_PRESENT;
78 else 133 else
79 /* ... in all other cases assume card is present */ 134 /* ... in all other cases assume card is present */
80 val |= SDHCI_CARD_PRESENT; 135 val |= SDHCI_CARD_PRESENT;
@@ -87,14 +142,33 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
87{ 142{
88 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 143 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
89 struct pltfm_imx_data *imx_data = pltfm_host->priv; 144 struct pltfm_imx_data *imx_data = pltfm_host->priv;
90 145 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
91 if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE) 146 u32 data;
92 && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) 147
93 /* 148 if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
94 * these interrupts won't work with a custom card_detect gpio 149 if (boarddata->cd_type == ESDHC_CD_GPIO)
95 * (only applied to mx25/35) 150 /*
96 */ 151 * These interrupts won't work with a custom
97 val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 152 * card_detect gpio (only applied to mx25/35)
153 */
154 val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
155
156 if (val & SDHCI_INT_CARD_INT) {
157 /*
158 * Clear and then set D3CD bit to avoid missing the
159 * card interrupt. This is a eSDHC controller problem
160 * so we need to apply the following workaround: clear
161 * and set D3CD bit will make eSDHC re-sample the card
162 * interrupt. In case a card interrupt was lost,
163 * re-sample it by the following steps.
164 */
165 data = readl(host->ioaddr + SDHCI_HOST_CONTROL);
166 data &= ~SDHCI_CTRL_D3CD;
167 writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
168 data |= SDHCI_CTRL_D3CD;
169 writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
170 }
171 }
98 172
99 if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) 173 if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
100 && (reg == SDHCI_INT_STATUS) 174 && (reg == SDHCI_INT_STATUS)
@@ -164,8 +238,10 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
164 */ 238 */
165 return; 239 return;
166 case SDHCI_HOST_CONTROL: 240 case SDHCI_HOST_CONTROL:
167 /* FSL messed up here, so we can just keep those two */ 241 /* FSL messed up here, so we can just keep those three */
168 new_val = val & (SDHCI_CTRL_LED | SDHCI_CTRL_4BITBUS); 242 new_val = val & (SDHCI_CTRL_LED | \
243 SDHCI_CTRL_4BITBUS | \
244 SDHCI_CTRL_D3CD);
169 /* ensure the endianess */ 245 /* ensure the endianess */
170 new_val |= ESDHC_HOST_CONTROL_LE; 246 new_val |= ESDHC_HOST_CONTROL_LE;
171 /* DMA mode bits are shifted */ 247 /* DMA mode bits are shifted */
@@ -175,6 +251,17 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
175 return; 251 return;
176 } 252 }
177 esdhc_clrset_le(host, 0xff, val, reg); 253 esdhc_clrset_le(host, 0xff, val, reg);
254
255 /*
256 * The esdhc has a design violation to SDHC spec which tells
257 * that software reset should not affect card detection circuit.
258 * But esdhc clears its SYSCTL register bits [0..2] during the
259 * software reset. This will stop those clocks that card detection
260 * circuit relies on. To work around it, we turn the clocks on back
261 * to keep card detection circuit functional.
262 */
263 if ((reg == SDHCI_SOFTWARE_RESET) && (val & 1))
264 esdhc_clrset_le(host, 0x7, 0x7, ESDHC_SYSTEM_CONTROL);
178} 265}
179 266
180static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host) 267static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
@@ -193,12 +280,22 @@ static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
193 280
194static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) 281static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
195{ 282{
196 struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; 283 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
284 struct pltfm_imx_data *imx_data = pltfm_host->priv;
285 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
286
287 switch (boarddata->wp_type) {
288 case ESDHC_WP_GPIO:
289 if (gpio_is_valid(boarddata->wp_gpio))
290 return gpio_get_value(boarddata->wp_gpio);
291 case ESDHC_WP_CONTROLLER:
292 return !(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
293 SDHCI_WRITE_PROTECT);
294 case ESDHC_WP_NONE:
295 break;
296 }
197 297
198 if (boarddata && gpio_is_valid(boarddata->wp_gpio)) 298 return -ENOSYS;
199 return gpio_get_value(boarddata->wp_gpio);
200 else
201 return -ENOSYS;
202} 299}
203 300
204static struct sdhci_ops sdhci_esdhc_ops = { 301static struct sdhci_ops sdhci_esdhc_ops = {
@@ -210,6 +307,14 @@ static struct sdhci_ops sdhci_esdhc_ops = {
210 .set_clock = esdhc_set_clock, 307 .set_clock = esdhc_set_clock,
211 .get_max_clock = esdhc_pltfm_get_max_clock, 308 .get_max_clock = esdhc_pltfm_get_max_clock,
212 .get_min_clock = esdhc_pltfm_get_min_clock, 309 .get_min_clock = esdhc_pltfm_get_min_clock,
310 .get_ro = esdhc_pltfm_get_ro,
311};
312
313static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
314 .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA
315 | SDHCI_QUIRK_BROKEN_CARD_DETECTION,
316 /* ADMA has issues. Might be fixable */
317 .ops = &sdhci_esdhc_ops,
213}; 318};
214 319
215static irqreturn_t cd_irq(int irq, void *data) 320static irqreturn_t cd_irq(int irq, void *data)
@@ -220,112 +325,228 @@ static irqreturn_t cd_irq(int irq, void *data)
220 return IRQ_HANDLED; 325 return IRQ_HANDLED;
221}; 326};
222 327
223static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata) 328#ifdef CONFIG_OF
329static int __devinit
330sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
331 struct esdhc_platform_data *boarddata)
224{ 332{
225 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 333 struct device_node *np = pdev->dev.of_node;
226 struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; 334
335 if (!np)
336 return -ENODEV;
337
338 if (of_get_property(np, "fsl,card-wired", NULL))
339 boarddata->cd_type = ESDHC_CD_PERMANENT;
340
341 if (of_get_property(np, "fsl,cd-controller", NULL))
342 boarddata->cd_type = ESDHC_CD_CONTROLLER;
343
344 if (of_get_property(np, "fsl,wp-controller", NULL))
345 boarddata->wp_type = ESDHC_WP_CONTROLLER;
346
347 boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
348 if (gpio_is_valid(boarddata->cd_gpio))
349 boarddata->cd_type = ESDHC_CD_GPIO;
350
351 boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
352 if (gpio_is_valid(boarddata->wp_gpio))
353 boarddata->wp_type = ESDHC_WP_GPIO;
354
355 return 0;
356}
357#else
358static inline int
359sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
360 struct esdhc_platform_data *boarddata)
361{
362 return -ENODEV;
363}
364#endif
365
366static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
367{
368 const struct of_device_id *of_id =
369 of_match_device(imx_esdhc_dt_ids, &pdev->dev);
370 struct sdhci_pltfm_host *pltfm_host;
371 struct sdhci_host *host;
372 struct esdhc_platform_data *boarddata;
227 struct clk *clk; 373 struct clk *clk;
228 int err; 374 int err;
229 struct pltfm_imx_data *imx_data; 375 struct pltfm_imx_data *imx_data;
230 376
377 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata);
378 if (IS_ERR(host))
379 return PTR_ERR(host);
380
381 pltfm_host = sdhci_priv(host);
382
383 imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL);
384 if (!imx_data) {
385 err = -ENOMEM;
386 goto err_imx_data;
387 }
388
389 if (of_id)
390 pdev->id_entry = of_id->data;
391 imx_data->devtype = pdev->id_entry->driver_data;
392 pltfm_host->priv = imx_data;
393
231 clk = clk_get(mmc_dev(host->mmc), NULL); 394 clk = clk_get(mmc_dev(host->mmc), NULL);
232 if (IS_ERR(clk)) { 395 if (IS_ERR(clk)) {
233 dev_err(mmc_dev(host->mmc), "clk err\n"); 396 dev_err(mmc_dev(host->mmc), "clk err\n");
234 return PTR_ERR(clk); 397 err = PTR_ERR(clk);
398 goto err_clk_get;
235 } 399 }
236 clk_enable(clk); 400 clk_enable(clk);
237 pltfm_host->clk = clk; 401 pltfm_host->clk = clk;
238 402
239 imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL); 403 if (!is_imx25_esdhc(imx_data))
240 if (!imx_data) {
241 clk_disable(pltfm_host->clk);
242 clk_put(pltfm_host->clk);
243 return -ENOMEM;
244 }
245 pltfm_host->priv = imx_data;
246
247 if (!cpu_is_mx25())
248 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; 404 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
249 405
250 if (cpu_is_mx25() || cpu_is_mx35()) { 406 if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data))
251 /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */ 407 /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
252 host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK; 408 host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
253 /* write_protect can't be routed to controller, use gpio */
254 sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro;
255 }
256 409
257 if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51())) 410 if (is_imx53_esdhc(imx_data))
258 imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; 411 imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT;
259 412
260 if (boarddata) { 413 boarddata = &imx_data->boarddata;
414 if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
415 if (!host->mmc->parent->platform_data) {
416 dev_err(mmc_dev(host->mmc), "no board data!\n");
417 err = -EINVAL;
418 goto no_board_data;
419 }
420 imx_data->boarddata = *((struct esdhc_platform_data *)
421 host->mmc->parent->platform_data);
422 }
423
424 /* write_protect */
425 if (boarddata->wp_type == ESDHC_WP_GPIO) {
261 err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP"); 426 err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP");
262 if (err) { 427 if (err) {
263 dev_warn(mmc_dev(host->mmc), 428 dev_warn(mmc_dev(host->mmc),
264 "no write-protect pin available!\n"); 429 "no write-protect pin available!\n");
265 boarddata->wp_gpio = err; 430 boarddata->wp_gpio = -EINVAL;
266 } 431 }
432 } else {
433 boarddata->wp_gpio = -EINVAL;
434 }
267 435
436 /* card_detect */
437 if (boarddata->cd_type != ESDHC_CD_GPIO)
438 boarddata->cd_gpio = -EINVAL;
439
440 switch (boarddata->cd_type) {
441 case ESDHC_CD_GPIO:
268 err = gpio_request_one(boarddata->cd_gpio, GPIOF_IN, "ESDHC_CD"); 442 err = gpio_request_one(boarddata->cd_gpio, GPIOF_IN, "ESDHC_CD");
269 if (err) { 443 if (err) {
270 dev_warn(mmc_dev(host->mmc), 444 dev_err(mmc_dev(host->mmc),
271 "no card-detect pin available!\n"); 445 "no card-detect pin available!\n");
272 goto no_card_detect_pin; 446 goto no_card_detect_pin;
273 } 447 }
274 448
275 /* i.MX5x has issues to be researched */
276 if (!cpu_is_mx25() && !cpu_is_mx35())
277 goto not_supported;
278
279 err = request_irq(gpio_to_irq(boarddata->cd_gpio), cd_irq, 449 err = request_irq(gpio_to_irq(boarddata->cd_gpio), cd_irq,
280 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, 450 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
281 mmc_hostname(host->mmc), host); 451 mmc_hostname(host->mmc), host);
282 if (err) { 452 if (err) {
283 dev_warn(mmc_dev(host->mmc), "request irq error\n"); 453 dev_err(mmc_dev(host->mmc), "request irq error\n");
284 goto no_card_detect_irq; 454 goto no_card_detect_irq;
285 } 455 }
456 /* fall through */
286 457
287 imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD_WP; 458 case ESDHC_CD_CONTROLLER:
288 /* Now we have a working card_detect again */ 459 /* we have a working card_detect back */
289 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 460 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
461 break;
462
463 case ESDHC_CD_PERMANENT:
464 host->mmc->caps = MMC_CAP_NONREMOVABLE;
465 break;
466
467 case ESDHC_CD_NONE:
468 break;
290 } 469 }
291 470
471 err = sdhci_add_host(host);
472 if (err)
473 goto err_add_host;
474
292 return 0; 475 return 0;
293 476
294 no_card_detect_irq: 477err_add_host:
295 gpio_free(boarddata->cd_gpio); 478 if (gpio_is_valid(boarddata->cd_gpio))
296 no_card_detect_pin: 479 free_irq(gpio_to_irq(boarddata->cd_gpio), host);
297 boarddata->cd_gpio = err; 480no_card_detect_irq:
298 not_supported: 481 if (gpio_is_valid(boarddata->cd_gpio))
482 gpio_free(boarddata->cd_gpio);
483 if (gpio_is_valid(boarddata->wp_gpio))
484 gpio_free(boarddata->wp_gpio);
485no_card_detect_pin:
486no_board_data:
487 clk_disable(pltfm_host->clk);
488 clk_put(pltfm_host->clk);
489err_clk_get:
299 kfree(imx_data); 490 kfree(imx_data);
300 return 0; 491err_imx_data:
492 sdhci_pltfm_free(pdev);
493 return err;
301} 494}
302 495
303static void esdhc_pltfm_exit(struct sdhci_host *host) 496static int __devexit sdhci_esdhc_imx_remove(struct platform_device *pdev)
304{ 497{
498 struct sdhci_host *host = platform_get_drvdata(pdev);
305 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 499 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
306 struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
307 struct pltfm_imx_data *imx_data = pltfm_host->priv; 500 struct pltfm_imx_data *imx_data = pltfm_host->priv;
501 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
502 int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
503
504 sdhci_remove_host(host, dead);
308 505
309 if (boarddata && gpio_is_valid(boarddata->wp_gpio)) 506 if (gpio_is_valid(boarddata->wp_gpio))
310 gpio_free(boarddata->wp_gpio); 507 gpio_free(boarddata->wp_gpio);
311 508
312 if (boarddata && gpio_is_valid(boarddata->cd_gpio)) { 509 if (gpio_is_valid(boarddata->cd_gpio)) {
510 free_irq(gpio_to_irq(boarddata->cd_gpio), host);
313 gpio_free(boarddata->cd_gpio); 511 gpio_free(boarddata->cd_gpio);
314
315 if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION))
316 free_irq(gpio_to_irq(boarddata->cd_gpio), host);
317 } 512 }
318 513
319 clk_disable(pltfm_host->clk); 514 clk_disable(pltfm_host->clk);
320 clk_put(pltfm_host->clk); 515 clk_put(pltfm_host->clk);
321 kfree(imx_data); 516 kfree(imx_data);
517
518 sdhci_pltfm_free(pdev);
519
520 return 0;
322} 521}
323 522
324struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { 523static struct platform_driver sdhci_esdhc_imx_driver = {
325 .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA 524 .driver = {
326 | SDHCI_QUIRK_BROKEN_CARD_DETECTION, 525 .name = "sdhci-esdhc-imx",
327 /* ADMA has issues. Might be fixable */ 526 .owner = THIS_MODULE,
328 .ops = &sdhci_esdhc_ops, 527 .of_match_table = imx_esdhc_dt_ids,
329 .init = esdhc_pltfm_init, 528 },
330 .exit = esdhc_pltfm_exit, 529 .id_table = imx_esdhc_devtype,
530 .probe = sdhci_esdhc_imx_probe,
531 .remove = __devexit_p(sdhci_esdhc_imx_remove),
532#ifdef CONFIG_PM
533 .suspend = sdhci_pltfm_suspend,
534 .resume = sdhci_pltfm_resume,
535#endif
331}; 536};
537
538static int __init sdhci_esdhc_imx_init(void)
539{
540 return platform_driver_register(&sdhci_esdhc_imx_driver);
541}
542module_init(sdhci_esdhc_imx_init);
543
544static void __exit sdhci_esdhc_imx_exit(void)
545{
546 platform_driver_unregister(&sdhci_esdhc_imx_driver);
547}
548module_exit(sdhci_esdhc_imx_exit);
549
550MODULE_DESCRIPTION("SDHCI driver for Freescale i.MX eSDHC");
551MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
552MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
deleted file mode 100644
index 60e4186a434..00000000000
--- a/drivers/mmc/host/sdhci-of-core.c
+++ /dev/null
@@ -1,253 +0,0 @@
1/*
2 * OpenFirmware bindings for Secure Digital Host Controller Interface.
3 *
4 * Copyright (c) 2007 Freescale Semiconductor, Inc.
5 * Copyright (c) 2009 MontaVista Software, Inc.
6 *
7 * Authors: Xiaobo Xie <X.Xie@freescale.com>
8 * Anton Vorontsov <avorontsov@ru.mvista.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 */
15
16#include <linux/err.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/io.h>
20#include <linux/interrupt.h>
21#include <linux/delay.h>
22#include <linux/of.h>
23#include <linux/of_platform.h>
24#include <linux/of_address.h>
25#include <linux/of_irq.h>
26#include <linux/mmc/host.h>
27#ifdef CONFIG_PPC
28#include <asm/machdep.h>
29#endif
30#include "sdhci-of.h"
31#include "sdhci.h"
32
33#ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
34
35/*
36 * These accessors are designed for big endian hosts doing I/O to
37 * little endian controllers incorporating a 32-bit hardware byte swapper.
38 */
39
40u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg)
41{
42 return in_be32(host->ioaddr + reg);
43}
44
45u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg)
46{
47 return in_be16(host->ioaddr + (reg ^ 0x2));
48}
49
50u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg)
51{
52 return in_8(host->ioaddr + (reg ^ 0x3));
53}
54
55void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg)
56{
57 out_be32(host->ioaddr + reg, val);
58}
59
60void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg)
61{
62 struct sdhci_of_host *of_host = sdhci_priv(host);
63 int base = reg & ~0x3;
64 int shift = (reg & 0x2) * 8;
65
66 switch (reg) {
67 case SDHCI_TRANSFER_MODE:
68 /*
69 * Postpone this write, we must do it together with a
70 * command write that is down below.
71 */
72 of_host->xfer_mode_shadow = val;
73 return;
74 case SDHCI_COMMAND:
75 sdhci_be32bs_writel(host, val << 16 | of_host->xfer_mode_shadow,
76 SDHCI_TRANSFER_MODE);
77 return;
78 }
79 clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift);
80}
81
82void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg)
83{
84 int base = reg & ~0x3;
85 int shift = (reg & 0x3) * 8;
86
87 clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift);
88}
89#endif /* CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER */
90
91#ifdef CONFIG_PM
92
93static int sdhci_of_suspend(struct platform_device *ofdev, pm_message_t state)
94{
95 struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
96
97 return mmc_suspend_host(host->mmc);
98}
99
100static int sdhci_of_resume(struct platform_device *ofdev)
101{
102 struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
103
104 return mmc_resume_host(host->mmc);
105}
106
107#else
108
109#define sdhci_of_suspend NULL
110#define sdhci_of_resume NULL
111
112#endif
113
114static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
115{
116 if (of_get_property(np, "sdhci,wp-inverted", NULL))
117 return true;
118
119 /* Old device trees don't have the wp-inverted property. */
120#ifdef CONFIG_PPC
121 return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds);
122#else
123 return false;
124#endif
125}
126
127static const struct of_device_id sdhci_of_match[];
128static int __devinit sdhci_of_probe(struct platform_device *ofdev)
129{
130 const struct of_device_id *match;
131 struct device_node *np = ofdev->dev.of_node;
132 struct sdhci_of_data *sdhci_of_data;
133 struct sdhci_host *host;
134 struct sdhci_of_host *of_host;
135 const __be32 *clk;
136 int size;
137 int ret;
138
139 match = of_match_device(sdhci_of_match, &ofdev->dev);
140 if (!match)
141 return -EINVAL;
142 sdhci_of_data = match->data;
143
144 if (!of_device_is_available(np))
145 return -ENODEV;
146
147 host = sdhci_alloc_host(&ofdev->dev, sizeof(*of_host));
148 if (IS_ERR(host))
149 return -ENOMEM;
150
151 of_host = sdhci_priv(host);
152 dev_set_drvdata(&ofdev->dev, host);
153
154 host->ioaddr = of_iomap(np, 0);
155 if (!host->ioaddr) {
156 ret = -ENOMEM;
157 goto err_addr_map;
158 }
159
160 host->irq = irq_of_parse_and_map(np, 0);
161 if (!host->irq) {
162 ret = -EINVAL;
163 goto err_no_irq;
164 }
165
166 host->hw_name = dev_name(&ofdev->dev);
167 if (sdhci_of_data) {
168 host->quirks = sdhci_of_data->quirks;
169 host->ops = &sdhci_of_data->ops;
170 }
171
172 if (of_get_property(np, "sdhci,auto-cmd12", NULL))
173 host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
174
175
176 if (of_get_property(np, "sdhci,1-bit-only", NULL))
177 host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
178
179 if (sdhci_of_wp_inverted(np))
180 host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
181
182 clk = of_get_property(np, "clock-frequency", &size);
183 if (clk && size == sizeof(*clk) && *clk)
184 of_host->clock = be32_to_cpup(clk);
185
186 ret = sdhci_add_host(host);
187 if (ret)
188 goto err_add_host;
189
190 return 0;
191
192err_add_host:
193 irq_dispose_mapping(host->irq);
194err_no_irq:
195 iounmap(host->ioaddr);
196err_addr_map:
197 sdhci_free_host(host);
198 return ret;
199}
200
201static int __devexit sdhci_of_remove(struct platform_device *ofdev)
202{
203 struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
204
205 sdhci_remove_host(host, 0);
206 sdhci_free_host(host);
207 irq_dispose_mapping(host->irq);
208 iounmap(host->ioaddr);
209 return 0;
210}
211
212static const struct of_device_id sdhci_of_match[] = {
213#ifdef CONFIG_MMC_SDHCI_OF_ESDHC
214 { .compatible = "fsl,mpc8379-esdhc", .data = &sdhci_esdhc, },
215 { .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, },
216 { .compatible = "fsl,esdhc", .data = &sdhci_esdhc, },
217#endif
218#ifdef CONFIG_MMC_SDHCI_OF_HLWD
219 { .compatible = "nintendo,hollywood-sdhci", .data = &sdhci_hlwd, },
220#endif
221 { .compatible = "generic-sdhci", },
222 {},
223};
224MODULE_DEVICE_TABLE(of, sdhci_of_match);
225
226static struct platform_driver sdhci_of_driver = {
227 .driver = {
228 .name = "sdhci-of",
229 .owner = THIS_MODULE,
230 .of_match_table = sdhci_of_match,
231 },
232 .probe = sdhci_of_probe,
233 .remove = __devexit_p(sdhci_of_remove),
234 .suspend = sdhci_of_suspend,
235 .resume = sdhci_of_resume,
236};
237
238static int __init sdhci_of_init(void)
239{
240 return platform_driver_register(&sdhci_of_driver);
241}
242module_init(sdhci_of_init);
243
244static void __exit sdhci_of_exit(void)
245{
246 platform_driver_unregister(&sdhci_of_driver);
247}
248module_exit(sdhci_of_exit);
249
250MODULE_DESCRIPTION("Secure Digital Host Controller Interface OF driver");
251MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
252 "Anton Vorontsov <avorontsov@ru.mvista.com>");
253MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index ba40d6d035c..fe604df6501 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -16,8 +16,7 @@
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/mmc/host.h> 18#include <linux/mmc/host.h>
19#include "sdhci-of.h" 19#include "sdhci-pltfm.h"
20#include "sdhci.h"
21#include "sdhci-esdhc.h" 20#include "sdhci-esdhc.h"
22 21
23static u16 esdhc_readw(struct sdhci_host *host, int reg) 22static u16 esdhc_readw(struct sdhci_host *host, int reg)
@@ -60,32 +59,83 @@ static int esdhc_of_enable_dma(struct sdhci_host *host)
60 59
61static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host) 60static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
62{ 61{
63 struct sdhci_of_host *of_host = sdhci_priv(host); 62 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
64 63
65 return of_host->clock; 64 return pltfm_host->clock;
66} 65}
67 66
68static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) 67static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
69{ 68{
70 struct sdhci_of_host *of_host = sdhci_priv(host); 69 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
71 70
72 return of_host->clock / 256 / 16; 71 return pltfm_host->clock / 256 / 16;
73} 72}
74 73
75struct sdhci_of_data sdhci_esdhc = { 74static struct sdhci_ops sdhci_esdhc_ops = {
75 .read_l = sdhci_be32bs_readl,
76 .read_w = esdhc_readw,
77 .read_b = sdhci_be32bs_readb,
78 .write_l = sdhci_be32bs_writel,
79 .write_w = esdhc_writew,
80 .write_b = esdhc_writeb,
81 .set_clock = esdhc_set_clock,
82 .enable_dma = esdhc_of_enable_dma,
83 .get_max_clock = esdhc_of_get_max_clock,
84 .get_min_clock = esdhc_of_get_min_clock,
85};
86
87static struct sdhci_pltfm_data sdhci_esdhc_pdata = {
76 /* card detection could be handled via GPIO */ 88 /* card detection could be handled via GPIO */
77 .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION 89 .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
78 | SDHCI_QUIRK_NO_CARD_NO_RESET, 90 | SDHCI_QUIRK_NO_CARD_NO_RESET,
79 .ops = { 91 .ops = &sdhci_esdhc_ops,
80 .read_l = sdhci_be32bs_readl, 92};
81 .read_w = esdhc_readw, 93
82 .read_b = sdhci_be32bs_readb, 94static int __devinit sdhci_esdhc_probe(struct platform_device *pdev)
83 .write_l = sdhci_be32bs_writel, 95{
84 .write_w = esdhc_writew, 96 return sdhci_pltfm_register(pdev, &sdhci_esdhc_pdata);
85 .write_b = esdhc_writeb, 97}
86 .set_clock = esdhc_set_clock, 98
87 .enable_dma = esdhc_of_enable_dma, 99static int __devexit sdhci_esdhc_remove(struct platform_device *pdev)
88 .get_max_clock = esdhc_of_get_max_clock, 100{
89 .get_min_clock = esdhc_of_get_min_clock, 101 return sdhci_pltfm_unregister(pdev);
102}
103
104static const struct of_device_id sdhci_esdhc_of_match[] = {
105 { .compatible = "fsl,mpc8379-esdhc" },
106 { .compatible = "fsl,mpc8536-esdhc" },
107 { .compatible = "fsl,esdhc" },
108 { }
109};
110MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
111
112static struct platform_driver sdhci_esdhc_driver = {
113 .driver = {
114 .name = "sdhci-esdhc",
115 .owner = THIS_MODULE,
116 .of_match_table = sdhci_esdhc_of_match,
90 }, 117 },
118 .probe = sdhci_esdhc_probe,
119 .remove = __devexit_p(sdhci_esdhc_remove),
120#ifdef CONFIG_PM
121 .suspend = sdhci_pltfm_suspend,
122 .resume = sdhci_pltfm_resume,
123#endif
91}; 124};
125
126static int __init sdhci_esdhc_init(void)
127{
128 return platform_driver_register(&sdhci_esdhc_driver);
129}
130module_init(sdhci_esdhc_init);
131
132static void __exit sdhci_esdhc_exit(void)
133{
134 platform_driver_unregister(&sdhci_esdhc_driver);
135}
136module_exit(sdhci_esdhc_exit);
137
138MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
139MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
140 "Anton Vorontsov <avorontsov@ru.mvista.com>");
141MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 68ddb7546ae..735be131dca 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -21,8 +21,7 @@
21 21
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/mmc/host.h> 23#include <linux/mmc/host.h>
24#include "sdhci-of.h" 24#include "sdhci-pltfm.h"
25#include "sdhci.h"
26 25
27/* 26/*
28 * Ops and quirks for the Nintendo Wii SDHCI controllers. 27 * Ops and quirks for the Nintendo Wii SDHCI controllers.
@@ -51,15 +50,63 @@ static void sdhci_hlwd_writeb(struct sdhci_host *host, u8 val, int reg)
51 udelay(SDHCI_HLWD_WRITE_DELAY); 50 udelay(SDHCI_HLWD_WRITE_DELAY);
52} 51}
53 52
54struct sdhci_of_data sdhci_hlwd = { 53static struct sdhci_ops sdhci_hlwd_ops = {
54 .read_l = sdhci_be32bs_readl,
55 .read_w = sdhci_be32bs_readw,
56 .read_b = sdhci_be32bs_readb,
57 .write_l = sdhci_hlwd_writel,
58 .write_w = sdhci_hlwd_writew,
59 .write_b = sdhci_hlwd_writeb,
60};
61
62static struct sdhci_pltfm_data sdhci_hlwd_pdata = {
55 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | 63 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
56 SDHCI_QUIRK_32BIT_DMA_SIZE, 64 SDHCI_QUIRK_32BIT_DMA_SIZE,
57 .ops = { 65 .ops = &sdhci_hlwd_ops,
58 .read_l = sdhci_be32bs_readl, 66};
59 .read_w = sdhci_be32bs_readw, 67
60 .read_b = sdhci_be32bs_readb, 68static int __devinit sdhci_hlwd_probe(struct platform_device *pdev)
61 .write_l = sdhci_hlwd_writel, 69{
62 .write_w = sdhci_hlwd_writew, 70 return sdhci_pltfm_register(pdev, &sdhci_hlwd_pdata);
63 .write_b = sdhci_hlwd_writeb, 71}
72
73static int __devexit sdhci_hlwd_remove(struct platform_device *pdev)
74{
75 return sdhci_pltfm_unregister(pdev);
76}
77
78static const struct of_device_id sdhci_hlwd_of_match[] = {
79 { .compatible = "nintendo,hollywood-sdhci" },
80 { }
81};
82MODULE_DEVICE_TABLE(of, sdhci_hlwd_of_match);
83
84static struct platform_driver sdhci_hlwd_driver = {
85 .driver = {
86 .name = "sdhci-hlwd",
87 .owner = THIS_MODULE,
88 .of_match_table = sdhci_hlwd_of_match,
64 }, 89 },
90 .probe = sdhci_hlwd_probe,
91 .remove = __devexit_p(sdhci_hlwd_remove),
92#ifdef CONFIG_PM
93 .suspend = sdhci_pltfm_suspend,
94 .resume = sdhci_pltfm_resume,
95#endif
65}; 96};
97
98static int __init sdhci_hlwd_init(void)
99{
100 return platform_driver_register(&sdhci_hlwd_driver);
101}
102module_init(sdhci_hlwd_init);
103
104static void __exit sdhci_hlwd_exit(void)
105{
106 platform_driver_unregister(&sdhci_hlwd_driver);
107}
108module_exit(sdhci_hlwd_exit);
109
110MODULE_DESCRIPTION("Nintendo Wii SDHCI OF driver");
111MODULE_AUTHOR("The GameCube Linux Team, Albert Herranz");
112MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-of.h b/drivers/mmc/host/sdhci-of.h
deleted file mode 100644
index ad09ad9915d..00000000000
--- a/drivers/mmc/host/sdhci-of.h
+++ /dev/null
@@ -1,42 +0,0 @@
1/*
2 * OpenFirmware bindings for Secure Digital Host Controller Interface.
3 *
4 * Copyright (c) 2007 Freescale Semiconductor, Inc.
5 * Copyright (c) 2009 MontaVista Software, Inc.
6 *
7 * Authors: Xiaobo Xie <X.Xie@freescale.com>
8 * Anton Vorontsov <avorontsov@ru.mvista.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 */
15
16#ifndef __SDHCI_OF_H
17#define __SDHCI_OF_H
18
19#include <linux/types.h>
20#include "sdhci.h"
21
22struct sdhci_of_data {
23 unsigned int quirks;
24 struct sdhci_ops ops;
25};
26
27struct sdhci_of_host {
28 unsigned int clock;
29 u16 xfer_mode_shadow;
30};
31
32extern u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg);
33extern u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg);
34extern u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg);
35extern void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg);
36extern void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg);
37extern void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg);
38
39extern struct sdhci_of_data sdhci_esdhc;
40extern struct sdhci_of_data sdhci_hlwd;
41
42#endif /* __SDHCI_OF_H */
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 936bbca19c0..26c528648f3 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -143,6 +143,12 @@ static const struct sdhci_pci_fixes sdhci_cafe = {
143 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, 143 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
144}; 144};
145 145
146static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot)
147{
148 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
149 return 0;
150}
151
146/* 152/*
147 * ADMA operation is disabled for Moorestown platform due to 153 * ADMA operation is disabled for Moorestown platform due to
148 * hardware bugs. 154 * hardware bugs.
@@ -157,8 +163,15 @@ static int mrst_hc_probe(struct sdhci_pci_chip *chip)
157 return 0; 163 return 0;
158} 164}
159 165
166static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
167{
168 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
169 return 0;
170}
171
160static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = { 172static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
161 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, 173 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
174 .probe_slot = mrst_hc_probe_slot,
162}; 175};
163 176
164static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = { 177static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
@@ -170,8 +183,13 @@ static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
170 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 183 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
171}; 184};
172 185
173static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc_sdio = { 186static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
187 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
188};
189
190static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = {
174 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 191 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
192 .probe_slot = mfd_emmc_probe_slot,
175}; 193};
176 194
177/* O2Micro extra registers */ 195/* O2Micro extra registers */
@@ -682,7 +700,7 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
682 .device = PCI_DEVICE_ID_INTEL_MFD_SDIO1, 700 .device = PCI_DEVICE_ID_INTEL_MFD_SDIO1,
683 .subvendor = PCI_ANY_ID, 701 .subvendor = PCI_ANY_ID,
684 .subdevice = PCI_ANY_ID, 702 .subdevice = PCI_ANY_ID,
685 .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio, 703 .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio,
686 }, 704 },
687 705
688 { 706 {
@@ -690,7 +708,7 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
690 .device = PCI_DEVICE_ID_INTEL_MFD_SDIO2, 708 .device = PCI_DEVICE_ID_INTEL_MFD_SDIO2,
691 .subvendor = PCI_ANY_ID, 709 .subvendor = PCI_ANY_ID,
692 .subdevice = PCI_ANY_ID, 710 .subdevice = PCI_ANY_ID,
693 .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio, 711 .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio,
694 }, 712 },
695 713
696 { 714 {
@@ -698,7 +716,7 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
698 .device = PCI_DEVICE_ID_INTEL_MFD_EMMC0, 716 .device = PCI_DEVICE_ID_INTEL_MFD_EMMC0,
699 .subvendor = PCI_ANY_ID, 717 .subvendor = PCI_ANY_ID,
700 .subdevice = PCI_ANY_ID, 718 .subdevice = PCI_ANY_ID,
701 .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio, 719 .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc,
702 }, 720 },
703 721
704 { 722 {
@@ -706,7 +724,7 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
706 .device = PCI_DEVICE_ID_INTEL_MFD_EMMC1, 724 .device = PCI_DEVICE_ID_INTEL_MFD_EMMC1,
707 .subvendor = PCI_ANY_ID, 725 .subvendor = PCI_ANY_ID,
708 .subdevice = PCI_ANY_ID, 726 .subdevice = PCI_ANY_ID,
709 .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio, 727 .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc,
710 }, 728 },
711 729
712 { 730 {
@@ -789,8 +807,34 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host)
789 return 0; 807 return 0;
790} 808}
791 809
810static int sdhci_pci_8bit_width(struct sdhci_host *host, int width)
811{
812 u8 ctrl;
813
814 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
815
816 switch (width) {
817 case MMC_BUS_WIDTH_8:
818 ctrl |= SDHCI_CTRL_8BITBUS;
819 ctrl &= ~SDHCI_CTRL_4BITBUS;
820 break;
821 case MMC_BUS_WIDTH_4:
822 ctrl |= SDHCI_CTRL_4BITBUS;
823 ctrl &= ~SDHCI_CTRL_8BITBUS;
824 break;
825 default:
826 ctrl &= ~(SDHCI_CTRL_8BITBUS | SDHCI_CTRL_4BITBUS);
827 break;
828 }
829
830 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
831
832 return 0;
833}
834
792static struct sdhci_ops sdhci_pci_ops = { 835static struct sdhci_ops sdhci_pci_ops = {
793 .enable_dma = sdhci_pci_enable_dma, 836 .enable_dma = sdhci_pci_enable_dma,
837 .platform_8bit_width = sdhci_pci_8bit_width,
794}; 838};
795 839
796/*****************************************************************************\ 840/*****************************************************************************\
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index dbab0407f4b..1179f1be431 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -2,6 +2,12 @@
2 * sdhci-pltfm.c Support for SDHCI platform devices 2 * sdhci-pltfm.c Support for SDHCI platform devices
3 * Copyright (c) 2009 Intel Corporation 3 * Copyright (c) 2009 Intel Corporation
4 * 4 *
5 * Copyright (c) 2007 Freescale Semiconductor, Inc.
6 * Copyright (c) 2009 MontaVista Software, Inc.
7 *
8 * Authors: Xiaobo Xie <X.Xie@freescale.com>
9 * Anton Vorontsov <avorontsov@ru.mvista.com>
10 *
5 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as 12 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation. 13 * published by the Free Software Foundation.
@@ -22,48 +28,67 @@
22 * Inspired by sdhci-pci.c, by Pierre Ossman 28 * Inspired by sdhci-pci.c, by Pierre Ossman
23 */ 29 */
24 30
25#include <linux/delay.h> 31#include <linux/err.h>
26#include <linux/highmem.h> 32#include <linux/of.h>
27#include <linux/mod_devicetable.h> 33#ifdef CONFIG_PPC
28#include <linux/platform_device.h> 34#include <asm/machdep.h>
35#endif
36#include "sdhci-pltfm.h"
29 37
30#include <linux/mmc/host.h> 38static struct sdhci_ops sdhci_pltfm_ops = {
39};
31 40
32#include <linux/io.h> 41#ifdef CONFIG_OF
33#include <linux/mmc/sdhci-pltfm.h> 42static bool sdhci_of_wp_inverted(struct device_node *np)
43{
44 if (of_get_property(np, "sdhci,wp-inverted", NULL))
45 return true;
34 46
35#include "sdhci.h" 47 /* Old device trees don't have the wp-inverted property. */
36#include "sdhci-pltfm.h" 48#ifdef CONFIG_PPC
49 return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds);
50#else
51 return false;
52#endif /* CONFIG_PPC */
53}
37 54
38/*****************************************************************************\ 55void sdhci_get_of_property(struct platform_device *pdev)
39 * * 56{
40 * SDHCI core callbacks * 57 struct device_node *np = pdev->dev.of_node;
41 * * 58 struct sdhci_host *host = platform_get_drvdata(pdev);
42\*****************************************************************************/ 59 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
60 const __be32 *clk;
61 int size;
43 62
44static struct sdhci_ops sdhci_pltfm_ops = { 63 if (of_device_is_available(np)) {
45}; 64 if (of_get_property(np, "sdhci,auto-cmd12", NULL))
65 host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
46 66
47/*****************************************************************************\ 67 if (of_get_property(np, "sdhci,1-bit-only", NULL))
48 * * 68 host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
49 * Device probing/removal * 69
50 * * 70 if (sdhci_of_wp_inverted(np))
51\*****************************************************************************/ 71 host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
72
73 clk = of_get_property(np, "clock-frequency", &size);
74 if (clk && size == sizeof(*clk) && *clk)
75 pltfm_host->clock = be32_to_cpup(clk);
76 }
77}
78#else
79void sdhci_get_of_property(struct platform_device *pdev) {}
80#endif /* CONFIG_OF */
81EXPORT_SYMBOL_GPL(sdhci_get_of_property);
52 82
53static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) 83struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
84 struct sdhci_pltfm_data *pdata)
54{ 85{
55 const struct platform_device_id *platid = platform_get_device_id(pdev);
56 struct sdhci_pltfm_data *pdata;
57 struct sdhci_host *host; 86 struct sdhci_host *host;
58 struct sdhci_pltfm_host *pltfm_host; 87 struct sdhci_pltfm_host *pltfm_host;
88 struct device_node *np = pdev->dev.of_node;
59 struct resource *iomem; 89 struct resource *iomem;
60 int ret; 90 int ret;
61 91
62 if (platid && platid->driver_data)
63 pdata = (void *)platid->driver_data;
64 else
65 pdata = pdev->dev.platform_data;
66
67 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 92 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
68 if (!iomem) { 93 if (!iomem) {
69 ret = -ENOMEM; 94 ret = -ENOMEM;
@@ -71,11 +96,10 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
71 } 96 }
72 97
73 if (resource_size(iomem) < 0x100) 98 if (resource_size(iomem) < 0x100)
74 dev_err(&pdev->dev, "Invalid iomem size. You may " 99 dev_err(&pdev->dev, "Invalid iomem size!\n");
75 "experience problems.\n");
76 100
77 /* Some PCI-based MFD need the parent here */ 101 /* Some PCI-based MFD need the parent here */
78 if (pdev->dev.parent != &platform_bus) 102 if (pdev->dev.parent != &platform_bus && !np)
79 host = sdhci_alloc_host(pdev->dev.parent, sizeof(*pltfm_host)); 103 host = sdhci_alloc_host(pdev->dev.parent, sizeof(*pltfm_host));
80 else 104 else
81 host = sdhci_alloc_host(&pdev->dev, sizeof(*pltfm_host)); 105 host = sdhci_alloc_host(&pdev->dev, sizeof(*pltfm_host));
@@ -87,7 +111,7 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
87 111
88 pltfm_host = sdhci_priv(host); 112 pltfm_host = sdhci_priv(host);
89 113
90 host->hw_name = "platform"; 114 host->hw_name = dev_name(&pdev->dev);
91 if (pdata && pdata->ops) 115 if (pdata && pdata->ops)
92 host->ops = pdata->ops; 116 host->ops = pdata->ops;
93 else 117 else
@@ -110,126 +134,121 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
110 goto err_remap; 134 goto err_remap;
111 } 135 }
112 136
113 if (pdata && pdata->init) {
114 ret = pdata->init(host, pdata);
115 if (ret)
116 goto err_plat_init;
117 }
118
119 ret = sdhci_add_host(host);
120 if (ret)
121 goto err_add_host;
122
123 platform_set_drvdata(pdev, host); 137 platform_set_drvdata(pdev, host);
124 138
125 return 0; 139 return host;
126 140
127err_add_host:
128 if (pdata && pdata->exit)
129 pdata->exit(host);
130err_plat_init:
131 iounmap(host->ioaddr);
132err_remap: 141err_remap:
133 release_mem_region(iomem->start, resource_size(iomem)); 142 release_mem_region(iomem->start, resource_size(iomem));
134err_request: 143err_request:
135 sdhci_free_host(host); 144 sdhci_free_host(host);
136err: 145err:
137 printk(KERN_ERR"Probing of sdhci-pltfm failed: %d\n", ret); 146 dev_err(&pdev->dev, "%s failed %d\n", __func__, ret);
138 return ret; 147 return ERR_PTR(ret);
139} 148}
149EXPORT_SYMBOL_GPL(sdhci_pltfm_init);
140 150
141static int __devexit sdhci_pltfm_remove(struct platform_device *pdev) 151void sdhci_pltfm_free(struct platform_device *pdev)
142{ 152{
143 struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
144 struct sdhci_host *host = platform_get_drvdata(pdev); 153 struct sdhci_host *host = platform_get_drvdata(pdev);
145 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 154 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
146 int dead;
147 u32 scratch;
148
149 dead = 0;
150 scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
151 if (scratch == (u32)-1)
152 dead = 1;
153 155
154 sdhci_remove_host(host, dead);
155 if (pdata && pdata->exit)
156 pdata->exit(host);
157 iounmap(host->ioaddr); 156 iounmap(host->ioaddr);
158 release_mem_region(iomem->start, resource_size(iomem)); 157 release_mem_region(iomem->start, resource_size(iomem));
159 sdhci_free_host(host); 158 sdhci_free_host(host);
160 platform_set_drvdata(pdev, NULL); 159 platform_set_drvdata(pdev, NULL);
160}
161EXPORT_SYMBOL_GPL(sdhci_pltfm_free);
161 162
162 return 0; 163int sdhci_pltfm_register(struct platform_device *pdev,
164 struct sdhci_pltfm_data *pdata)
165{
166 struct sdhci_host *host;
167 int ret = 0;
168
169 host = sdhci_pltfm_init(pdev, pdata);
170 if (IS_ERR(host))
171 return PTR_ERR(host);
172
173 sdhci_get_of_property(pdev);
174
175 ret = sdhci_add_host(host);
176 if (ret)
177 sdhci_pltfm_free(pdev);
178
179 return ret;
163} 180}
181EXPORT_SYMBOL_GPL(sdhci_pltfm_register);
164 182
165static const struct platform_device_id sdhci_pltfm_ids[] = { 183int sdhci_pltfm_unregister(struct platform_device *pdev)
166 { "sdhci", }, 184{
167#ifdef CONFIG_MMC_SDHCI_CNS3XXX 185 struct sdhci_host *host = platform_get_drvdata(pdev);
168 { "sdhci-cns3xxx", (kernel_ulong_t)&sdhci_cns3xxx_pdata }, 186 int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
169#endif 187
170#ifdef CONFIG_MMC_SDHCI_ESDHC_IMX 188 sdhci_remove_host(host, dead);
171 { "sdhci-esdhc-imx", (kernel_ulong_t)&sdhci_esdhc_imx_pdata }, 189 sdhci_pltfm_free(pdev);
172#endif 190
173#ifdef CONFIG_MMC_SDHCI_DOVE 191 return 0;
174 { "sdhci-dove", (kernel_ulong_t)&sdhci_dove_pdata }, 192}
175#endif 193EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
176#ifdef CONFIG_MMC_SDHCI_TEGRA
177 { "sdhci-tegra", (kernel_ulong_t)&sdhci_tegra_pdata },
178#endif
179 { },
180};
181MODULE_DEVICE_TABLE(platform, sdhci_pltfm_ids);
182 194
183#ifdef CONFIG_PM 195#ifdef CONFIG_PM
184static int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state) 196int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state)
185{ 197{
186 struct sdhci_host *host = platform_get_drvdata(dev); 198 struct sdhci_host *host = platform_get_drvdata(dev);
199 int ret;
200
201 ret = sdhci_suspend_host(host, state);
202 if (ret) {
203 dev_err(&dev->dev, "suspend failed, error = %d\n", ret);
204 return ret;
205 }
206
207 if (host->ops && host->ops->suspend)
208 ret = host->ops->suspend(host, state);
209 if (ret) {
210 dev_err(&dev->dev, "suspend hook failed, error = %d\n", ret);
211 sdhci_resume_host(host);
212 }
187 213
188 return sdhci_suspend_host(host, state); 214 return ret;
189} 215}
216EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend);
190 217
191static int sdhci_pltfm_resume(struct platform_device *dev) 218int sdhci_pltfm_resume(struct platform_device *dev)
192{ 219{
193 struct sdhci_host *host = platform_get_drvdata(dev); 220 struct sdhci_host *host = platform_get_drvdata(dev);
221 int ret = 0;
194 222
195 return sdhci_resume_host(host); 223 if (host->ops && host->ops->resume)
196} 224 ret = host->ops->resume(host);
197#else 225 if (ret) {
198#define sdhci_pltfm_suspend NULL 226 dev_err(&dev->dev, "resume hook failed, error = %d\n", ret);
199#define sdhci_pltfm_resume NULL 227 return ret;
200#endif /* CONFIG_PM */ 228 }
201 229
202static struct platform_driver sdhci_pltfm_driver = { 230 ret = sdhci_resume_host(host);
203 .driver = { 231 if (ret)
204 .name = "sdhci", 232 dev_err(&dev->dev, "resume failed, error = %d\n", ret);
205 .owner = THIS_MODULE,
206 },
207 .probe = sdhci_pltfm_probe,
208 .remove = __devexit_p(sdhci_pltfm_remove),
209 .id_table = sdhci_pltfm_ids,
210 .suspend = sdhci_pltfm_suspend,
211 .resume = sdhci_pltfm_resume,
212};
213 233
214/*****************************************************************************\ 234 return ret;
215 * * 235}
216 * Driver init/exit * 236EXPORT_SYMBOL_GPL(sdhci_pltfm_resume);
217 * * 237#endif /* CONFIG_PM */
218\*****************************************************************************/
219 238
220static int __init sdhci_drv_init(void) 239static int __init sdhci_pltfm_drv_init(void)
221{ 240{
222 return platform_driver_register(&sdhci_pltfm_driver); 241 pr_info("sdhci-pltfm: SDHCI platform and OF driver helper\n");
242
243 return 0;
223} 244}
245module_init(sdhci_pltfm_drv_init);
224 246
225static void __exit sdhci_drv_exit(void) 247static void __exit sdhci_pltfm_drv_exit(void)
226{ 248{
227 platform_driver_unregister(&sdhci_pltfm_driver);
228} 249}
250module_exit(sdhci_pltfm_drv_exit);
229 251
230module_init(sdhci_drv_init); 252MODULE_DESCRIPTION("SDHCI platform and OF driver helper");
231module_exit(sdhci_drv_exit); 253MODULE_AUTHOR("Intel Corporation");
232
233MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
234MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
235MODULE_LICENSE("GPL v2"); 254MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 2b37016ad0a..b92c7f29a4e 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -12,17 +12,95 @@
12#define _DRIVERS_MMC_SDHCI_PLTFM_H 12#define _DRIVERS_MMC_SDHCI_PLTFM_H
13 13
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/types.h> 15#include <linux/platform_device.h>
16#include <linux/mmc/sdhci-pltfm.h> 16#include "sdhci.h"
17
18struct sdhci_pltfm_data {
19 struct sdhci_ops *ops;
20 u64 quirks;
21};
17 22
18struct sdhci_pltfm_host { 23struct sdhci_pltfm_host {
19 struct clk *clk; 24 struct clk *clk;
20 void *priv; /* to handle quirks across io-accessor calls */ 25 void *priv; /* to handle quirks across io-accessor calls */
26
27 /* migrate from sdhci_of_host */
28 unsigned int clock;
29 u16 xfer_mode_shadow;
21}; 30};
22 31
23extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata; 32#ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
24extern struct sdhci_pltfm_data sdhci_esdhc_imx_pdata; 33/*
25extern struct sdhci_pltfm_data sdhci_dove_pdata; 34 * These accessors are designed for big endian hosts doing I/O to
26extern struct sdhci_pltfm_data sdhci_tegra_pdata; 35 * little endian controllers incorporating a 32-bit hardware byte swapper.
36 */
37static inline u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg)
38{
39 return in_be32(host->ioaddr + reg);
40}
41
42static inline u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg)
43{
44 return in_be16(host->ioaddr + (reg ^ 0x2));
45}
46
47static inline u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg)
48{
49 return in_8(host->ioaddr + (reg ^ 0x3));
50}
51
52static inline void sdhci_be32bs_writel(struct sdhci_host *host,
53 u32 val, int reg)
54{
55 out_be32(host->ioaddr + reg, val);
56}
57
58static inline void sdhci_be32bs_writew(struct sdhci_host *host,
59 u16 val, int reg)
60{
61 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
62 int base = reg & ~0x3;
63 int shift = (reg & 0x2) * 8;
64
65 switch (reg) {
66 case SDHCI_TRANSFER_MODE:
67 /*
68 * Postpone this write, we must do it together with a
69 * command write that is down below.
70 */
71 pltfm_host->xfer_mode_shadow = val;
72 return;
73 case SDHCI_COMMAND:
74 sdhci_be32bs_writel(host,
75 val << 16 | pltfm_host->xfer_mode_shadow,
76 SDHCI_TRANSFER_MODE);
77 return;
78 }
79 clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift);
80}
81
82static inline void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg)
83{
84 int base = reg & ~0x3;
85 int shift = (reg & 0x3) * 8;
86
87 clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift);
88}
89#endif /* CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER */
90
91extern void sdhci_get_of_property(struct platform_device *pdev);
92
93extern struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
94 struct sdhci_pltfm_data *pdata);
95extern void sdhci_pltfm_free(struct platform_device *pdev);
96
97extern int sdhci_pltfm_register(struct platform_device *pdev,
98 struct sdhci_pltfm_data *pdata);
99extern int sdhci_pltfm_unregister(struct platform_device *pdev);
100
101#ifdef CONFIG_PM
102extern int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state);
103extern int sdhci_pltfm_resume(struct platform_device *dev);
104#endif
27 105
28#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */ 106#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
diff --git a/drivers/mmc/host/sdhci-pxa.c b/drivers/mmc/host/sdhci-pxa.c
deleted file mode 100644
index 089c9a68b7b..00000000000
--- a/drivers/mmc/host/sdhci-pxa.c
+++ /dev/null
@@ -1,303 +0,0 @@
1/* linux/drivers/mmc/host/sdhci-pxa.c
2 *
3 * Copyright (C) 2010 Marvell International Ltd.
4 * Zhangfei Gao <zhangfei.gao@marvell.com>
5 * Kevin Wang <dwang4@marvell.com>
6 * Mingwei Wang <mwwang@marvell.com>
7 * Philip Rakity <prakity@marvell.com>
8 * Mark Brown <markb@marvell.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15/* Supports:
16 * SDHCI support for MMP2/PXA910/PXA168
17 *
18 * Refer to sdhci-s3c.c.
19 */
20
21#include <linux/delay.h>
22#include <linux/platform_device.h>
23#include <linux/mmc/host.h>
24#include <linux/clk.h>
25#include <linux/io.h>
26#include <linux/err.h>
27#include <plat/sdhci.h>
28#include "sdhci.h"
29
30#define DRIVER_NAME "sdhci-pxa"
31
32#define SD_FIFO_PARAM 0x104
33#define DIS_PAD_SD_CLK_GATE 0x400
34
35struct sdhci_pxa {
36 struct sdhci_host *host;
37 struct sdhci_pxa_platdata *pdata;
38 struct clk *clk;
39 struct resource *res;
40
41 u8 clk_enable;
42};
43
44/*****************************************************************************\
45 * *
46 * SDHCI core callbacks *
47 * *
48\*****************************************************************************/
49static void set_clock(struct sdhci_host *host, unsigned int clock)
50{
51 struct sdhci_pxa *pxa = sdhci_priv(host);
52 u32 tmp = 0;
53
54 if (clock == 0) {
55 if (pxa->clk_enable) {
56 clk_disable(pxa->clk);
57 pxa->clk_enable = 0;
58 }
59 } else {
60 if (0 == pxa->clk_enable) {
61 if (pxa->pdata->flags & PXA_FLAG_DISABLE_CLOCK_GATING) {
62 tmp = readl(host->ioaddr + SD_FIFO_PARAM);
63 tmp |= DIS_PAD_SD_CLK_GATE;
64 writel(tmp, host->ioaddr + SD_FIFO_PARAM);
65 }
66 clk_enable(pxa->clk);
67 pxa->clk_enable = 1;
68 }
69 }
70}
71
72static int set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
73{
74 u16 ctrl_2;
75
76 /*
77 * Set V18_EN -- UHS modes do not work without this.
78 * does not change signaling voltage
79 */
80 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
81
82 /* Select Bus Speed Mode for host */
83 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
84 switch (uhs) {
85 case MMC_TIMING_UHS_SDR12:
86 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
87 break;
88 case MMC_TIMING_UHS_SDR25:
89 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
90 break;
91 case MMC_TIMING_UHS_SDR50:
92 ctrl_2 |= SDHCI_CTRL_UHS_SDR50 | SDHCI_CTRL_VDD_180;
93 break;
94 case MMC_TIMING_UHS_SDR104:
95 ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180;
96 break;
97 case MMC_TIMING_UHS_DDR50:
98 ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180;
99 break;
100 }
101
102 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
103 pr_debug("%s:%s uhs = %d, ctrl_2 = %04X\n",
104 __func__, mmc_hostname(host->mmc), uhs, ctrl_2);
105
106 return 0;
107}
108
109static struct sdhci_ops sdhci_pxa_ops = {
110 .set_uhs_signaling = set_uhs_signaling,
111 .set_clock = set_clock,
112};
113
114/*****************************************************************************\
115 * *
116 * Device probing/removal *
117 * *
118\*****************************************************************************/
119
120static int __devinit sdhci_pxa_probe(struct platform_device *pdev)
121{
122 struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
123 struct device *dev = &pdev->dev;
124 struct sdhci_host *host = NULL;
125 struct resource *iomem = NULL;
126 struct sdhci_pxa *pxa = NULL;
127 int ret, irq;
128
129 irq = platform_get_irq(pdev, 0);
130 if (irq < 0) {
131 dev_err(dev, "no irq specified\n");
132 return irq;
133 }
134
135 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
136 if (!iomem) {
137 dev_err(dev, "no memory specified\n");
138 return -ENOENT;
139 }
140
141 host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pxa));
142 if (IS_ERR(host)) {
143 dev_err(dev, "failed to alloc host\n");
144 return PTR_ERR(host);
145 }
146
147 pxa = sdhci_priv(host);
148 pxa->host = host;
149 pxa->pdata = pdata;
150 pxa->clk_enable = 0;
151
152 pxa->clk = clk_get(dev, "PXA-SDHCLK");
153 if (IS_ERR(pxa->clk)) {
154 dev_err(dev, "failed to get io clock\n");
155 ret = PTR_ERR(pxa->clk);
156 goto out;
157 }
158
159 pxa->res = request_mem_region(iomem->start, resource_size(iomem),
160 mmc_hostname(host->mmc));
161 if (!pxa->res) {
162 dev_err(&pdev->dev, "cannot request region\n");
163 ret = -EBUSY;
164 goto out;
165 }
166
167 host->ioaddr = ioremap(iomem->start, resource_size(iomem));
168 if (!host->ioaddr) {
169 dev_err(&pdev->dev, "failed to remap registers\n");
170 ret = -ENOMEM;
171 goto out;
172 }
173
174 host->hw_name = "MMC";
175 host->ops = &sdhci_pxa_ops;
176 host->irq = irq;
177 host->quirks = SDHCI_QUIRK_BROKEN_ADMA
178 | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
179 | SDHCI_QUIRK_32BIT_DMA_ADDR
180 | SDHCI_QUIRK_32BIT_DMA_SIZE
181 | SDHCI_QUIRK_32BIT_ADMA_SIZE
182 | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
183
184 if (pdata->quirks)
185 host->quirks |= pdata->quirks;
186
187 /* enable 1/8V DDR capable */
188 host->mmc->caps |= MMC_CAP_1_8V_DDR;
189
190 /* If slot design supports 8 bit data, indicate this to MMC. */
191 if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
192 host->mmc->caps |= MMC_CAP_8_BIT_DATA;
193
194 ret = sdhci_add_host(host);
195 if (ret) {
196 dev_err(&pdev->dev, "failed to add host\n");
197 goto out;
198 }
199
200 if (pxa->pdata->max_speed)
201 host->mmc->f_max = pxa->pdata->max_speed;
202
203 platform_set_drvdata(pdev, host);
204
205 return 0;
206out:
207 if (host) {
208 clk_put(pxa->clk);
209 if (host->ioaddr)
210 iounmap(host->ioaddr);
211 if (pxa->res)
212 release_mem_region(pxa->res->start,
213 resource_size(pxa->res));
214 sdhci_free_host(host);
215 }
216
217 return ret;
218}
219
220static int __devexit sdhci_pxa_remove(struct platform_device *pdev)
221{
222 struct sdhci_host *host = platform_get_drvdata(pdev);
223 struct sdhci_pxa *pxa = sdhci_priv(host);
224 int dead = 0;
225 u32 scratch;
226
227 if (host) {
228 scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
229 if (scratch == (u32)-1)
230 dead = 1;
231
232 sdhci_remove_host(host, dead);
233
234 if (host->ioaddr)
235 iounmap(host->ioaddr);
236 if (pxa->res)
237 release_mem_region(pxa->res->start,
238 resource_size(pxa->res));
239 if (pxa->clk_enable) {
240 clk_disable(pxa->clk);
241 pxa->clk_enable = 0;
242 }
243 clk_put(pxa->clk);
244
245 sdhci_free_host(host);
246 platform_set_drvdata(pdev, NULL);
247 }
248
249 return 0;
250}
251
252#ifdef CONFIG_PM
253static int sdhci_pxa_suspend(struct platform_device *dev, pm_message_t state)
254{
255 struct sdhci_host *host = platform_get_drvdata(dev);
256
257 return sdhci_suspend_host(host, state);
258}
259
260static int sdhci_pxa_resume(struct platform_device *dev)
261{
262 struct sdhci_host *host = platform_get_drvdata(dev);
263
264 return sdhci_resume_host(host);
265}
266#else
267#define sdhci_pxa_suspend NULL
268#define sdhci_pxa_resume NULL
269#endif
270
271static struct platform_driver sdhci_pxa_driver = {
272 .probe = sdhci_pxa_probe,
273 .remove = __devexit_p(sdhci_pxa_remove),
274 .suspend = sdhci_pxa_suspend,
275 .resume = sdhci_pxa_resume,
276 .driver = {
277 .name = DRIVER_NAME,
278 .owner = THIS_MODULE,
279 },
280};
281
282/*****************************************************************************\
283 * *
284 * Driver init/exit *
285 * *
286\*****************************************************************************/
287
288static int __init sdhci_pxa_init(void)
289{
290 return platform_driver_register(&sdhci_pxa_driver);
291}
292
293static void __exit sdhci_pxa_exit(void)
294{
295 platform_driver_unregister(&sdhci_pxa_driver);
296}
297
298module_init(sdhci_pxa_init);
299module_exit(sdhci_pxa_exit);
300
301MODULE_DESCRIPTION("SDH controller driver for PXA168/PXA910/MMP2");
302MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>");
303MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
new file mode 100644
index 00000000000..38f58994f79
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -0,0 +1,244 @@
1/*
2 * Copyright (C) 2010 Marvell International Ltd.
3 * Zhangfei Gao <zhangfei.gao@marvell.com>
4 * Kevin Wang <dwang4@marvell.com>
5 * Jun Nie <njun@marvell.com>
6 * Qiming Wu <wuqm@marvell.com>
7 * Philip Rakity <prakity@marvell.com>
8 *
9 * This software is licensed under the terms of the GNU General Public
10 * License version 2, as published by the Free Software Foundation, and
11 * may be copied, distributed, and modified under those terms.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 */
19
20#include <linux/err.h>
21#include <linux/init.h>
22#include <linux/platform_device.h>
23#include <linux/clk.h>
24#include <linux/io.h>
25#include <linux/gpio.h>
26#include <linux/mmc/card.h>
27#include <linux/mmc/host.h>
28#include <linux/platform_data/pxa_sdhci.h>
29#include <linux/slab.h>
30#include "sdhci.h"
31#include "sdhci-pltfm.h"
32
33#define SD_FIFO_PARAM 0xe0
34#define DIS_PAD_SD_CLK_GATE 0x0400 /* Turn on/off Dynamic SD Clock Gating */
35#define CLK_GATE_ON 0x0200 /* Disable/enable Clock Gate */
36#define CLK_GATE_CTL 0x0100 /* Clock Gate Control */
37#define CLK_GATE_SETTING_BITS (DIS_PAD_SD_CLK_GATE | \
38 CLK_GATE_ON | CLK_GATE_CTL)
39
40#define SD_CLOCK_BURST_SIZE_SETUP 0xe6
41#define SDCLK_SEL_SHIFT 8
42#define SDCLK_SEL_MASK 0x3
43#define SDCLK_DELAY_SHIFT 10
44#define SDCLK_DELAY_MASK 0x3c
45
46#define SD_CE_ATA_2 0xea
47#define MMC_CARD 0x1000
48#define MMC_WIDTH 0x0100
49
50static void pxav2_set_private_registers(struct sdhci_host *host, u8 mask)
51{
52 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
53 struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
54
55 if (mask == SDHCI_RESET_ALL) {
56 u16 tmp = 0;
57
58 /*
59 * tune timing of read data/command when crc error happen
60 * no performance impact
61 */
62 if (pdata->clk_delay_sel == 1) {
63 tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
64
65 tmp &= ~(SDCLK_DELAY_MASK << SDCLK_DELAY_SHIFT);
66 tmp |= (pdata->clk_delay_cycles & SDCLK_DELAY_MASK)
67 << SDCLK_DELAY_SHIFT;
68 tmp &= ~(SDCLK_SEL_MASK << SDCLK_SEL_SHIFT);
69 tmp |= (1 & SDCLK_SEL_MASK) << SDCLK_SEL_SHIFT;
70
71 writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
72 }
73
74 if (pdata->flags & PXA_FLAG_ENABLE_CLOCK_GATING) {
75 tmp = readw(host->ioaddr + SD_FIFO_PARAM);
76 tmp &= ~CLK_GATE_SETTING_BITS;
77 writew(tmp, host->ioaddr + SD_FIFO_PARAM);
78 } else {
79 tmp = readw(host->ioaddr + SD_FIFO_PARAM);
80 tmp &= ~CLK_GATE_SETTING_BITS;
81 tmp |= CLK_GATE_SETTING_BITS;
82 writew(tmp, host->ioaddr + SD_FIFO_PARAM);
83 }
84 }
85}
86
87static int pxav2_mmc_set_width(struct sdhci_host *host, int width)
88{
89 u8 ctrl;
90 u16 tmp;
91
92 ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
93 tmp = readw(host->ioaddr + SD_CE_ATA_2);
94 if (width == MMC_BUS_WIDTH_8) {
95 ctrl &= ~SDHCI_CTRL_4BITBUS;
96 tmp |= MMC_CARD | MMC_WIDTH;
97 } else {
98 tmp &= ~(MMC_CARD | MMC_WIDTH);
99 if (width == MMC_BUS_WIDTH_4)
100 ctrl |= SDHCI_CTRL_4BITBUS;
101 else
102 ctrl &= ~SDHCI_CTRL_4BITBUS;
103 }
104 writew(tmp, host->ioaddr + SD_CE_ATA_2);
105 writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
106
107 return 0;
108}
109
110static u32 pxav2_get_max_clock(struct sdhci_host *host)
111{
112 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
113
114 return clk_get_rate(pltfm_host->clk);
115}
116
117static struct sdhci_ops pxav2_sdhci_ops = {
118 .get_max_clock = pxav2_get_max_clock,
119 .platform_reset_exit = pxav2_set_private_registers,
120 .platform_8bit_width = pxav2_mmc_set_width,
121};
122
123static int __devinit sdhci_pxav2_probe(struct platform_device *pdev)
124{
125 struct sdhci_pltfm_host *pltfm_host;
126 struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
127 struct device *dev = &pdev->dev;
128 struct sdhci_host *host = NULL;
129 struct sdhci_pxa *pxa = NULL;
130 int ret;
131 struct clk *clk;
132
133 pxa = kzalloc(sizeof(struct sdhci_pxa), GFP_KERNEL);
134 if (!pxa)
135 return -ENOMEM;
136
137 host = sdhci_pltfm_init(pdev, NULL);
138 if (IS_ERR(host)) {
139 kfree(pxa);
140 return PTR_ERR(host);
141 }
142 pltfm_host = sdhci_priv(host);
143 pltfm_host->priv = pxa;
144
145 clk = clk_get(dev, "PXA-SDHCLK");
146 if (IS_ERR(clk)) {
147 dev_err(dev, "failed to get io clock\n");
148 ret = PTR_ERR(clk);
149 goto err_clk_get;
150 }
151 pltfm_host->clk = clk;
152 clk_enable(clk);
153
154 host->quirks = SDHCI_QUIRK_BROKEN_ADMA
155 | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
156 | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
157
158 if (pdata) {
159 if (pdata->flags & PXA_FLAG_CARD_PERMANENT) {
160 /* on-chip device */
161 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
162 host->mmc->caps |= MMC_CAP_NONREMOVABLE;
163 }
164
165 /* If slot design supports 8 bit data, indicate this to MMC. */
166 if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
167 host->mmc->caps |= MMC_CAP_8_BIT_DATA;
168
169 if (pdata->quirks)
170 host->quirks |= pdata->quirks;
171 if (pdata->host_caps)
172 host->mmc->caps |= pdata->host_caps;
173 if (pdata->pm_caps)
174 host->mmc->pm_caps |= pdata->pm_caps;
175 }
176
177 host->ops = &pxav2_sdhci_ops;
178
179 ret = sdhci_add_host(host);
180 if (ret) {
181 dev_err(&pdev->dev, "failed to add host\n");
182 goto err_add_host;
183 }
184
185 platform_set_drvdata(pdev, host);
186
187 return 0;
188
189err_add_host:
190 clk_disable(clk);
191 clk_put(clk);
192err_clk_get:
193 sdhci_pltfm_free(pdev);
194 kfree(pxa);
195 return ret;
196}
197
198static int __devexit sdhci_pxav2_remove(struct platform_device *pdev)
199{
200 struct sdhci_host *host = platform_get_drvdata(pdev);
201 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
202 struct sdhci_pxa *pxa = pltfm_host->priv;
203
204 sdhci_remove_host(host, 1);
205
206 clk_disable(pltfm_host->clk);
207 clk_put(pltfm_host->clk);
208 sdhci_pltfm_free(pdev);
209 kfree(pxa);
210
211 platform_set_drvdata(pdev, NULL);
212
213 return 0;
214}
215
216static struct platform_driver sdhci_pxav2_driver = {
217 .driver = {
218 .name = "sdhci-pxav2",
219 .owner = THIS_MODULE,
220 },
221 .probe = sdhci_pxav2_probe,
222 .remove = __devexit_p(sdhci_pxav2_remove),
223#ifdef CONFIG_PM
224 .suspend = sdhci_pltfm_suspend,
225 .resume = sdhci_pltfm_resume,
226#endif
227};
228static int __init sdhci_pxav2_init(void)
229{
230 return platform_driver_register(&sdhci_pxav2_driver);
231}
232
233static void __exit sdhci_pxav2_exit(void)
234{
235 platform_driver_unregister(&sdhci_pxav2_driver);
236}
237
238module_init(sdhci_pxav2_init);
239module_exit(sdhci_pxav2_exit);
240
241MODULE_DESCRIPTION("SDHCI driver for pxav2");
242MODULE_AUTHOR("Marvell International Ltd.");
243MODULE_LICENSE("GPL v2");
244
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
new file mode 100644
index 00000000000..fc7e4a51562
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -0,0 +1,290 @@
1/*
2 * Copyright (C) 2010 Marvell International Ltd.
3 * Zhangfei Gao <zhangfei.gao@marvell.com>
4 * Kevin Wang <dwang4@marvell.com>
5 * Mingwei Wang <mwwang@marvell.com>
6 * Philip Rakity <prakity@marvell.com>
7 * Mark Brown <markb@marvell.com>
8 *
9 * This software is licensed under the terms of the GNU General Public
10 * License version 2, as published by the Free Software Foundation, and
11 * may be copied, distributed, and modified under those terms.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 */
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/platform_device.h>
22#include <linux/clk.h>
23#include <linux/io.h>
24#include <linux/gpio.h>
25#include <linux/mmc/card.h>
26#include <linux/mmc/host.h>
27#include <linux/platform_data/pxa_sdhci.h>
28#include <linux/slab.h>
29#include <linux/delay.h>
30#include "sdhci.h"
31#include "sdhci-pltfm.h"
32
/* Marvell vendor-specific register offsets, past the standard SDHCI map. */
#define SD_CLOCK_BURST_SIZE_SETUP		0x10A
#define SDCLK_SEL	0x100		/* select the delayed clock path */
#define SDCLK_DELAY_SHIFT	9	/* clk_delay_cycles field position */
#define SDCLK_DELAY_MASK	0x1f	/* clk_delay_cycles field width */

#define SD_CFG_FIFO_PARAM       0x100
#define SDCFG_GEN_PAD_CLK_ON	(1<<6)	/* start driving pad clocks */
#define SDCFG_GEN_PAD_CLK_CNT_MASK	0xFF
#define SDCFG_GEN_PAD_CLK_CNT_SHIFT	24

#define SD_SPI_MODE          0x108
#define SD_CE_ATA_1          0x10C

#define SD_CE_ATA_2          0x10E
#define SDCE_MISC_INT		(1<<2)	/* "74 clocks sent" status bit */
#define SDCE_MISC_INT_EN	(1<<1)	/* enable the above interrupt */
49
50static void pxav3_set_private_registers(struct sdhci_host *host, u8 mask)
51{
52 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
53 struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
54
55 if (mask == SDHCI_RESET_ALL) {
56 /*
57 * tune timing of read data/command when crc error happen
58 * no performance impact
59 */
60 if (pdata && 0 != pdata->clk_delay_cycles) {
61 u16 tmp;
62
63 tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
64 tmp |= (pdata->clk_delay_cycles & SDCLK_DELAY_MASK)
65 << SDCLK_DELAY_SHIFT;
66 tmp |= SDCLK_SEL;
67 writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
68 }
69 }
70}
71
72#define MAX_WAIT_COUNT 5
73static void pxav3_gen_init_74_clocks(struct sdhci_host *host, u8 power_mode)
74{
75 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
76 struct sdhci_pxa *pxa = pltfm_host->priv;
77 u16 tmp;
78 int count;
79
80 if (pxa->power_mode == MMC_POWER_UP
81 && power_mode == MMC_POWER_ON) {
82
83 dev_dbg(mmc_dev(host->mmc),
84 "%s: slot->power_mode = %d,"
85 "ios->power_mode = %d\n",
86 __func__,
87 pxa->power_mode,
88 power_mode);
89
90 /* set we want notice of when 74 clocks are sent */
91 tmp = readw(host->ioaddr + SD_CE_ATA_2);
92 tmp |= SDCE_MISC_INT_EN;
93 writew(tmp, host->ioaddr + SD_CE_ATA_2);
94
95 /* start sending the 74 clocks */
96 tmp = readw(host->ioaddr + SD_CFG_FIFO_PARAM);
97 tmp |= SDCFG_GEN_PAD_CLK_ON;
98 writew(tmp, host->ioaddr + SD_CFG_FIFO_PARAM);
99
100 /* slowest speed is about 100KHz or 10usec per clock */
101 udelay(740);
102 count = 0;
103
104 while (count++ < MAX_WAIT_COUNT) {
105 if ((readw(host->ioaddr + SD_CE_ATA_2)
106 & SDCE_MISC_INT) == 0)
107 break;
108 udelay(10);
109 }
110
111 if (count == MAX_WAIT_COUNT)
112 dev_warn(mmc_dev(host->mmc), "74 clock interrupt not cleared\n");
113
114 /* clear the interrupt bit if posted */
115 tmp = readw(host->ioaddr + SD_CE_ATA_2);
116 tmp |= SDCE_MISC_INT;
117 writew(tmp, host->ioaddr + SD_CE_ATA_2);
118 }
119 pxa->power_mode = power_mode;
120}
121
122static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
123{
124 u16 ctrl_2;
125
126 /*
127 * Set V18_EN -- UHS modes do not work without this.
128 * does not change signaling voltage
129 */
130 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
131
132 /* Select Bus Speed Mode for host */
133 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
134 switch (uhs) {
135 case MMC_TIMING_UHS_SDR12:
136 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
137 break;
138 case MMC_TIMING_UHS_SDR25:
139 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
140 break;
141 case MMC_TIMING_UHS_SDR50:
142 ctrl_2 |= SDHCI_CTRL_UHS_SDR50 | SDHCI_CTRL_VDD_180;
143 break;
144 case MMC_TIMING_UHS_SDR104:
145 ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180;
146 break;
147 case MMC_TIMING_UHS_DDR50:
148 ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180;
149 break;
150 }
151
152 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
153 dev_dbg(mmc_dev(host->mmc),
154 "%s uhs = %d, ctrl_2 = %04X\n",
155 __func__, uhs, ctrl_2);
156
157 return 0;
158}
159
/* Hooks into the generic SDHCI core for pxav3-specific behavior. */
static struct sdhci_ops pxav3_sdhci_ops = {
	.platform_reset_exit = pxav3_set_private_registers,
	.set_uhs_signaling = pxav3_set_uhs_signaling,
	.platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
};
165
/*
 * Probe a pxav3 SDHCI controller.
 *
 * Acquisition order (mirrored in reverse by the error path and remove()):
 *   1. allocate the private sdhci_pxa state,
 *   2. initialize the generic sdhci-pltfm host,
 *   3. get and enable the "PXA-SDHCLK" clock,
 *   4. apply quirks/caps (base set plus optional platform-data overrides),
 *   5. register with the SDHCI core.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
	struct device *dev = &pdev->dev;
	struct sdhci_host *host = NULL;
	struct sdhci_pxa *pxa = NULL;
	int ret;
	struct clk *clk;

	pxa = kzalloc(sizeof(struct sdhci_pxa), GFP_KERNEL);
	if (!pxa)
		return -ENOMEM;

	host = sdhci_pltfm_init(pdev, NULL);
	if (IS_ERR(host)) {
		kfree(pxa);
		return PTR_ERR(host);
	}
	pltfm_host = sdhci_priv(host);
	pltfm_host->priv = pxa;

	clk = clk_get(dev, "PXA-SDHCLK");
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get io clock\n");
		ret = PTR_ERR(clk);
		goto err_clk_get;
	}
	pltfm_host->clk = clk;
	clk_enable(clk);

	host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
		| SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
		| SDHCI_QUIRK_32BIT_ADMA_SIZE;

	/* enable 1/8V DDR capable */
	host->mmc->caps |= MMC_CAP_1_8V_DDR;

	if (pdata) {
		if (pdata->flags & PXA_FLAG_CARD_PERMANENT) {
			/* on-chip device: no card-detect, never removable */
			host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
			host->mmc->caps |= MMC_CAP_NONREMOVABLE;
		}

		/* If slot design supports 8 bit data, indicate this to MMC. */
		if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
			host->mmc->caps |= MMC_CAP_8_BIT_DATA;

		/* platform data may add (never remove) quirks and caps */
		if (pdata->quirks)
			host->quirks |= pdata->quirks;
		if (pdata->host_caps)
			host->mmc->caps |= pdata->host_caps;
		if (pdata->pm_caps)
			host->mmc->pm_caps |= pdata->pm_caps;
	}

	host->ops = &pxav3_sdhci_ops;

	ret = sdhci_add_host(host);
	if (ret) {
		dev_err(&pdev->dev, "failed to add host\n");
		goto err_add_host;
	}

	platform_set_drvdata(pdev, host);

	return 0;

err_add_host:
	clk_disable(clk);
	clk_put(clk);
err_clk_get:
	sdhci_pltfm_free(pdev);
	kfree(pxa);
	return ret;
}
243
244static int __devexit sdhci_pxav3_remove(struct platform_device *pdev)
245{
246 struct sdhci_host *host = platform_get_drvdata(pdev);
247 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
248 struct sdhci_pxa *pxa = pltfm_host->priv;
249
250 sdhci_remove_host(host, 1);
251
252 clk_disable(pltfm_host->clk);
253 clk_put(pltfm_host->clk);
254 sdhci_pltfm_free(pdev);
255 kfree(pxa);
256
257 platform_set_drvdata(pdev, NULL);
258
259 return 0;
260}
261
/* Platform-driver glue for the pxav3 SDHCI controller.  Suspend/resume
 * reuse the generic sdhci-pltfm handlers when CONFIG_PM is enabled. */
static struct platform_driver sdhci_pxav3_driver = {
	.driver		= {
		.name	= "sdhci-pxav3",
		.owner	= THIS_MODULE,
	},
	.probe		= sdhci_pxav3_probe,
	.remove		= __devexit_p(sdhci_pxav3_remove),
#ifdef CONFIG_PM
	.suspend	= sdhci_pltfm_suspend,
	.resume		= sdhci_pltfm_resume,
#endif
};
/* Module entry point: register the platform driver with the core. */
static int __init sdhci_pxav3_init(void)
{
	return platform_driver_register(&sdhci_pxav3_driver);
}

/* Module exit point: unregister the platform driver. */
static void __exit sdhci_pxav3_exit(void)
{
	platform_driver_unregister(&sdhci_pxav3_driver);
}

module_init(sdhci_pxav3_init);
module_exit(sdhci_pxav3_exit);

MODULE_DESCRIPTION("SDHCI driver for pxav3");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");
290
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 69e3ee321eb..fe886d6c474 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -19,6 +19,7 @@
19#include <linux/clk.h> 19#include <linux/clk.h>
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/gpio.h> 21#include <linux/gpio.h>
22#include <linux/module.h>
22 23
23#include <linux/mmc/host.h> 24#include <linux/mmc/host.h>
24 25
@@ -301,6 +302,8 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
301 ctrl &= ~SDHCI_CTRL_8BITBUS; 302 ctrl &= ~SDHCI_CTRL_8BITBUS;
302 break; 303 break;
303 default: 304 default:
305 ctrl &= ~SDHCI_CTRL_4BITBUS;
306 ctrl &= ~SDHCI_CTRL_8BITBUS;
304 break; 307 break;
305 } 308 }
306 309
@@ -502,6 +505,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
502 /* This host supports the Auto CMD12 */ 505 /* This host supports the Auto CMD12 */
503 host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; 506 host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
504 507
508 /* Samsung SoCs need BROKEN_ADMA_ZEROLEN_DESC */
509 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC;
510
505 if (pdata->cd_type == S3C_SDHCI_CD_NONE || 511 if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
506 pdata->cd_type == S3C_SDHCI_CD_PERMANENT) 512 pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
507 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; 513 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
@@ -612,16 +618,14 @@ static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
612{ 618{
613 struct sdhci_host *host = platform_get_drvdata(dev); 619 struct sdhci_host *host = platform_get_drvdata(dev);
614 620
615 sdhci_suspend_host(host, pm); 621 return sdhci_suspend_host(host, pm);
616 return 0;
617} 622}
618 623
619static int sdhci_s3c_resume(struct platform_device *dev) 624static int sdhci_s3c_resume(struct platform_device *dev)
620{ 625{
621 struct sdhci_host *host = platform_get_drvdata(dev); 626 struct sdhci_host *host = platform_get_drvdata(dev);
622 627
623 sdhci_resume_host(host); 628 return sdhci_resume_host(host);
624 return 0;
625} 629}
626 630
627#else 631#else
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 343c97edba3..67950782e09 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -1,6 +1,8 @@
1/* 1/*
2 * Copyright (C) 2010 Google, Inc. 2 * Copyright (C) 2010 Google, Inc.
3 * 3 *
4 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
5 *
4 * This software is licensed under the terms of the GNU General Public 6 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and 7 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms. 8 * may be copied, distributed, and modified under those terms.
@@ -18,15 +20,100 @@
18#include <linux/clk.h> 20#include <linux/clk.h>
19#include <linux/io.h> 21#include <linux/io.h>
20#include <linux/gpio.h> 22#include <linux/gpio.h>
23#include <linux/slab.h>
21#include <linux/mmc/card.h> 24#include <linux/mmc/card.h>
22#include <linux/mmc/host.h> 25#include <linux/mmc/host.h>
26#include <linux/mmc/sd.h>
27#include <linux/regulator/consumer.h>
28#include <linux/delay.h>
23 29
24#include <mach/gpio.h> 30#include <mach/gpio.h>
25#include <mach/sdhci.h> 31#include <mach/sdhci.h>
32#include <mach/io_dpd.h>
26 33
27#include "sdhci.h"
28#include "sdhci-pltfm.h" 34#include "sdhci-pltfm.h"
29 35
36#define SDHCI_VENDOR_CLOCK_CNTRL 0x100
37#define SDHCI_VENDOR_CLOCK_CNTRL_SDMMC_CLK 0x1
38#define SDHCI_VENDOR_CLOCK_CNTRL_PADPIPE_CLKEN_OVERRIDE 0x8
39#define SDHCI_VENDOR_CLOCK_CNTRL_SPI_MODE_CLKEN_OVERRIDE 0x4
40#define SDHCI_VENDOR_CLOCK_CNTRL_BASE_CLK_FREQ_SHIFT 8
41#define SDHCI_VENDOR_CLOCK_CNTRL_TAP_VALUE_SHIFT 16
42#define SDHCI_VENDOR_CLOCK_CNTRL_SDR50_TUNING 0x20
43
44#define SDHCI_VENDOR_MISC_CNTRL 0x120
45#define SDHCI_VENDOR_MISC_CNTRL_ENABLE_SDR104_SUPPORT 0x8
46#define SDHCI_VENDOR_MISC_CNTRL_ENABLE_SDR50_SUPPORT 0x10
47#define SDHCI_VENDOR_MISC_CNTRL_ENABLE_SD_3_0 0x20
48
49#define SDMMC_SDMEMCOMPPADCTRL 0x1E0
50#define SDMMC_SDMEMCOMPPADCTRL_VREF_SEL_MASK 0xF
51
52#define SDMMC_AUTO_CAL_CONFIG 0x1E4
53#define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE 0x20000000
54#define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT 0x8
55#define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET 0x70
56#define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PU_OFFSET 0x62
57
58#define SDHOST_1V8_OCR_MASK 0x8
59#define SDHOST_HIGH_VOLT_MIN 2700000
60#define SDHOST_HIGH_VOLT_MAX 3600000
61#define SDHOST_LOW_VOLT_MIN 1800000
62#define SDHOST_LOW_VOLT_MAX 1800000
63
64#define TEGRA_SDHOST_MIN_FREQ 50000000
65#define TEGRA2_SDHOST_STD_FREQ 50000000
66#define TEGRA3_SDHOST_STD_FREQ 104000000
67
68#define SD_SEND_TUNING_PATTERN 19
69#define MAX_TAP_VALUES 256
70
71static unsigned int tegra_sdhost_min_freq;
72static unsigned int tegra_sdhost_std_freq;
73static void tegra_3x_sdhci_set_card_clock(struct sdhci_host *sdhci, unsigned int clock);
74static void tegra3_sdhci_post_reset_init(struct sdhci_host *sdhci);
75
76static unsigned int tegra3_sdhost_max_clk[4] = {
77 208000000, 104000000, 208000000, 104000000 };
78
79struct tegra_sdhci_hw_ops{
80 /* Set the internal clk and card clk.*/
81 void (*set_card_clock)(struct sdhci_host *sdhci, unsigned int clock);
82 /* Post reset vendor registers configuration */
83 void (*sdhost_init)(struct sdhci_host *sdhci);
84};
85
86#ifdef CONFIG_ARCH_TEGRA_2x_SOC
87static struct tegra_sdhci_hw_ops tegra_2x_sdhci_ops = {
88};
89#endif
90
91#ifdef CONFIG_ARCH_TEGRA_3x_SOC
92static struct tegra_sdhci_hw_ops tegra_3x_sdhci_ops = {
93 .set_card_clock = tegra_3x_sdhci_set_card_clock,
94 .sdhost_init = tegra3_sdhci_post_reset_init,
95};
96#endif
97
/* Per-controller private state, hung off sdhci_pltfm_host->priv. */
struct tegra_sdhci_host {
	/* true while the controller clock is running (see set_clock) */
	bool	clk_enabled;
	/* I/O-rail regulator; used for 3.3V/1.8V signal switching */
	struct regulator *vdd_io_reg;
	/* slot-power regulator, enabled on card insertion */
	struct regulator *vdd_slot_reg;
	/* Pointer to the chip specific HW ops */
	struct tegra_sdhci_hw_ops *hw_ops;
	/* Host controller instance */
	unsigned int instance;
	/* vddio_min */
	unsigned int vddio_min_uv;
	/* vddio_max */
	unsigned int vddio_max_uv;
	/* max clk supported by the platform */
	unsigned int max_clk_limit;
	/* deep-power-down handle for the pad group of this instance */
	struct tegra_io_dpd *dpd;
	/* cached card-detect state, updated from the CD GPIO IRQ */
	bool card_present;
	/* tracks whether the vdd regulators above are currently on */
	bool is_rail_enabled;
};
116
30static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg) 117static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
31{ 118{
32 u32 val; 119 u32 val;
@@ -42,11 +129,12 @@ static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
42 129
43static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg) 130static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
44{ 131{
132#ifdef CONFIG_ARCH_TEGRA_2x_SOC
45 if (unlikely(reg == SDHCI_HOST_VERSION)) { 133 if (unlikely(reg == SDHCI_HOST_VERSION)) {
46 /* Erratum: Version register is invalid in HW. */ 134 /* Erratum: Version register is invalid in HW. */
47 return SDHCI_SPEC_200; 135 return SDHCI_SPEC_200;
48 } 136 }
49 137#endif
50 return readw(host->ioaddr + reg); 138 return readw(host->ioaddr + reg);
51} 139}
52 140
@@ -61,6 +149,7 @@ static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
61 149
62 writel(val, host->ioaddr + reg); 150 writel(val, host->ioaddr + reg);
63 151
152#ifdef CONFIG_ARCH_TEGRA_2x_SOC
64 if (unlikely(reg == SDHCI_INT_ENABLE)) { 153 if (unlikely(reg == SDHCI_INT_ENABLE)) {
65 /* Erratum: Must enable block gap interrupt detection */ 154 /* Erratum: Must enable block gap interrupt detection */
66 u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL); 155 u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
@@ -70,6 +159,15 @@ static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
70 gap_ctrl &= ~0x8; 159 gap_ctrl &= ~0x8;
71 writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL); 160 writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
72 } 161 }
162#endif
163}
164
165static unsigned int tegra_sdhci_get_cd(struct sdhci_host *sdhci)
166{
167 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
168 struct tegra_sdhci_host *tegra_host = pltfm_host->priv;
169
170 return tegra_host->card_present;
73} 171}
74 172
75static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci) 173static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
@@ -85,9 +183,150 @@ static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
85 return gpio_get_value(plat->wp_gpio); 183 return gpio_get_value(plat->wp_gpio);
86} 184}
87 185
/*
 * Reprogram Tegra3 vendor registers after a full controller reset:
 * base clock frequency, pad-pipe clock override, optional tap delay from
 * platform data, SDR50 tuning enable, and the SD 3.0 / SDR50 / SDR104
 * capability bits in the vendor MISC control register.
 */
static void tegra3_sdhci_post_reset_init(struct sdhci_host *sdhci)
{
	u16 misc_ctrl;
	u32 vendor_ctrl;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
	struct tegra_sdhci_host *tegra_host = pltfm_host->priv;
	struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
	struct tegra_sdhci_platform_data *plat;

	plat = pdev->dev.platform_data;
	/* Set the base clock frequency (in MHz, per-instance table) */
	vendor_ctrl = sdhci_readl(sdhci, SDHCI_VENDOR_CLOCK_CNTRL);
	vendor_ctrl &= ~(0xFF << SDHCI_VENDOR_CLOCK_CNTRL_BASE_CLK_FREQ_SHIFT);
	vendor_ctrl |= (tegra3_sdhost_max_clk[tegra_host->instance] / 1000000) <<
		SDHCI_VENDOR_CLOCK_CNTRL_BASE_CLK_FREQ_SHIFT;
	vendor_ctrl |= SDHCI_VENDOR_CLOCK_CNTRL_PADPIPE_CLKEN_OVERRIDE;
	vendor_ctrl &= ~SDHCI_VENDOR_CLOCK_CNTRL_SPI_MODE_CLKEN_OVERRIDE;

	/* Set tap delay (only when platform data provides a nonzero value) */
	if (plat->tap_delay) {
		vendor_ctrl &= ~(0xFF <<
			SDHCI_VENDOR_CLOCK_CNTRL_TAP_VALUE_SHIFT);
		vendor_ctrl |= (plat->tap_delay <<
			SDHCI_VENDOR_CLOCK_CNTRL_TAP_VALUE_SHIFT);
	}
	/* Enable frequency tuning for SDR50 mode */
	vendor_ctrl |= SDHCI_VENDOR_CLOCK_CNTRL_SDR50_TUNING;
	sdhci_writel(sdhci, vendor_ctrl, SDHCI_VENDOR_CLOCK_CNTRL);

	/* Enable SDHOST v3.0 support */
	misc_ctrl = sdhci_readw(sdhci, SDHCI_VENDOR_MISC_CNTRL);
	misc_ctrl |= SDHCI_VENDOR_MISC_CNTRL_ENABLE_SD_3_0 |
		SDHCI_VENDOR_MISC_CNTRL_ENABLE_SDR104_SUPPORT |
		SDHCI_VENDOR_MISC_CNTRL_ENABLE_SDR50_SUPPORT;
	sdhci_writew(sdhci, misc_ctrl, SDHCI_VENDOR_MISC_CNTRL);
}
222
223static int tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
224 unsigned int uhs)
225{
226 u16 clk, ctrl_2;
227 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
228
229 /* Select Bus Speed Mode for host */
230 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
231 switch (uhs) {
232 case MMC_TIMING_UHS_SDR12:
233 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
234 break;
235 case MMC_TIMING_UHS_SDR25:
236 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
237 break;
238 case MMC_TIMING_UHS_SDR50:
239 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
240 break;
241 case MMC_TIMING_UHS_SDR104:
242 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
243 break;
244 case MMC_TIMING_UHS_DDR50:
245 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
246 break;
247 }
248
249 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
250
251 if (uhs == MMC_TIMING_UHS_DDR50) {
252 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
253 clk &= ~(0xFF << SDHCI_DIVIDER_SHIFT);
254 clk |= 1 << SDHCI_DIVIDER_SHIFT;
255 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
256 }
257 return 0;
258}
259
260static void tegra_sdhci_reset_exit(struct sdhci_host *sdhci, u8 mask)
261{
262 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
263 struct tegra_sdhci_host *tegra_host = pltfm_host->priv;
264
265 if (mask & SDHCI_RESET_ALL) {
266 if (tegra_host->hw_ops->sdhost_init)
267 tegra_host->hw_ops->sdhost_init(sdhci);
268 }
269}
270
/*
 * External card-status notification callback (e.g. from a platform hook).
 * Queries the platform's status() callback if present; on a state change
 * a detect is scheduled — debounced by 2.5s for removable cards, immediate
 * for built-in devices or removal.  Without a status() callback it simply
 * triggers an immediate rescan.
 */
static void sdhci_status_notify_cb(int card_present, void *dev_id)
{
	struct sdhci_host *sdhci = (struct sdhci_host *)dev_id;
	struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
	struct tegra_sdhci_platform_data *plat;
	unsigned int status, oldstat;

	pr_debug("%s: card_present %d\n", mmc_hostname(sdhci->mmc),
		card_present);

	plat = pdev->dev.platform_data;
	if (!plat->mmc_data.status) {
		/* no platform status hook: rescan immediately */
		mmc_detect_change(sdhci->mmc, 0);
		return;
	}

	status = plat->mmc_data.status(mmc_dev(sdhci->mmc));

	oldstat = plat->mmc_data.card_present;
	plat->mmc_data.card_present = status;
	if (status ^ oldstat) {
		pr_debug("%s: Slot status change detected (%d -> %d)\n",
			mmc_hostname(sdhci->mmc), oldstat, status);
		if (status && !plat->mmc_data.built_in)
			/* insertion of a removable card: debounce 2.5s */
			mmc_detect_change(sdhci->mmc, (5 * HZ) / 2);
		else
			mmc_detect_change(sdhci->mmc, 0);
	}
}
300
88static irqreturn_t carddetect_irq(int irq, void *data) 301static irqreturn_t carddetect_irq(int irq, void *data)
89{ 302{
90 struct sdhci_host *sdhost = (struct sdhci_host *)data; 303 struct sdhci_host *sdhost = (struct sdhci_host *)data;
304 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhost);
305 struct tegra_sdhci_host *tegra_host = pltfm_host->priv;
306 struct platform_device *pdev = to_platform_device(mmc_dev(sdhost->mmc));
307 struct tegra_sdhci_platform_data *plat;
308
309 plat = pdev->dev.platform_data;
310
311 tegra_host->card_present = (gpio_get_value(plat->cd_gpio) == 0);
312
313 if (tegra_host->card_present) {
314 if (!tegra_host->is_rail_enabled) {
315 if (tegra_host->vdd_slot_reg)
316 regulator_enable(tegra_host->vdd_slot_reg);
317 if (tegra_host->vdd_io_reg)
318 regulator_enable(tegra_host->vdd_io_reg);
319 tegra_host->is_rail_enabled = 1;
320 }
321 } /* else {
322 if (tegra_host->is_rail_enabled) {
323 if (tegra_host->vdd_io_reg)
324 regulator_disable(tegra_host->vdd_io_reg);
325 if (tegra_host->vdd_slot_reg)
326 regulator_disable(tegra_host->vdd_slot_reg);
327 tegra_host->is_rail_enabled = 0;
328 }
329 } */
91 330
92 tasklet_schedule(&sdhost->card_tasklet); 331 tasklet_schedule(&sdhost->card_tasklet);
93 return IRQ_HANDLED; 332 return IRQ_HANDLED;
@@ -116,28 +355,597 @@ static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
116 return 0; 355 return 0;
117} 356}
118 357
/*
 * Pick and program the controller's source-clock rate for a requested
 * card clock, then refresh sdhci->max_clk with the rate actually achieved.
 * DDR mode doubles the controller clock; otherwise the request is rounded
 * up to the min/std platform frequencies, and capped by max_clk_limit.
 */
static void tegra_sdhci_set_clk_rate(struct sdhci_host *sdhci,
	unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
	struct tegra_sdhci_host *tegra_host = pltfm_host->priv;
	unsigned int clk_rate;

	if (sdhci->mmc->card &&
		mmc_card_ddr_mode(sdhci->mmc->card)) {
		/*
		 * In ddr mode, tegra sdmmc controller clock frequency
		 * should be double the card clock frequency.
		 */
		clk_rate = clock * 2;
	} else {
		/* round the request up to the platform's min/std rates */
		if (clock <= tegra_sdhost_min_freq)
			clk_rate = tegra_sdhost_min_freq;
		else if (clock <= tegra_sdhost_std_freq)
			clk_rate = tegra_sdhost_std_freq;
		else
			clk_rate = clock;

		/*
		 * In SDR50 mode, run the sdmmc controller at 208MHz to ensure
		 * the core voltage is at 1.2V. If the core voltage is below 1.2V, CRC
		 * errors would occur during data transfers.
		 */
		if ((sdhci->mmc->ios.timing == MMC_TIMING_UHS_SDR50) &&
			(clk_rate == tegra_sdhost_std_freq))
			clk_rate <<= 1;
	}

	/* honor the board-imposed upper bound, if any */
	if (tegra_host->max_clk_limit &&
		(clk_rate > tegra_host->max_clk_limit))
		clk_rate = tegra_host->max_clk_limit;

	clk_set_rate(pltfm_host->clk, clk_rate);
	/* record what the clock framework actually granted */
	sdhci->max_clk = clk_get_rate(pltfm_host->clk);
}
397
/*
 * Tegra3-specific internal/card clock programming, replacing the generic
 * SDHCI sequence.  Computes the divider (spec 2.00 power-of-two or spec
 * 3.00 even divisors; DDR50 forces div=1), then applies the Tegra3
 * clock-stabilization workaround: enable the internal clock, wait 5us,
 * issue a dummy register write, and poll CLOCK_INT_STABLE before enabling
 * the card clock.  clock == 0 just gates the clock.
 */
static void tegra_3x_sdhci_set_card_clock(struct sdhci_host *sdhci, unsigned int clock)
{
	int div;
	u16 clk;
	unsigned long timeout;
	u8 ctrl;

	/* nothing to do if the requested clock is already programmed */
	if (clock && clock == sdhci->clock)
		return;

	/* gate everything before reprogramming */
	sdhci_writew(sdhci, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		goto out;
	if (sdhci->mmc->ios.timing == MMC_TIMING_UHS_DDR50) {
		/* DDR50 always runs with divider 1 (controller at 2x) */
		div = 1;
		goto set_clk;
	}

	if (sdhci->version >= SDHCI_SPEC_300) {
		/* Version 3.00 divisors must be a multiple of 2. */
		if (sdhci->max_clk <= clock) {
			div = 1;
		} else {
			for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; div += 2) {
				if ((sdhci->max_clk / div) <= clock)
					break;
			}
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((sdhci->max_clk / div) <= clock)
				break;
		}
	}
	/* the register encodes div/2 */
	div >>= 1;

	/*
	 * Tegra3 sdmmc controller internal clock will not be stabilized when
	 * we use a clock divider value greater than 4. The WAR is as follows.
	 * - Enable internal clock.
	 * - Wait for 5 usec and do a dummy write.
	 * - Poll for clk stable.
	 */
set_clk:
	clk = (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);

	/* Wait for 5 usec */
	udelay(5);

	/* Do a dummy write */
	ctrl = sdhci_readb(sdhci, SDHCI_CAPABILITIES);
	ctrl |= 1;
	sdhci_writeb(sdhci, ctrl, SDHCI_CAPABILITIES);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			dev_err(mmc_dev(sdhci->mmc), "Internal clock never stabilised\n");
			return;
		}
		timeout--;
		mdelay(1);
	}

	/* internal clock stable: now enable the card clock */
	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
out:
	sdhci->clock = clock;
}
475
476static void tegra_sdhci_set_clock(struct sdhci_host *sdhci, unsigned int clock)
477{
478 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
479 struct tegra_sdhci_host *tegra_host = pltfm_host->priv;
480 u8 ctrl;
481
482 pr_debug("%s %s %u enabled=%u\n", __func__,
483 mmc_hostname(sdhci->mmc), clock, tegra_host->clk_enabled);
119 484
120static int tegra_sdhci_pltfm_init(struct sdhci_host *host, 485 if (clock) {
121 struct sdhci_pltfm_data *pdata) 486 /* bring out sd instance from io dpd mode */
487 tegra_io_dpd_disable(tegra_host->dpd);
488
489 if (!tegra_host->clk_enabled) {
490 clk_enable(pltfm_host->clk);
491 ctrl = sdhci_readb(sdhci, SDHCI_VENDOR_CLOCK_CNTRL);
492 ctrl |= SDHCI_VENDOR_CLOCK_CNTRL_SDMMC_CLK;
493 sdhci_writeb(sdhci, ctrl, SDHCI_VENDOR_CLOCK_CNTRL);
494 tegra_host->clk_enabled = true;
495 }
496 tegra_sdhci_set_clk_rate(sdhci, clock);
497 if (tegra_host->hw_ops->set_card_clock)
498 tegra_host->hw_ops->set_card_clock(sdhci, clock);
499 } else if (!clock && tegra_host->clk_enabled) {
500 if (tegra_host->hw_ops->set_card_clock)
501 tegra_host->hw_ops->set_card_clock(sdhci, clock);
502 ctrl = sdhci_readb(sdhci, SDHCI_VENDOR_CLOCK_CNTRL);
503 ctrl &= ~SDHCI_VENDOR_CLOCK_CNTRL_SDMMC_CLK;
504 sdhci_writeb(sdhci, ctrl, SDHCI_VENDOR_CLOCK_CNTRL);
505 clk_disable(pltfm_host->clk);
506 tegra_host->clk_enabled = false;
507 /* io dpd enable call for sd instance */
508 tegra_io_dpd_enable(tegra_host->dpd);
509 }
510}
511
512static int tegra_sdhci_signal_voltage_switch(struct sdhci_host *sdhci,
513 unsigned int signal_voltage)
122{ 514{
123 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 515 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
124 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); 516 struct tegra_sdhci_host *tegra_host = pltfm_host->priv;
517 unsigned int min_uV = SDHOST_HIGH_VOLT_MIN;
518 unsigned int max_uV = SDHOST_HIGH_VOLT_MAX;
519 unsigned int rc = 0;
520 u16 clk, ctrl;
521 unsigned int val;
522
523 /* Switch OFF the card clock to prevent glitches on the clock line */
524 clk = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL);
525 clk &= ~SDHCI_CLOCK_CARD_EN;
526 sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
527
528 ctrl = sdhci_readw(sdhci, SDHCI_HOST_CONTROL2);
529 if (signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
530 ctrl |= SDHCI_CTRL_VDD_180;
531 min_uV = SDHOST_LOW_VOLT_MIN;
532 max_uV = SDHOST_LOW_VOLT_MAX;
533 } else if (signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
534 if (ctrl & SDHCI_CTRL_VDD_180)
535 ctrl &= ~SDHCI_CTRL_VDD_180;
536 }
537 sdhci_writew(sdhci, ctrl, SDHCI_HOST_CONTROL2);
538
539 /* Switch the I/O rail voltage */
540 if (tegra_host->vdd_io_reg) {
541 rc = regulator_set_voltage(tegra_host->vdd_io_reg,
542 min_uV, max_uV);
543 if (rc) {
544 dev_err(mmc_dev(sdhci->mmc), "switching to 1.8V"
545 "failed . Switching back to 3.3V\n");
546 regulator_set_voltage(tegra_host->vdd_io_reg,
547 SDHOST_HIGH_VOLT_MIN,
548 SDHOST_HIGH_VOLT_MAX);
549 goto out;
550 }
551 }
552
553 /* Wait for 10 msec for the voltage to be switched */
554 mdelay(10);
555
556 /* Enable the card clock */
557 clk |= SDHCI_CLOCK_CARD_EN;
558 sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
559
560 /* Wait for 1 msec after enabling clock */
561 mdelay(1);
562
563 if (signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
564 /* Do Auto Calibration for 1.8V signal voltage */
565 val = sdhci_readl(sdhci, SDMMC_AUTO_CAL_CONFIG);
566 val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
567 /* Program Auto cal PD offset(bits 8:14) */
568 val &= ~(0x7F <<
569 SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT);
570 val |= (SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET <<
571 SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT);
572 /* Program Auto cal PU offset(bits 0:6) */
573 val &= ~0x7F;
574 val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PU_OFFSET;
575 sdhci_writel(sdhci, val, SDMMC_AUTO_CAL_CONFIG);
576
577 val = sdhci_readl(sdhci, SDMMC_SDMEMCOMPPADCTRL);
578 val &= ~SDMMC_SDMEMCOMPPADCTRL_VREF_SEL_MASK;
579 val |= 0x7;
580 sdhci_writel(sdhci, val, SDMMC_SDMEMCOMPPADCTRL);
581 }
582
583 return rc;
584out:
585 /* Enable the card clock */
586 clk |= SDHCI_CLOCK_CARD_EN;
587 sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
588
589 /* Wait for 1 msec for the clock to stabilize */
590 mdelay(1);
591
592 return rc;
593}
594
595static void tegra_sdhci_reset(struct sdhci_host *sdhci, u8 mask)
596{
597 unsigned long timeout;
598
599 sdhci_writeb(sdhci, mask, SDHCI_SOFTWARE_RESET);
600
601 /* Wait max 100 ms */
602 timeout = 100;
603
604 /* hw clears the bit when it's done */
605 while (sdhci_readb(sdhci, SDHCI_SOFTWARE_RESET) & mask) {
606 if (timeout == 0) {
607 dev_err(mmc_dev(sdhci->mmc), "Reset 0x%x never"
608 "completed.\n", (int)mask);
609 return;
610 }
611 timeout--;
612 mdelay(1);
613 }
614}
615
616static void sdhci_tegra_set_tap_delay(struct sdhci_host *sdhci,
617 unsigned int tap_delay)
618{
619 u32 vendor_ctrl;
620
621 /* Max tap delay value is 255 */
622 BUG_ON(tap_delay > MAX_TAP_VALUES);
623
624 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VENDOR_CLOCK_CNTRL);
625 vendor_ctrl &= ~(0xFF << SDHCI_VENDOR_CLOCK_CNTRL_TAP_VALUE_SHIFT);
626 vendor_ctrl |= (tap_delay << SDHCI_VENDOR_CLOCK_CNTRL_TAP_VALUE_SHIFT);
627 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VENDOR_CLOCK_CNTRL);
628}
629
630static void sdhci_tegra_clear_set_irqs(struct sdhci_host *host,
631 u32 clear, u32 set)
632{
633 u32 ier;
634
635 ier = sdhci_readl(host, SDHCI_INT_ENABLE);
636 ier &= ~clear;
637 ier |= set;
638 sdhci_writel(host, ier, SDHCI_INT_ENABLE);
639 sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
640}
641
/*
 * Execute one CMD19 (SEND_TUNING_PATTERN) cycle at the current tap value.
 * Sequence: mask all interrupts except Buffer Read Ready / data CRC, wait
 * for CMD/DAT inhibit to clear, set EXEC_TUNING, issue CMD19 with a
 * 64-byte block, then poll the interrupt status.  Success requires a
 * DATA_AVAIL interrupt with no CRC error AND the controller reporting
 * TUNED_CLK with EXEC_TUNING cleared.  Returns 0 on success, -ETIMEDOUT
 * or -EIO on failure.  Restores the original interrupt mask on exit.
 */
static int sdhci_tegra_run_frequency_tuning(struct sdhci_host *sdhci)
{
	int err = 0;
	u8 ctrl;
	u32 ier;
	u32 mask;
	unsigned int timeout = 10;
	int flags;
	u32 intstatus;

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt only, so enable that.
	 */
	ier = sdhci_readl(sdhci, SDHCI_INT_ENABLE);
	sdhci_tegra_clear_set_irqs(sdhci, ier, SDHCI_INT_DATA_AVAIL |
		SDHCI_INT_DATA_CRC);

	/* wait (max ~10 ms) for any in-flight command/data to finish */
	mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;
	while (sdhci_readl(sdhci, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			/* NOTE(review): adjacent literals concatenate to
			 * "neverreleased" — missing space; fix separately */
			dev_err(mmc_dev(sdhci->mmc), "Controller never"
				"released inhibit bit(s).\n");
			err = -ETIMEDOUT;
			goto out;
		}
		timeout--;
		mdelay(1);
	}

	/* drop any previously-tuned clock, then start a fresh tuning run */
	ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
	ctrl &= ~SDHCI_CTRL_TUNED_CLK;
	sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL2);

	ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * In response to CMD19, the card sends 64 bytes of tuning
	 * block to the Host Controller. So we set the block size
	 * to 64 here.
	 */
	sdhci_writew(sdhci, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE);

	sdhci_writeb(sdhci, 0xE, SDHCI_TIMEOUT_CONTROL);

	sdhci_writeb(sdhci, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

	sdhci_writel(sdhci, 0x0, SDHCI_ARGUMENT);

	/* Set the cmd flags */
	flags = SDHCI_CMD_RESP_SHORT | SDHCI_CMD_CRC | SDHCI_CMD_DATA;
	/* Issue the command */
	sdhci_writew(sdhci, SDHCI_MAKE_CMD(
		SD_SEND_TUNING_PATTERN, flags), SDHCI_COMMAND);

	/* poll up to ~5 ms for any interrupt, then ack it */
	timeout = 5;
	do {
		timeout--;
		mdelay(1);
		intstatus = sdhci_readl(sdhci, SDHCI_INT_STATUS);
		if (intstatus) {
			sdhci_writel(sdhci, intstatus, SDHCI_INT_STATUS);
			break;
		}
	} while(timeout);

	if ((intstatus & SDHCI_INT_DATA_AVAIL) &&
		!(intstatus & SDHCI_INT_DATA_CRC)) {
		/* pattern received cleanly at this tap value */
		err = 0;
		sdhci->tuning_done = 1;
	} else {
		/* reset CMD and DATA lines so the next attempt starts clean */
		tegra_sdhci_reset(sdhci, SDHCI_RESET_CMD);
		tegra_sdhci_reset(sdhci, SDHCI_RESET_DATA);
		err = -EIO;
	}

	if (sdhci->tuning_done) {
		sdhci->tuning_done = 0;
		/* controller must confirm: tuning finished and clock tuned */
		ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING) &&
			(ctrl & SDHCI_CTRL_TUNED_CLK))
			err = 0;
		else
			err = -EIO;
	}
	mdelay(1);
out:
	/* restore the caller's interrupt mask */
	sdhci_tegra_clear_set_irqs(sdhci, SDHCI_INT_DATA_AVAIL, ier);
	return err;
}
734
735static int sdhci_tegra_execute_tuning(struct sdhci_host *sdhci)
736{
737 int err;
738 u16 ctrl_2;
739 u8 *tap_delay_status;
740 unsigned int i = 0;
741 unsigned int temp_low_pass_tap = 0;
742 unsigned int temp_pass_window = 0;
743 unsigned int best_low_pass_tap = 0;
744 unsigned int best_pass_window = 0;
745
746 /* Tuning is valid only in SDR104 and SDR50 modes */
747 ctrl_2 = sdhci_readw(sdhci, SDHCI_HOST_CONTROL2);
748 if (!(((ctrl_2 & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
749 (((ctrl_2 & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
750 (sdhci->flags & SDHCI_SDR50_NEEDS_TUNING))))
751 return 0;
752
753 tap_delay_status = kzalloc(MAX_TAP_VALUES, GFP_KERNEL);
754 if (tap_delay_status == NULL) {
755 dev_err(mmc_dev(sdhci->mmc), "failed to allocate memory"
756 "for storing tap_delay_status\n");
757 err = -ENOMEM;
758 goto out;
759 }
760
761 /*
762 * Set each tap delay value and run frequency tuning. After each
763 * run, update the tap delay status as working or not working.
764 */
765 do {
766 /* Set the tap delay */
767 sdhci_tegra_set_tap_delay(sdhci, i);
768
769 /* Run frequency tuning */
770 err = sdhci_tegra_run_frequency_tuning(sdhci);
771
772 /* Update whether the tap delay worked or not */
773 tap_delay_status[i] = (err) ? 0: 1;
774 i++;
775 } while (i < 0xFF);
776
777 /* Find the best possible tap range */
778 for (i = 0; i < 0xFF; i++) {
779 temp_pass_window = 0;
780
781 /* Find the first passing tap in the current window */
782 if (tap_delay_status[i]) {
783 temp_low_pass_tap = i;
784
785 /* Find the pass window */
786 do {
787 temp_pass_window++;
788 i++;
789 if (i > 0xFF)
790 break;
791 } while (tap_delay_status[i]);
792
793 if ((temp_pass_window > best_pass_window) && (temp_pass_window > 1)){
794 best_low_pass_tap = temp_low_pass_tap;
795 best_pass_window = temp_pass_window;
796 }
797 }
798 }
799
800
801 pr_debug("%s: best pass tap window: start %d, end %d\n",
802 mmc_hostname(sdhci->mmc), best_low_pass_tap,
803 (best_low_pass_tap + best_pass_window));
804
805 /* Set the best tap */
806 sdhci_tegra_set_tap_delay(sdhci,
807 (best_low_pass_tap + ((best_pass_window * 3) / 4)));
808
809 /* Run frequency tuning */
810 err = sdhci_tegra_run_frequency_tuning(sdhci);
811
812out:
813 if (tap_delay_status)
814 kfree(tap_delay_status);
815
816 return err;
817}
818
819static int tegra_sdhci_suspend(struct sdhci_host *sdhci, pm_message_t state)
820{
821 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
822 struct tegra_sdhci_host *tegra_host = pltfm_host->priv;
823
824 tegra_sdhci_set_clock(sdhci, 0);
825
826 /* Disable the power rails if any */
827 if (tegra_host->card_present) {
828 if (tegra_host->is_rail_enabled) {
829 if (tegra_host->vdd_io_reg)
830 regulator_disable(tegra_host->vdd_io_reg);
831 if (tegra_host->vdd_slot_reg)
832 regulator_disable(tegra_host->vdd_slot_reg);
833 tegra_host->is_rail_enabled = 0;
834 }
835 }
836
837 return 0;
838}
839
840static int tegra_sdhci_resume(struct sdhci_host *sdhci)
841{
842 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
843 struct tegra_sdhci_host *tegra_host = pltfm_host->priv;
844
845 /* Enable the power rails if any */
846 if (tegra_host->card_present) {
847 if (!tegra_host->is_rail_enabled) {
848 if (tegra_host->vdd_slot_reg)
849 regulator_enable(tegra_host->vdd_slot_reg);
850 if (tegra_host->vdd_io_reg) {
851 regulator_enable(tegra_host->vdd_io_reg);
852 tegra_sdhci_signal_voltage_switch(sdhci, MMC_SIGNAL_VOLTAGE_330);
853 }
854 tegra_host->is_rail_enabled = 1;
855 }
856 }
857 /* Setting the min identification clock of freq 400KHz */
858 tegra_sdhci_set_clock(sdhci, 400000);
859
860 /* Reset the controller and power on if MMC_KEEP_POWER flag is set*/
861 if (sdhci->mmc->pm_flags & MMC_PM_KEEP_POWER) {
862 tegra_sdhci_reset(sdhci, SDHCI_RESET_ALL);
863 sdhci_writeb(sdhci, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
864 sdhci->pwr = 0;
865 }
866
867 return 0;
868}
869
/*
 * Tegra-specific callbacks plugged into the generic sdhci core.
 * Register accessors (read_l/read_w/write_l) wrap the MMIO helpers,
 * and the non-standard clock, voltage-switch and tuning quirks route
 * into the platform implementations defined above.
 */
static struct sdhci_ops tegra_sdhci_ops = {
	.get_ro = tegra_sdhci_get_ro,
	.get_cd = tegra_sdhci_get_cd,
	.read_l = tegra_sdhci_readl,
	.read_w = tegra_sdhci_readw,
	.write_l = tegra_sdhci_writel,
	.platform_8bit_width = tegra_sdhci_8bit,
	.set_clock = tegra_sdhci_set_clock,
	.suspend = tegra_sdhci_suspend,
	.resume = tegra_sdhci_resume,
	.platform_reset_exit = tegra_sdhci_reset_exit,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.switch_signal_voltage = tegra_sdhci_signal_voltage_switch,
	.execute_freq_tuning = sdhci_tegra_execute_tuning,
};
885
/*
 * Platform data handed to sdhci_pltfm_init(): the quirk mask tells the
 * sdhci core which standard behaviors this controller deviates from.
 * The conditional quirks select Tegra3-only behavior (non-standard clock
 * and tuning, SDCLK-based data timeout, non-standard voltage switching)
 * that does not apply to Tegra2.
 */
static struct sdhci_pltfm_data sdhci_tegra_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
#ifndef CONFIG_ARCH_TEGRA_2x_SOC
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_NON_STD_VOLTAGE_SWITCHING |
#endif
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
		  SDHCI_QUIRK_NONSTANDARD_CLOCK |
		  SDHCI_QUIRK_NON_STANDARD_TUNING |
#endif
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_NO_CALC_MAX_DISCARD_TO |
		  SDHCI_QUIRK_BROKEN_CARD_DETECTION,
	.ops  = &tegra_sdhci_ops,
};
903
904static int __devinit sdhci_tegra_probe(struct platform_device *pdev)
905{
906 struct sdhci_pltfm_host *pltfm_host;
125 struct tegra_sdhci_platform_data *plat; 907 struct tegra_sdhci_platform_data *plat;
908 struct sdhci_host *host;
909 struct tegra_sdhci_host *tegra_host;
126 struct clk *clk; 910 struct clk *clk;
127 int rc; 911 int rc;
128 912
913 host = sdhci_pltfm_init(pdev, &sdhci_tegra_pdata);
914 if (IS_ERR(host))
915 return PTR_ERR(host);
916
917 pltfm_host = sdhci_priv(host);
918
129 plat = pdev->dev.platform_data; 919 plat = pdev->dev.platform_data;
920
130 if (plat == NULL) { 921 if (plat == NULL) {
131 dev_err(mmc_dev(host->mmc), "missing platform data\n"); 922 dev_err(mmc_dev(host->mmc), "missing platform data\n");
132 return -ENXIO; 923 rc = -ENXIO;
924 goto err_no_plat;
925 }
926
927 tegra_host = kzalloc(sizeof(struct tegra_sdhci_host), GFP_KERNEL);
928 if (tegra_host == NULL) {
929 dev_err(mmc_dev(host->mmc), "failed to allocate tegra host\n");
930 rc = -ENOMEM;
931 goto err_no_mem;
133 } 932 }
134 933
934#ifdef CONFIG_MMC_EMBEDDED_SDIO
935 if (plat->mmc_data.embedded_sdio)
936 mmc_set_embedded_sdio_data(host->mmc,
937 &plat->mmc_data.embedded_sdio->cis,
938 &plat->mmc_data.embedded_sdio->cccr,
939 plat->mmc_data.embedded_sdio->funcs,
940 plat->mmc_data.embedded_sdio->num_funcs);
941#endif
942
135 if (gpio_is_valid(plat->power_gpio)) { 943 if (gpio_is_valid(plat->power_gpio)) {
136 rc = gpio_request(plat->power_gpio, "sdhci_power"); 944 rc = gpio_request(plat->power_gpio, "sdhci_power");
137 if (rc) { 945 if (rc) {
138 dev_err(mmc_dev(host->mmc), 946 dev_err(mmc_dev(host->mmc),
139 "failed to allocate power gpio\n"); 947 "failed to allocate power gpio\n");
140 goto out; 948 goto err_power_req;
141 } 949 }
142 tegra_gpio_enable(plat->power_gpio); 950 tegra_gpio_enable(plat->power_gpio);
143 gpio_direction_output(plat->power_gpio, 1); 951 gpio_direction_output(plat->power_gpio, 1);
@@ -148,20 +956,34 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
148 if (rc) { 956 if (rc) {
149 dev_err(mmc_dev(host->mmc), 957 dev_err(mmc_dev(host->mmc),
150 "failed to allocate cd gpio\n"); 958 "failed to allocate cd gpio\n");
151 goto out_power; 959 goto err_cd_req;
152 } 960 }
153 tegra_gpio_enable(plat->cd_gpio); 961 tegra_gpio_enable(plat->cd_gpio);
154 gpio_direction_input(plat->cd_gpio); 962 gpio_direction_input(plat->cd_gpio);
155 963
156 rc = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq, 964 tegra_host->card_present = (gpio_get_value(plat->cd_gpio) == 0);
965
966 rc = request_threaded_irq(gpio_to_irq(plat->cd_gpio), NULL,
967 carddetect_irq,
157 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, 968 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
158 mmc_hostname(host->mmc), host); 969 mmc_hostname(host->mmc), host);
159 970
160 if (rc) { 971 if (rc) {
161 dev_err(mmc_dev(host->mmc), "request irq error\n"); 972 dev_err(mmc_dev(host->mmc), "request irq error\n");
162 goto out_cd; 973 goto err_cd_irq_req;
163 } 974 }
975 rc = enable_irq_wake(gpio_to_irq(plat->cd_gpio));
976 if (rc < 0)
977 dev_err(mmc_dev(host->mmc),
978 "SD card wake-up event registration"
 979 "failed with error: %d\n", rc);
980
981 } else if (plat->mmc_data.register_status_notify) {
982 plat->mmc_data.register_status_notify(sdhci_status_notify_cb, host);
983 }
164 984
985 if (plat->mmc_data.status) {
986 plat->mmc_data.card_present = plat->mmc_data.status(mmc_dev(host->mmc));
165 } 987 }
166 988
167 if (gpio_is_valid(plat->wp_gpio)) { 989 if (gpio_is_valid(plat->wp_gpio)) {
@@ -169,61 +991,167 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
169 if (rc) { 991 if (rc) {
170 dev_err(mmc_dev(host->mmc), 992 dev_err(mmc_dev(host->mmc),
171 "failed to allocate wp gpio\n"); 993 "failed to allocate wp gpio\n");
172 goto out_irq; 994 goto err_wp_req;
173 } 995 }
174 tegra_gpio_enable(plat->wp_gpio); 996 tegra_gpio_enable(plat->wp_gpio);
175 gpio_direction_input(plat->wp_gpio); 997 gpio_direction_input(plat->wp_gpio);
176 } 998 }
177 999
1000 /*
1001 * If there is no card detect gpio, assume that the
1002 * card is always present.
1003 */
1004 if (!gpio_is_valid(plat->cd_gpio))
1005 tegra_host->card_present = 1;
1006
1007 if (!plat->mmc_data.built_in) {
1008 if (plat->mmc_data.ocr_mask & SDHOST_1V8_OCR_MASK) {
1009 tegra_host->vddio_min_uv = SDHOST_LOW_VOLT_MIN;
1010 tegra_host->vddio_max_uv = SDHOST_LOW_VOLT_MAX;
1011 } else {
1012 /*
1013 * Set the minV and maxV to default
1014 * voltage range of 2.7V - 3.6V
1015 */
1016 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_MIN;
1017 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
1018 }
1019 tegra_host->vdd_io_reg = regulator_get(mmc_dev(host->mmc), "vddio_sdmmc");
1020 if (IS_ERR_OR_NULL(tegra_host->vdd_io_reg)) {
1021 dev_err(mmc_dev(host->mmc), "%s regulator not found: %ld\n",
1022 "vddio_sdmmc", PTR_ERR(tegra_host->vdd_io_reg));
1023 tegra_host->vdd_io_reg = NULL;
1024 } else {
1025 rc = regulator_set_voltage(tegra_host->vdd_io_reg,
1026 tegra_host->vddio_min_uv,
1027 tegra_host->vddio_max_uv);
1028 if (rc) {
1029 dev_err(mmc_dev(host->mmc), "%s regulator_set_voltage failed: %d",
1030 "vddio_sdmmc", rc);
1031 }
1032 }
1033
1034 tegra_host->vdd_slot_reg = regulator_get(mmc_dev(host->mmc), "vddio_sd_slot");
1035 if (IS_ERR_OR_NULL(tegra_host->vdd_slot_reg)) {
1036 dev_err(mmc_dev(host->mmc), "%s regulator not found: %ld\n",
1037 "vddio_sd_slot", PTR_ERR(tegra_host->vdd_slot_reg));
1038 tegra_host->vdd_slot_reg = NULL;
1039 }
1040
1041 if (tegra_host->card_present) {
1042 if (tegra_host->vdd_slot_reg)
1043 regulator_enable(tegra_host->vdd_slot_reg);
1044 if (tegra_host->vdd_io_reg)
1045 regulator_enable(tegra_host->vdd_io_reg);
1046 tegra_host->is_rail_enabled = 1;
1047 }
1048 }
1049
178 clk = clk_get(mmc_dev(host->mmc), NULL); 1050 clk = clk_get(mmc_dev(host->mmc), NULL);
179 if (IS_ERR(clk)) { 1051 if (IS_ERR(clk)) {
180 dev_err(mmc_dev(host->mmc), "clk err\n"); 1052 dev_err(mmc_dev(host->mmc), "clk err\n");
181 rc = PTR_ERR(clk); 1053 rc = PTR_ERR(clk);
182 goto out_wp; 1054 goto err_clk_get;
183 } 1055 }
184 clk_enable(clk); 1056 rc = clk_enable(clk);
1057 if (rc != 0)
1058 goto err_clk_put;
185 pltfm_host->clk = clk; 1059 pltfm_host->clk = clk;
1060 pltfm_host->priv = tegra_host;
1061 tegra_host->clk_enabled = true;
1062 tegra_host->max_clk_limit = plat->max_clk_limit;
1063 tegra_host->instance = pdev->id;
1064 tegra_host->dpd = tegra_io_dpd_get(mmc_dev(host->mmc));
186 1065
187 host->mmc->pm_caps = plat->pm_flags; 1066 host->mmc->pm_caps |= plat->pm_caps;
1067 host->mmc->pm_flags |= plat->pm_flags;
188 1068
1069 host->mmc->caps |= MMC_CAP_ERASE;
1070 host->mmc->caps |= MMC_CAP_DISABLE;
1071 /* enable 1/8V DDR capable */
1072 host->mmc->caps |= MMC_CAP_1_8V_DDR;
189 if (plat->is_8bit) 1073 if (plat->is_8bit)
190 host->mmc->caps |= MMC_CAP_8_BIT_DATA; 1074 host->mmc->caps |= MMC_CAP_8_BIT_DATA;
1075 host->mmc->caps |= MMC_CAP_SDIO_IRQ;
1076
1077 host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_IGNORE_PM_NOTIFY;
1078 if (plat->mmc_data.built_in) {
1079 host->mmc->caps |= MMC_CAP_NONREMOVABLE;
1080 host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY;
1081 }
1082 /* Do not turn OFF embedded sdio cards as it support Wake on Wireless */
1083 if (plat->mmc_data.embedded_sdio)
1084 host->mmc->pm_flags |= MMC_PM_KEEP_POWER;
1085
1086 tegra_sdhost_min_freq = TEGRA_SDHOST_MIN_FREQ;
1087#ifdef CONFIG_ARCH_TEGRA_2x_SOC
1088 tegra_host->hw_ops = &tegra_2x_sdhci_ops;
1089 tegra_sdhost_std_freq = TEGRA2_SDHOST_STD_FREQ;
1090#else
1091 tegra_host->hw_ops = &tegra_3x_sdhci_ops;
1092 tegra_sdhost_std_freq = TEGRA3_SDHOST_STD_FREQ;
1093#endif
1094
1095 rc = sdhci_add_host(host);
1096 if (rc)
1097 goto err_add_host;
191 1098
192 return 0; 1099 return 0;
193 1100
194out_wp: 1101err_add_host:
1102 clk_disable(pltfm_host->clk);
1103err_clk_put:
1104 clk_put(pltfm_host->clk);
1105err_clk_get:
195 if (gpio_is_valid(plat->wp_gpio)) { 1106 if (gpio_is_valid(plat->wp_gpio)) {
196 tegra_gpio_disable(plat->wp_gpio); 1107 tegra_gpio_disable(plat->wp_gpio);
197 gpio_free(plat->wp_gpio); 1108 gpio_free(plat->wp_gpio);
198 } 1109 }
199 1110err_wp_req:
200out_irq:
201 if (gpio_is_valid(plat->cd_gpio)) 1111 if (gpio_is_valid(plat->cd_gpio))
202 free_irq(gpio_to_irq(plat->cd_gpio), host); 1112 free_irq(gpio_to_irq(plat->cd_gpio), host);
203out_cd: 1113err_cd_irq_req:
204 if (gpio_is_valid(plat->cd_gpio)) { 1114 if (gpio_is_valid(plat->cd_gpio)) {
205 tegra_gpio_disable(plat->cd_gpio); 1115 tegra_gpio_disable(plat->cd_gpio);
206 gpio_free(plat->cd_gpio); 1116 gpio_free(plat->cd_gpio);
207 } 1117 }
208 1118err_cd_req:
209out_power:
210 if (gpio_is_valid(plat->power_gpio)) { 1119 if (gpio_is_valid(plat->power_gpio)) {
211 tegra_gpio_disable(plat->power_gpio); 1120 tegra_gpio_disable(plat->power_gpio);
212 gpio_free(plat->power_gpio); 1121 gpio_free(plat->power_gpio);
213 } 1122 }
214 1123err_power_req:
215out: 1124err_no_mem:
1125 kfree(tegra_host);
1126err_no_plat:
1127 sdhci_pltfm_free(pdev);
216 return rc; 1128 return rc;
217} 1129}
218 1130
219static void tegra_sdhci_pltfm_exit(struct sdhci_host *host) 1131static int __devexit sdhci_tegra_remove(struct platform_device *pdev)
220{ 1132{
1133 struct sdhci_host *host = platform_get_drvdata(pdev);
221 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1134 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
222 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); 1135 struct tegra_sdhci_host *tegra_host = pltfm_host->priv;
223 struct tegra_sdhci_platform_data *plat; 1136 struct tegra_sdhci_platform_data *plat;
1137 int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
1138
1139 sdhci_remove_host(host, dead);
224 1140
225 plat = pdev->dev.platform_data; 1141 plat = pdev->dev.platform_data;
226 1142
1143 disable_irq_wake(gpio_to_irq(plat->cd_gpio));
1144
1145 if (tegra_host->vdd_slot_reg) {
1146 regulator_disable(tegra_host->vdd_slot_reg);
1147 regulator_put(tegra_host->vdd_slot_reg);
1148 }
1149
1150 if (tegra_host->vdd_io_reg) {
1151 regulator_disable(tegra_host->vdd_io_reg);
1152 regulator_put(tegra_host->vdd_io_reg);
1153 }
1154
227 if (gpio_is_valid(plat->wp_gpio)) { 1155 if (gpio_is_valid(plat->wp_gpio)) {
228 tegra_gpio_disable(plat->wp_gpio); 1156 tegra_gpio_disable(plat->wp_gpio);
229 gpio_free(plat->wp_gpio); 1157 gpio_free(plat->wp_gpio);
@@ -240,24 +1168,41 @@ static void tegra_sdhci_pltfm_exit(struct sdhci_host *host)
240 gpio_free(plat->power_gpio); 1168 gpio_free(plat->power_gpio);
241 } 1169 }
242 1170
243 clk_disable(pltfm_host->clk); 1171 if (tegra_host->clk_enabled)
1172 clk_disable(pltfm_host->clk);
244 clk_put(pltfm_host->clk); 1173 clk_put(pltfm_host->clk);
1174
1175 sdhci_pltfm_free(pdev);
1176 kfree(tegra_host);
1177
1178 return 0;
245} 1179}
246 1180
247static struct sdhci_ops tegra_sdhci_ops = { 1181static struct platform_driver sdhci_tegra_driver = {
248 .get_ro = tegra_sdhci_get_ro, 1182 .driver = {
249 .read_l = tegra_sdhci_readl, 1183 .name = "sdhci-tegra",
250 .read_w = tegra_sdhci_readw, 1184 .owner = THIS_MODULE,
251 .write_l = tegra_sdhci_writel, 1185 },
252 .platform_8bit_width = tegra_sdhci_8bit, 1186 .probe = sdhci_tegra_probe,
1187 .remove = __devexit_p(sdhci_tegra_remove),
1188#ifdef CONFIG_PM
1189 .suspend = sdhci_pltfm_suspend,
1190 .resume = sdhci_pltfm_resume,
1191#endif
253}; 1192};
254 1193
255struct sdhci_pltfm_data sdhci_tegra_pdata = { 1194static int __init sdhci_tegra_init(void)
256 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 1195{
257 SDHCI_QUIRK_SINGLE_POWER_WRITE | 1196 return platform_driver_register(&sdhci_tegra_driver);
258 SDHCI_QUIRK_NO_HISPD_BIT | 1197}
259 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC, 1198module_init(sdhci_tegra_init);
260 .ops = &tegra_sdhci_ops, 1199
261 .init = tegra_sdhci_pltfm_init, 1200static void __exit sdhci_tegra_exit(void)
262 .exit = tegra_sdhci_pltfm_exit, 1201{
263}; 1202 platform_driver_unregister(&sdhci_tegra_driver);
1203}
1204module_exit(sdhci_tegra_exit);
1205
1206MODULE_DESCRIPTION("SDHCI driver for Tegra");
1207MODULE_AUTHOR(" Google, Inc.");
1208MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 58d5436ff64..c6822c39541 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -25,6 +25,7 @@
25 25
26#include <linux/mmc/mmc.h> 26#include <linux/mmc/mmc.h>
27#include <linux/mmc/host.h> 27#include <linux/mmc/host.h>
28#include <linux/mmc/card.h>
28 29
29#include "sdhci.h" 30#include "sdhci.h"
30 31
@@ -127,11 +128,15 @@ static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
127 128
128static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) 129static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
129{ 130{
130 u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT; 131 u32 present, irqs;
131 132
132 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 133 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
133 return; 134 return;
134 135
136 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
137 SDHCI_CARD_PRESENT;
138 irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;
139
135 if (enable) 140 if (enable)
136 sdhci_unmask_irqs(host, irqs); 141 sdhci_unmask_irqs(host, irqs);
137 else 142 else
@@ -624,12 +629,11 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
624 /* timeout in us */ 629 /* timeout in us */
625 if (!data) 630 if (!data)
626 target_timeout = cmd->cmd_timeout_ms * 1000; 631 target_timeout = cmd->cmd_timeout_ms * 1000;
627 else 632 else {
628 target_timeout = data->timeout_ns / 1000 + 633 target_timeout = data->timeout_ns / 1000;
629 data->timeout_clks / host->clock; 634 if (host->clock)
630 635 target_timeout += data->timeout_clks / host->clock;
631 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) 636 }
632 host->timeout_clk = host->clock / 1000;
633 637
634 /* 638 /*
635 * Figure out needed cycles. 639 * Figure out needed cycles.
@@ -641,7 +645,6 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
641 * => 645 * =>
642 * (1) / (2) > 2^6 646 * (1) / (2) > 2^6
643 */ 647 */
644 BUG_ON(!host->timeout_clk);
645 count = 0; 648 count = 0;
646 current_timeout = (1 << 13) * 1000 / host->timeout_clk; 649 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
647 while (current_timeout < target_timeout) { 650 while (current_timeout < target_timeout) {
@@ -1044,14 +1047,11 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1044 u16 clk = 0; 1047 u16 clk = 0;
1045 unsigned long timeout; 1048 unsigned long timeout;
1046 1049
1047 if (clock == host->clock) 1050 if (clock && clock == host->clock)
1048 return; 1051 return;
1049 1052
1050 if (host->ops->set_clock) { 1053 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
1051 host->ops->set_clock(host, clock); 1054 return;
1052 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
1053 return;
1054 }
1055 1055
1056 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); 1056 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1057 1057
@@ -1229,11 +1229,15 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1229 host->mrq = mrq; 1229 host->mrq = mrq;
1230 1230
1231 /* If polling, assume that the card is always present. */ 1231 /* If polling, assume that the card is always present. */
1232 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 1232 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
1233 present = true; 1233 if (host->ops->get_cd)
1234 else 1234 present = host->ops->get_cd(host);
1235 else
1236 present = true;
1237 } else {
1235 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 1238 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
1236 SDHCI_CARD_PRESENT; 1239 SDHCI_CARD_PRESENT;
1240 }
1237 1241
1238 if (!present || host->flags & SDHCI_DEVICE_DEAD) { 1242 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1239 host->mrq->cmd->error = -ENOMEDIUM; 1243 host->mrq->cmd->error = -ENOMEDIUM;
@@ -1275,6 +1279,20 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1275 1279
1276 host = mmc_priv(mmc); 1280 host = mmc_priv(mmc);
1277 1281
1282 /*
1283 * Controller registers should not be updated without the
1284 * controller clock enabled. Set the minimum controller
1285 * clock if there is no clock.
1286 */
1287 if (host->ops->set_clock) {
1288 if (!host->clock && !ios->clock) {
1289 host->ops->set_clock(host, host->mmc->f_min);
1290 host->clock = host->mmc->f_min;
1291 } else if (ios->clock && (ios->clock != host->clock)) {
1292 host->ops->set_clock(host, ios->clock);
1293 }
1294 }
1295
1278 spin_lock_irqsave(&host->lock, flags); 1296 spin_lock_irqsave(&host->lock, flags);
1279 1297
1280 if (host->flags & SDHCI_DEVICE_DEAD) 1298 if (host->flags & SDHCI_DEVICE_DEAD)
@@ -1289,13 +1307,13 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1289 sdhci_reinit(host); 1307 sdhci_reinit(host);
1290 } 1308 }
1291 1309
1292 sdhci_set_clock(host, ios->clock);
1293
1294 if (ios->power_mode == MMC_POWER_OFF) 1310 if (ios->power_mode == MMC_POWER_OFF)
1295 sdhci_set_power(host, -1); 1311 sdhci_set_power(host, -1);
1296 else 1312 else
1297 sdhci_set_power(host, ios->vdd); 1313 sdhci_set_power(host, ios->vdd);
1298 1314
1315 sdhci_set_clock(host, ios->clock);
1316
1299 if (host->ops->platform_send_init_74_clocks) 1317 if (host->ops->platform_send_init_74_clocks)
1300 host->ops->platform_send_init_74_clocks(host, ios->power_mode); 1318 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
1301 1319
@@ -1337,11 +1355,12 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1337 unsigned int clock; 1355 unsigned int clock;
1338 1356
1339 /* In case of UHS-I modes, set High Speed Enable */ 1357 /* In case of UHS-I modes, set High Speed Enable */
1340 if ((ios->timing == MMC_TIMING_UHS_SDR50) || 1358 if (((ios->timing == MMC_TIMING_UHS_SDR50) ||
1341 (ios->timing == MMC_TIMING_UHS_SDR104) || 1359 (ios->timing == MMC_TIMING_UHS_SDR104) ||
1342 (ios->timing == MMC_TIMING_UHS_DDR50) || 1360 (ios->timing == MMC_TIMING_UHS_DDR50) ||
1343 (ios->timing == MMC_TIMING_UHS_SDR25) || 1361 (ios->timing == MMC_TIMING_UHS_SDR25) ||
1344 (ios->timing == MMC_TIMING_UHS_SDR12)) 1362 (ios->timing == MMC_TIMING_UHS_SDR12))
1363 && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
1345 ctrl |= SDHCI_CTRL_HISPD; 1364 ctrl |= SDHCI_CTRL_HISPD;
1346 1365
1347 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1366 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
@@ -1374,9 +1393,9 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1374 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 1393 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1375 1394
1376 /* Re-enable SD Clock */ 1395 /* Re-enable SD Clock */
1377 clock = host->clock; 1396 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1378 host->clock = 0; 1397 clk |= SDHCI_CLOCK_CARD_EN;
1379 sdhci_set_clock(host, clock); 1398 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1380 } 1399 }
1381 1400
1382 1401
@@ -1405,9 +1424,9 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1405 } 1424 }
1406 1425
1407 /* Re-enable SD Clock */ 1426 /* Re-enable SD Clock */
1408 clock = host->clock; 1427 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1409 host->clock = 0; 1428 clk |= SDHCI_CLOCK_CARD_EN;
1410 sdhci_set_clock(host, clock); 1429 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1411 } else 1430 } else
1412 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 1431 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1413 1432
@@ -1422,6 +1441,12 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1422out: 1441out:
1423 mmiowb(); 1442 mmiowb();
1424 spin_unlock_irqrestore(&host->lock, flags); 1443 spin_unlock_irqrestore(&host->lock, flags);
1444 /*
1445 * Controller clock should only be disabled after all the register
1446 * writes are done.
1447 */
1448 if (!ios->clock && host->ops->set_clock)
1449 host->ops->set_clock(host, ios->clock);
1425} 1450}
1426 1451
1427static int check_ro(struct sdhci_host *host) 1452static int check_ro(struct sdhci_host *host)
@@ -1508,6 +1533,12 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1508 if (host->version < SDHCI_SPEC_300) 1533 if (host->version < SDHCI_SPEC_300)
1509 return 0; 1534 return 0;
1510 1535
1536 if (host->quirks & SDHCI_QUIRK_NON_STD_VOLTAGE_SWITCHING) {
1537 if (host->ops->switch_signal_voltage)
1538 return host->ops->switch_signal_voltage(
1539 host, ios->signal_voltage);
1540 }
1541
1511 /* 1542 /*
1512 * We first check whether the request is to set signalling voltage 1543 * We first check whether the request is to set signalling voltage
1513 * to 3.3V. If so, we change the voltage to 3.3V and return quickly. 1544 * to 3.3V. If so, we change the voltage to 3.3V and return quickly.
@@ -1550,7 +1581,6 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1550 1581
1551 /* Wait for 5ms */ 1582 /* Wait for 5ms */
1552 usleep_range(5000, 5500); 1583 usleep_range(5000, 5500);
1553
1554 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1584 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1555 if (ctrl & SDHCI_CTRL_VDD_180) { 1585 if (ctrl & SDHCI_CTRL_VDD_180) {
1556 /* Provide SDCLK again and wait for 1ms*/ 1586 /* Provide SDCLK again and wait for 1ms*/
@@ -1607,6 +1637,14 @@ static int sdhci_execute_tuning(struct mmc_host *mmc)
1607 disable_irq(host->irq); 1637 disable_irq(host->irq);
1608 spin_lock(&host->lock); 1638 spin_lock(&host->lock);
1609 1639
1640 if ((host->quirks & SDHCI_QUIRK_NON_STANDARD_TUNING) &&
1641 host->ops->execute_freq_tuning) {
1642 err = host->ops->execute_freq_tuning(host);
1643 spin_unlock(&host->lock);
1644 enable_irq(host->irq);
1645 return err;
1646 }
1647
1610 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1648 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1611 1649
1612 /* 1650 /*
@@ -1780,6 +1818,16 @@ static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable)
1780 if (host->version < SDHCI_SPEC_300) 1818 if (host->version < SDHCI_SPEC_300)
1781 return; 1819 return;
1782 1820
1821 /*
1822 * Enabling preset value would make programming clock
1823 * divider ineffective. The controller would use the
1824 * values present in the preset value registers. In
1825 * case of non-standard clock, let the platform driver
1826 * decide whether to enable preset or not.
1827 */
1828 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
1829 return;
1830
1783 spin_lock_irqsave(&host->lock, flags); 1831 spin_lock_irqsave(&host->lock, flags);
1784 1832
1785 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1833 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
@@ -1799,10 +1847,42 @@ static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable)
1799 spin_unlock_irqrestore(&host->lock, flags); 1847 spin_unlock_irqrestore(&host->lock, flags);
1800} 1848}
1801 1849
1850int sdhci_enable(struct mmc_host *mmc)
1851{
1852 struct sdhci_host *host = mmc_priv(mmc);
1853
1854 if (!mmc->card || mmc->card->type == MMC_TYPE_SDIO)
1855 return 0;
1856
1857 if (mmc->ios.clock) {
1858 if (host->ops->set_clock)
1859 host->ops->set_clock(host, mmc->ios.clock);
1860 sdhci_set_clock(host, mmc->ios.clock);
1861 }
1862
1863 return 0;
1864}
1865
1866int sdhci_disable(struct mmc_host *mmc, int lazy)
1867{
1868 struct sdhci_host *host = mmc_priv(mmc);
1869
1870 if (!mmc->card || mmc->card->type == MMC_TYPE_SDIO)
1871 return 0;
1872
1873 sdhci_set_clock(host, 0);
1874 if (host->ops->set_clock)
1875 host->ops->set_clock(host, 0);
1876
1877 return 0;
1878}
1879
1802static const struct mmc_host_ops sdhci_ops = { 1880static const struct mmc_host_ops sdhci_ops = {
1803 .request = sdhci_request, 1881 .request = sdhci_request,
1804 .set_ios = sdhci_set_ios, 1882 .set_ios = sdhci_set_ios,
1805 .get_ro = sdhci_get_ro, 1883 .get_ro = sdhci_get_ro,
1884 .enable = sdhci_enable,
1885 .disable = sdhci_disable,
1806 .enable_sdio_irq = sdhci_enable_sdio_irq, 1886 .enable_sdio_irq = sdhci_enable_sdio_irq,
1807 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 1887 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
1808 .execute_tuning = sdhci_execute_tuning, 1888 .execute_tuning = sdhci_execute_tuning,
@@ -1863,9 +1943,6 @@ static void sdhci_tasklet_finish(unsigned long param)
1863 1943
1864 del_timer(&host->timer); 1944 del_timer(&host->timer);
1865 1945
1866 if (host->version >= SDHCI_SPEC_300)
1867 del_timer(&host->tuning_timer);
1868
1869 mrq = host->mrq; 1946 mrq = host->mrq;
1870 1947
1871 /* 1948 /*
@@ -1885,6 +1962,8 @@ static void sdhci_tasklet_finish(unsigned long param)
1885 /* This is to force an update */ 1962 /* This is to force an update */
1886 clock = host->clock; 1963 clock = host->clock;
1887 host->clock = 0; 1964 host->clock = 0;
1965 if (host->ops->set_clock)
1966 host->ops->set_clock(host, clock);
1888 sdhci_set_clock(host, clock); 1967 sdhci_set_clock(host, clock);
1889 } 1968 }
1890 1969
@@ -2154,13 +2233,30 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
2154 mmc_hostname(host->mmc), intmask); 2233 mmc_hostname(host->mmc), intmask);
2155 2234
2156 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 2235 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2236 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
2237 SDHCI_CARD_PRESENT;
2238
2239 /*
2240 * There is a observation on i.mx esdhc. INSERT bit will be
2241 * immediately set again when it gets cleared, if a card is
2242 * inserted. We have to mask the irq to prevent interrupt
2243 * storm which will freeze the system. And the REMOVE gets
2244 * the same situation.
2245 *
2246 * More testing are needed here to ensure it works for other
2247 * platforms though.
2248 */
2249 sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT :
2250 SDHCI_INT_CARD_REMOVE);
2251 sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE :
2252 SDHCI_INT_CARD_INSERT);
2253
2157 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | 2254 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
2158 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); 2255 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2256 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
2159 tasklet_schedule(&host->card_tasklet); 2257 tasklet_schedule(&host->card_tasklet);
2160 } 2258 }
2161 2259
2162 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
2163
2164 if (intmask & SDHCI_INT_CMD_MASK) { 2260 if (intmask & SDHCI_INT_CMD_MASK) {
2165 sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK, 2261 sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
2166 SDHCI_INT_STATUS); 2262 SDHCI_INT_STATUS);
@@ -2223,7 +2319,8 @@ out:
2223 2319
2224int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state) 2320int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
2225{ 2321{
2226 int ret; 2322 int ret = 0;
2323 struct mmc_host *mmc = host->mmc;
2227 2324
2228 sdhci_disable_card_detection(host); 2325 sdhci_disable_card_detection(host);
2229 2326
@@ -2235,15 +2332,21 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
2235 host->tuning_count * HZ); 2332 host->tuning_count * HZ);
2236 } 2333 }
2237 2334
2238 ret = mmc_suspend_host(host->mmc); 2335 if (mmc->card)
2239 if (ret) 2336 ret = mmc_suspend_host(host->mmc);
2240 return ret;
2241 2337
2242 free_irq(host->irq, host); 2338 if (mmc->pm_flags & MMC_PM_KEEP_POWER)
2339 host->card_int_set = sdhci_readl(host, SDHCI_INT_ENABLE) &
2340 SDHCI_INT_CARD_INT;
2341
2342 sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
2243 2343
2244 if (host->vmmc) 2344 if (host->vmmc)
2245 ret = regulator_disable(host->vmmc); 2345 ret = regulator_disable(host->vmmc);
2246 2346
2347 if (host->irq)
2348 disable_irq(host->irq);
2349
2247 return ret; 2350 return ret;
2248} 2351}
2249 2352
@@ -2251,7 +2354,8 @@ EXPORT_SYMBOL_GPL(sdhci_suspend_host);
2251 2354
2252int sdhci_resume_host(struct sdhci_host *host) 2355int sdhci_resume_host(struct sdhci_host *host)
2253{ 2356{
2254 int ret; 2357 int ret = 0;
2358 struct mmc_host *mmc = host->mmc;
2255 2359
2256 if (host->vmmc) { 2360 if (host->vmmc) {
2257 int ret = regulator_enable(host->vmmc); 2361 int ret = regulator_enable(host->vmmc);
@@ -2265,15 +2369,21 @@ int sdhci_resume_host(struct sdhci_host *host)
2265 host->ops->enable_dma(host); 2369 host->ops->enable_dma(host);
2266 } 2370 }
2267 2371
2268 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, 2372 if (host->irq)
2269 mmc_hostname(host->mmc), host); 2373 enable_irq(host->irq);
2270 if (ret)
2271 return ret;
2272 2374
2273 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); 2375 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
2274 mmiowb(); 2376 mmiowb();
2275 2377
2276 ret = mmc_resume_host(host->mmc); 2378 if (mmc->card) {
2379 ret = mmc_resume_host(host->mmc);
2380 /* Enable card interrupt as it is overwritten in sdhci_init */
2381 if ((mmc->caps & MMC_CAP_SDIO_IRQ) &&
2382 (mmc->pm_flags & MMC_PM_KEEP_POWER))
2383 if (host->card_int_set)
2384 mmc->ops->enable_sdio_irq(mmc, true);
2385 }
2386
2277 sdhci_enable_card_detection(host); 2387 sdhci_enable_card_detection(host);
2278 2388
2279 /* Set the re-tuning expiration flag */ 2389 /* Set the re-tuning expiration flag */
@@ -2440,22 +2550,6 @@ int sdhci_add_host(struct sdhci_host *host)
2440 host->max_clk = host->ops->get_max_clock(host); 2550 host->max_clk = host->ops->get_max_clock(host);
2441 } 2551 }
2442 2552
2443 host->timeout_clk =
2444 (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
2445 if (host->timeout_clk == 0) {
2446 if (host->ops->get_timeout_clock) {
2447 host->timeout_clk = host->ops->get_timeout_clock(host);
2448 } else if (!(host->quirks &
2449 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
2450 printk(KERN_ERR
2451 "%s: Hardware doesn't specify timeout clock "
2452 "frequency.\n", mmc_hostname(mmc));
2453 return -ENODEV;
2454 }
2455 }
2456 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
2457 host->timeout_clk *= 1000;
2458
2459 /* 2553 /*
2460 * In case of Host Controller v3.00, find out whether clock 2554 * In case of Host Controller v3.00, find out whether clock
2461 * multiplier is supported. 2555 * multiplier is supported.
@@ -2488,7 +2582,27 @@ int sdhci_add_host(struct sdhci_host *host)
2488 } else 2582 } else
2489 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 2583 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
2490 2584
2491 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; 2585 host->timeout_clk =
2586 (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
2587 if (host->timeout_clk == 0) {
2588 if (host->ops->get_timeout_clock) {
2589 host->timeout_clk = host->ops->get_timeout_clock(host);
2590 } else if (!(host->quirks &
2591 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
2592 printk(KERN_ERR
2593 "%s: Hardware doesn't specify timeout clock "
2594 "frequency.\n", mmc_hostname(mmc));
2595 return -ENODEV;
2596 }
2597 }
2598 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
2599 host->timeout_clk *= 1000;
2600
2601 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
2602 host->timeout_clk = mmc->f_max / 1000;
2603
2604 if (!(host->quirks & SDHCI_QUIRK_NO_CALC_MAX_DISCARD_TO))
2605 mmc->max_discard_to = (1 << 27) / host->timeout_clk;
2492 2606
2493 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 2607 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
2494 host->flags |= SDHCI_AUTO_CMD12; 2608 host->flags |= SDHCI_AUTO_CMD12;
@@ -2517,7 +2631,7 @@ int sdhci_add_host(struct sdhci_host *host)
2517 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 2631 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2518 2632
2519 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 2633 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
2520 mmc_card_is_removable(mmc)) 2634 mmc_card_is_removable(mmc) && !(host->ops->get_cd))
2521 mmc->caps |= MMC_CAP_NEEDS_POLL; 2635 mmc->caps |= MMC_CAP_NEEDS_POLL;
2522 2636
2523 /* UHS-I mode(s) supported by the host controller. */ 2637 /* UHS-I mode(s) supported by the host controller. */
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 745c42fa41e..c00833de19d 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -270,10 +270,15 @@ struct sdhci_ops {
270 void (*platform_send_init_74_clocks)(struct sdhci_host *host, 270 void (*platform_send_init_74_clocks)(struct sdhci_host *host,
271 u8 power_mode); 271 u8 power_mode);
272 unsigned int (*get_ro)(struct sdhci_host *host); 272 unsigned int (*get_ro)(struct sdhci_host *host);
273 unsigned int (*get_cd)(struct sdhci_host *host);
273 void (*platform_reset_enter)(struct sdhci_host *host, u8 mask); 274 void (*platform_reset_enter)(struct sdhci_host *host, u8 mask);
274 void (*platform_reset_exit)(struct sdhci_host *host, u8 mask); 275 void (*platform_reset_exit)(struct sdhci_host *host, u8 mask);
275 int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); 276 int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
276 277 int (*suspend)(struct sdhci_host *host, pm_message_t state);
278 int (*resume)(struct sdhci_host *host);
279 int (*switch_signal_voltage)(struct sdhci_host *host,
280 unsigned int signal_voltage);
281 int (*execute_freq_tuning)(struct sdhci_host *sdhci);
277}; 282};
278 283
279#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS 284#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 14f8edbaa19..557886bee9c 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -175,6 +175,7 @@ struct sh_mmcif_host {
175 enum mmcif_state state; 175 enum mmcif_state state;
176 spinlock_t lock; 176 spinlock_t lock;
177 bool power; 177 bool power;
178 bool card_present;
178 179
179 /* DMA support */ 180 /* DMA support */
180 struct dma_chan *chan_rx; 181 struct dma_chan *chan_rx;
@@ -877,23 +878,23 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
877 spin_unlock_irqrestore(&host->lock, flags); 878 spin_unlock_irqrestore(&host->lock, flags);
878 879
879 if (ios->power_mode == MMC_POWER_UP) { 880 if (ios->power_mode == MMC_POWER_UP) {
880 if (p->set_pwr) 881 if (!host->card_present) {
881 p->set_pwr(host->pd, ios->power_mode);
882 if (!host->power) {
883 /* See if we also get DMA */ 882 /* See if we also get DMA */
884 sh_mmcif_request_dma(host, host->pd->dev.platform_data); 883 sh_mmcif_request_dma(host, host->pd->dev.platform_data);
885 pm_runtime_get_sync(&host->pd->dev); 884 host->card_present = true;
886 host->power = true;
887 } 885 }
888 } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { 886 } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
889 /* clock stop */ 887 /* clock stop */
890 sh_mmcif_clock_control(host, 0); 888 sh_mmcif_clock_control(host, 0);
891 if (ios->power_mode == MMC_POWER_OFF) { 889 if (ios->power_mode == MMC_POWER_OFF) {
892 if (host->power) { 890 if (host->card_present) {
893 pm_runtime_put(&host->pd->dev);
894 sh_mmcif_release_dma(host); 891 sh_mmcif_release_dma(host);
895 host->power = false; 892 host->card_present = false;
896 } 893 }
894 }
895 if (host->power) {
896 pm_runtime_put(&host->pd->dev);
897 host->power = false;
897 if (p->down_pwr) 898 if (p->down_pwr)
898 p->down_pwr(host->pd); 899 p->down_pwr(host->pd);
899 } 900 }
@@ -901,8 +902,16 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
901 return; 902 return;
902 } 903 }
903 904
904 if (ios->clock) 905 if (ios->clock) {
906 if (!host->power) {
907 if (p->set_pwr)
908 p->set_pwr(host->pd, ios->power_mode);
909 pm_runtime_get_sync(&host->pd->dev);
910 host->power = true;
911 sh_mmcif_sync_reset(host);
912 }
905 sh_mmcif_clock_control(host, ios->clock); 913 sh_mmcif_clock_control(host, ios->clock);
914 }
906 915
907 host->bus_width = ios->bus_width; 916 host->bus_width = ios->bus_width;
908 host->state = STATE_IDLE; 917 host->state = STATE_IDLE;
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index ce500f03df8..0c4a672f5db 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -26,6 +26,7 @@
26#include <linux/mmc/sh_mobile_sdhi.h> 26#include <linux/mmc/sh_mobile_sdhi.h>
27#include <linux/mfd/tmio.h> 27#include <linux/mfd/tmio.h>
28#include <linux/sh_dma.h> 28#include <linux/sh_dma.h>
29#include <linux/delay.h>
29 30
30#include "tmio_mmc.h" 31#include "tmio_mmc.h"
31 32
@@ -55,6 +56,39 @@ static int sh_mobile_sdhi_get_cd(struct platform_device *pdev)
55 return -ENOSYS; 56 return -ENOSYS;
56} 57}
57 58
59static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
60{
61 int timeout = 1000;
62
63 while (--timeout && !(sd_ctrl_read16(host, CTL_STATUS2) & (1 << 13)))
64 udelay(1);
65
66 if (!timeout) {
67 dev_warn(host->pdata->dev, "timeout waiting for SD bus idle\n");
68 return -EBUSY;
69 }
70
71 return 0;
72}
73
74static int sh_mobile_sdhi_write16_hook(struct tmio_mmc_host *host, int addr)
75{
76 switch (addr)
77 {
78 case CTL_SD_CMD:
79 case CTL_STOP_INTERNAL_ACTION:
80 case CTL_XFER_BLK_COUNT:
81 case CTL_SD_CARD_CLK_CTL:
82 case CTL_SD_XFER_LEN:
83 case CTL_SD_MEM_CARD_OPT:
84 case CTL_TRANSACTION_CTL:
85 case CTL_DMA_ENABLE:
86 return sh_mobile_sdhi_wait_idle(host);
87 }
88
89 return 0;
90}
91
58static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) 92static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
59{ 93{
60 struct sh_mobile_sdhi *priv; 94 struct sh_mobile_sdhi *priv;
@@ -89,6 +123,8 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
89 mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; 123 mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
90 if (p) { 124 if (p) {
91 mmc_data->flags = p->tmio_flags; 125 mmc_data->flags = p->tmio_flags;
126 if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
127 mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
92 mmc_data->ocr_mask = p->tmio_ocr_mask; 128 mmc_data->ocr_mask = p->tmio_ocr_mask;
93 mmc_data->capabilities |= p->tmio_caps; 129 mmc_data->capabilities |= p->tmio_caps;
94 130
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 8d185de90d2..44a9668c4b7 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -27,7 +27,6 @@
27static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) 27static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
28{ 28{
29 const struct mfd_cell *cell = mfd_get_cell(dev); 29 const struct mfd_cell *cell = mfd_get_cell(dev);
30 struct mmc_host *mmc = platform_get_drvdata(dev);
31 int ret; 30 int ret;
32 31
33 ret = tmio_mmc_host_suspend(&dev->dev); 32 ret = tmio_mmc_host_suspend(&dev->dev);
@@ -42,7 +41,6 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
42static int tmio_mmc_resume(struct platform_device *dev) 41static int tmio_mmc_resume(struct platform_device *dev)
43{ 42{
44 const struct mfd_cell *cell = mfd_get_cell(dev); 43 const struct mfd_cell *cell = mfd_get_cell(dev);
45 struct mmc_host *mmc = platform_get_drvdata(dev);
46 int ret = 0; 44 int ret = 0;
47 45
48 /* Tell the MFD core we are ready to be enabled */ 46 /* Tell the MFD core we are ready to be enabled */
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 8260bc2c34e..eeaf64391fb 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -18,8 +18,10 @@
18 18
19#include <linux/highmem.h> 19#include <linux/highmem.h>
20#include <linux/mmc/tmio.h> 20#include <linux/mmc/tmio.h>
21#include <linux/mutex.h>
21#include <linux/pagemap.h> 22#include <linux/pagemap.h>
22#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/scatterlist.h>
23 25
24/* Definitions for values the CTRL_SDIO_STATUS register can take. */ 26/* Definitions for values the CTRL_SDIO_STATUS register can take. */
25#define TMIO_SDIO_STAT_IOIRQ 0x0001 27#define TMIO_SDIO_STAT_IOIRQ 0x0001
@@ -52,6 +54,8 @@ struct tmio_mmc_host {
52 void (*set_clk_div)(struct platform_device *host, int state); 54 void (*set_clk_div)(struct platform_device *host, int state);
53 55
54 int pm_error; 56 int pm_error;
57 /* recognise system-wide suspend in runtime PM methods */
58 bool pm_global;
55 59
56 /* pio related stuff */ 60 /* pio related stuff */
57 struct scatterlist *sg_ptr; 61 struct scatterlist *sg_ptr;
@@ -73,8 +77,11 @@ struct tmio_mmc_host {
73 77
74 /* Track lost interrupts */ 78 /* Track lost interrupts */
75 struct delayed_work delayed_reset_work; 79 struct delayed_work delayed_reset_work;
76 spinlock_t lock; 80 struct work_struct done;
81
82 spinlock_t lock; /* protect host private data */
77 unsigned long last_req_ts; 83 unsigned long last_req_ts;
84 struct mutex ios_lock; /* protect set_ios() context */
78}; 85};
79 86
80int tmio_mmc_host_probe(struct tmio_mmc_host **host, 87int tmio_mmc_host_probe(struct tmio_mmc_host **host,
@@ -103,6 +110,7 @@ static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg,
103 110
104#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) 111#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
105void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data); 112void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data);
113void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable);
106void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata); 114void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata);
107void tmio_mmc_release_dma(struct tmio_mmc_host *host); 115void tmio_mmc_release_dma(struct tmio_mmc_host *host);
108#else 116#else
@@ -111,6 +119,10 @@ static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
111{ 119{
112} 120}
113 121
122static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
123{
124}
125
114static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host, 126static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
115 struct tmio_mmc_data *pdata) 127 struct tmio_mmc_data *pdata)
116{ 128{
@@ -134,4 +146,44 @@ int tmio_mmc_host_resume(struct device *dev);
134int tmio_mmc_host_runtime_suspend(struct device *dev); 146int tmio_mmc_host_runtime_suspend(struct device *dev);
135int tmio_mmc_host_runtime_resume(struct device *dev); 147int tmio_mmc_host_runtime_resume(struct device *dev);
136 148
149static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
150{
151 return readw(host->ctl + (addr << host->bus_shift));
152}
153
154static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
155 u16 *buf, int count)
156{
157 readsw(host->ctl + (addr << host->bus_shift), buf, count);
158}
159
160static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
161{
162 return readw(host->ctl + (addr << host->bus_shift)) |
163 readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
164}
165
166static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
167{
168 /* If there is a hook and it returns non-zero then there
169 * is an error and the write should be skipped
170 */
171 if (host->pdata->write16_hook && host->pdata->write16_hook(host, addr))
172 return;
173 writew(val, host->ctl + (addr << host->bus_shift));
174}
175
176static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
177 u16 *buf, int count)
178{
179 writesw(host->ctl + (addr << host->bus_shift), buf, count);
180}
181
182static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
183{
184 writew(val, host->ctl + (addr << host->bus_shift));
185 writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
186}
187
188
137#endif 189#endif
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 25f1ad6cbe0..86f259cdfcb 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/device.h> 13#include <linux/device.h>
14#include <linux/dma-mapping.h>
14#include <linux/dmaengine.h> 15#include <linux/dmaengine.h>
15#include <linux/mfd/tmio.h> 16#include <linux/mfd/tmio.h>
16#include <linux/mmc/host.h> 17#include <linux/mmc/host.h>
@@ -22,11 +23,14 @@
22 23
23#define TMIO_MMC_MIN_DMA_LEN 8 24#define TMIO_MMC_MIN_DMA_LEN 8
24 25
25static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) 26void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
26{ 27{
28 if (!host->chan_tx || !host->chan_rx)
29 return;
30
27#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) 31#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
28 /* Switch DMA mode on or off - SuperH specific? */ 32 /* Switch DMA mode on or off - SuperH specific? */
29 writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift)); 33 sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0);
30#endif 34#endif
31} 35}
32 36
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 0b09e8239aa..1f16357e730 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -46,40 +46,6 @@
46 46
47#include "tmio_mmc.h" 47#include "tmio_mmc.h"
48 48
49static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
50{
51 return readw(host->ctl + (addr << host->bus_shift));
52}
53
54static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
55 u16 *buf, int count)
56{
57 readsw(host->ctl + (addr << host->bus_shift), buf, count);
58}
59
60static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
61{
62 return readw(host->ctl + (addr << host->bus_shift)) |
63 readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
64}
65
66static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
67{
68 writew(val, host->ctl + (addr << host->bus_shift));
69}
70
71static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
72 u16 *buf, int count)
73{
74 writesw(host->ctl + (addr << host->bus_shift), buf, count);
75}
76
77static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
78{
79 writew(val, host->ctl + (addr << host->bus_shift));
80 writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
81}
82
83void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i) 49void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
84{ 50{
85 u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ); 51 u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ);
@@ -284,10 +250,16 @@ static void tmio_mmc_reset_work(struct work_struct *work)
284/* called with host->lock held, interrupts disabled */ 250/* called with host->lock held, interrupts disabled */
285static void tmio_mmc_finish_request(struct tmio_mmc_host *host) 251static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
286{ 252{
287 struct mmc_request *mrq = host->mrq; 253 struct mmc_request *mrq;
254 unsigned long flags;
255
256 spin_lock_irqsave(&host->lock, flags);
288 257
289 if (!mrq) 258 mrq = host->mrq;
259 if (IS_ERR_OR_NULL(mrq)) {
260 spin_unlock_irqrestore(&host->lock, flags);
290 return; 261 return;
262 }
291 263
292 host->cmd = NULL; 264 host->cmd = NULL;
293 host->data = NULL; 265 host->data = NULL;
@@ -296,11 +268,18 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
296 cancel_delayed_work(&host->delayed_reset_work); 268 cancel_delayed_work(&host->delayed_reset_work);
297 269
298 host->mrq = NULL; 270 host->mrq = NULL;
271 spin_unlock_irqrestore(&host->lock, flags);
299 272
300 /* FIXME: mmc_request_done() can schedule! */
301 mmc_request_done(host->mmc, mrq); 273 mmc_request_done(host->mmc, mrq);
302} 274}
303 275
276static void tmio_mmc_done_work(struct work_struct *work)
277{
278 struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
279 done);
280 tmio_mmc_finish_request(host);
281}
282
304/* These are the bitmasks the tmio chip requires to implement the MMC response 283/* These are the bitmasks the tmio chip requires to implement the MMC response
305 * types. Note that R1 and R6 are the same in this scheme. */ 284 * types. Note that R1 and R6 are the same in this scheme. */
306#define APP_CMD 0x0040 285#define APP_CMD 0x0040
@@ -467,7 +446,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
467 BUG(); 446 BUG();
468 } 447 }
469 448
470 tmio_mmc_finish_request(host); 449 schedule_work(&host->done);
471} 450}
472 451
473static void tmio_mmc_data_irq(struct tmio_mmc_host *host) 452static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
@@ -557,7 +536,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
557 tasklet_schedule(&host->dma_issue); 536 tasklet_schedule(&host->dma_issue);
558 } 537 }
559 } else { 538 } else {
560 tmio_mmc_finish_request(host); 539 schedule_work(&host->done);
561 } 540 }
562 541
563out: 542out:
@@ -567,6 +546,7 @@ out:
567irqreturn_t tmio_mmc_irq(int irq, void *devid) 546irqreturn_t tmio_mmc_irq(int irq, void *devid)
568{ 547{
569 struct tmio_mmc_host *host = devid; 548 struct tmio_mmc_host *host = devid;
549 struct mmc_host *mmc = host->mmc;
570 struct tmio_mmc_data *pdata = host->pdata; 550 struct tmio_mmc_data *pdata = host->pdata;
571 unsigned int ireg, irq_mask, status; 551 unsigned int ireg, irq_mask, status;
572 unsigned int sdio_ireg, sdio_irq_mask, sdio_status; 552 unsigned int sdio_ireg, sdio_irq_mask, sdio_status;
@@ -588,13 +568,13 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
588 if (sdio_ireg && !host->sdio_irq_enabled) { 568 if (sdio_ireg && !host->sdio_irq_enabled) {
589 pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n", 569 pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
590 sdio_status, sdio_irq_mask, sdio_ireg); 570 sdio_status, sdio_irq_mask, sdio_ireg);
591 tmio_mmc_enable_sdio_irq(host->mmc, 0); 571 tmio_mmc_enable_sdio_irq(mmc, 0);
592 goto out; 572 goto out;
593 } 573 }
594 574
595 if (host->mmc->caps & MMC_CAP_SDIO_IRQ && 575 if (mmc->caps & MMC_CAP_SDIO_IRQ &&
596 sdio_ireg & TMIO_SDIO_STAT_IOIRQ) 576 sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
597 mmc_signal_sdio_irq(host->mmc); 577 mmc_signal_sdio_irq(mmc);
598 578
599 if (sdio_ireg) 579 if (sdio_ireg)
600 goto out; 580 goto out;
@@ -603,58 +583,49 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
603 pr_debug_status(status); 583 pr_debug_status(status);
604 pr_debug_status(ireg); 584 pr_debug_status(ireg);
605 585
606 if (!ireg) { 586 /* Card insert / remove attempts */
607 tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask); 587 if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
608 588 tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
609 pr_warning("tmio_mmc: Spurious irq, disabling! " 589 TMIO_STAT_CARD_REMOVE);
610 "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); 590 if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
611 pr_debug_status(status); 591 ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
612 592 !work_pending(&mmc->detect.work))
593 mmc_detect_change(host->mmc, msecs_to_jiffies(100));
613 goto out; 594 goto out;
614 } 595 }
615 596
616 while (ireg) { 597 /* CRC and other errors */
617 /* Card insert / remove attempts */ 598/* if (ireg & TMIO_STAT_ERR_IRQ)
618 if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { 599 * handled |= tmio_error_irq(host, irq, stat);
619 tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
620 TMIO_STAT_CARD_REMOVE);
621 mmc_detect_change(host->mmc, msecs_to_jiffies(100));
622 }
623
624 /* CRC and other errors */
625/* if (ireg & TMIO_STAT_ERR_IRQ)
626 * handled |= tmio_error_irq(host, irq, stat);
627 */ 600 */
628 601
629 /* Command completion */ 602 /* Command completion */
630 if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { 603 if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
631 tmio_mmc_ack_mmc_irqs(host, 604 tmio_mmc_ack_mmc_irqs(host,
632 TMIO_STAT_CMDRESPEND | 605 TMIO_STAT_CMDRESPEND |
633 TMIO_STAT_CMDTIMEOUT); 606 TMIO_STAT_CMDTIMEOUT);
634 tmio_mmc_cmd_irq(host, status); 607 tmio_mmc_cmd_irq(host, status);
635 } 608 goto out;
636 609 }
637 /* Data transfer */
638 if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
639 tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
640 tmio_mmc_pio_irq(host);
641 }
642
643 /* Data transfer completion */
644 if (ireg & TMIO_STAT_DATAEND) {
645 tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
646 tmio_mmc_data_irq(host);
647 }
648 610
649 /* Check status - keep going until we've handled it all */ 611 /* Data transfer */
650 status = sd_ctrl_read32(host, CTL_STATUS); 612 if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
651 irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); 613 tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
652 ireg = status & TMIO_MASK_IRQ & ~irq_mask; 614 tmio_mmc_pio_irq(host);
615 goto out;
616 }
653 617
654 pr_debug("Status at end of loop: %08x\n", status); 618 /* Data transfer completion */
655 pr_debug_status(status); 619 if (ireg & TMIO_STAT_DATAEND) {
620 tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
621 tmio_mmc_data_irq(host);
622 goto out;
656 } 623 }
657 pr_debug("MMC IRQ end\n"); 624
625 pr_warning("tmio_mmc: Spurious irq, disabling! "
626 "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
627 pr_debug_status(status);
628 tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask);
658 629
659out: 630out:
660 return IRQ_HANDLED; 631 return IRQ_HANDLED;
@@ -749,6 +720,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
749 struct tmio_mmc_data *pdata = host->pdata; 720 struct tmio_mmc_data *pdata = host->pdata;
750 unsigned long flags; 721 unsigned long flags;
751 722
723 mutex_lock(&host->ios_lock);
724
752 spin_lock_irqsave(&host->lock, flags); 725 spin_lock_irqsave(&host->lock, flags);
753 if (host->mrq) { 726 if (host->mrq) {
754 if (IS_ERR(host->mrq)) { 727 if (IS_ERR(host->mrq)) {
@@ -764,6 +737,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
764 host->mrq->cmd->opcode, host->last_req_ts, jiffies); 737 host->mrq->cmd->opcode, host->last_req_ts, jiffies);
765 } 738 }
766 spin_unlock_irqrestore(&host->lock, flags); 739 spin_unlock_irqrestore(&host->lock, flags);
740
741 mutex_unlock(&host->ios_lock);
767 return; 742 return;
768 } 743 }
769 744
@@ -771,33 +746,30 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
771 746
772 spin_unlock_irqrestore(&host->lock, flags); 747 spin_unlock_irqrestore(&host->lock, flags);
773 748
774 if (ios->clock) 749 /*
775 tmio_mmc_set_clock(host, ios->clock); 750 * pdata->power == false only if COLD_CD is available, otherwise only
776 751 * in short time intervals during probing or resuming
777 /* Power sequence - OFF -> UP -> ON */ 752 */
778 if (ios->power_mode == MMC_POWER_UP) { 753 if (ios->power_mode == MMC_POWER_ON && ios->clock) {
779 if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) && !pdata->power) { 754 if (!pdata->power) {
780 pm_runtime_get_sync(&host->pdev->dev); 755 pm_runtime_get_sync(&host->pdev->dev);
781 pdata->power = true; 756 pdata->power = true;
782 } 757 }
758 tmio_mmc_set_clock(host, ios->clock);
783 /* power up SD bus */ 759 /* power up SD bus */
784 if (host->set_pwr) 760 if (host->set_pwr)
785 host->set_pwr(host->pdev, 1); 761 host->set_pwr(host->pdev, 1);
786 } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
787 /* power down SD bus */
788 if (ios->power_mode == MMC_POWER_OFF) {
789 if (host->set_pwr)
790 host->set_pwr(host->pdev, 0);
791 if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
792 pdata->power) {
793 pdata->power = false;
794 pm_runtime_put(&host->pdev->dev);
795 }
796 }
797 tmio_mmc_clk_stop(host);
798 } else {
799 /* start bus clock */ 762 /* start bus clock */
800 tmio_mmc_clk_start(host); 763 tmio_mmc_clk_start(host);
764 } else if (ios->power_mode != MMC_POWER_UP) {
765 if (host->set_pwr)
766 host->set_pwr(host->pdev, 0);
767 if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
768 pdata->power) {
769 pdata->power = false;
770 pm_runtime_put(&host->pdev->dev);
771 }
772 tmio_mmc_clk_stop(host);
801 } 773 }
802 774
803 switch (ios->bus_width) { 775 switch (ios->bus_width) {
@@ -817,6 +789,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
817 current->comm, task_pid_nr(current), 789 current->comm, task_pid_nr(current),
818 ios->clock, ios->power_mode); 790 ios->clock, ios->power_mode);
819 host->mrq = NULL; 791 host->mrq = NULL;
792
793 mutex_unlock(&host->ios_lock);
820} 794}
821 795
822static int tmio_mmc_get_ro(struct mmc_host *mmc) 796static int tmio_mmc_get_ro(struct mmc_host *mmc)
@@ -913,16 +887,20 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
913 tmio_mmc_enable_sdio_irq(mmc, 0); 887 tmio_mmc_enable_sdio_irq(mmc, 0);
914 888
915 spin_lock_init(&_host->lock); 889 spin_lock_init(&_host->lock);
890 mutex_init(&_host->ios_lock);
916 891
917 /* Init delayed work for request timeouts */ 892 /* Init delayed work for request timeouts */
918 INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work); 893 INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
894 INIT_WORK(&_host->done, tmio_mmc_done_work);
919 895
920 /* See if we also get DMA */ 896 /* See if we also get DMA */
921 tmio_mmc_request_dma(_host, pdata); 897 tmio_mmc_request_dma(_host, pdata);
922 898
923 /* We have to keep the device powered for its card detection to work */ 899 /* We have to keep the device powered for its card detection to work */
924 if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD)) 900 if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD)) {
901 pdata->power = true;
925 pm_runtime_get_noresume(&pdev->dev); 902 pm_runtime_get_noresume(&pdev->dev);
903 }
926 904
927 mmc_add_host(mmc); 905 mmc_add_host(mmc);
928 906
@@ -963,6 +941,7 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
963 pm_runtime_get_sync(&pdev->dev); 941 pm_runtime_get_sync(&pdev->dev);
964 942
965 mmc_remove_host(host->mmc); 943 mmc_remove_host(host->mmc);
944 cancel_work_sync(&host->done);
966 cancel_delayed_work_sync(&host->delayed_reset_work); 945 cancel_delayed_work_sync(&host->delayed_reset_work);
967 tmio_mmc_release_dma(host); 946 tmio_mmc_release_dma(host);
968 947
@@ -998,11 +977,16 @@ int tmio_mmc_host_resume(struct device *dev)
998 /* The MMC core will perform the complete set up */ 977 /* The MMC core will perform the complete set up */
999 host->pdata->power = false; 978 host->pdata->power = false;
1000 979
980 host->pm_global = true;
1001 if (!host->pm_error) 981 if (!host->pm_error)
1002 pm_runtime_get_sync(dev); 982 pm_runtime_get_sync(dev);
1003 983
1004 tmio_mmc_reset(mmc_priv(mmc)); 984 if (host->pm_global) {
1005 tmio_mmc_request_dma(host, host->pdata); 985 /* Runtime PM resume callback didn't run */
986 tmio_mmc_reset(host);
987 tmio_mmc_enable_dma(host, true);
988 host->pm_global = false;
989 }
1006 990
1007 return mmc_resume_host(mmc); 991 return mmc_resume_host(mmc);
1008} 992}
@@ -1023,12 +1007,15 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
1023 struct tmio_mmc_data *pdata = host->pdata; 1007 struct tmio_mmc_data *pdata = host->pdata;
1024 1008
1025 tmio_mmc_reset(host); 1009 tmio_mmc_reset(host);
1010 tmio_mmc_enable_dma(host, true);
1026 1011
1027 if (pdata->power) { 1012 if (pdata->power) {
1028 /* Only entered after a card-insert interrupt */ 1013 /* Only entered after a card-insert interrupt */
1029 tmio_mmc_set_ios(mmc, &mmc->ios); 1014 if (!mmc->card)
1015 tmio_mmc_set_ios(mmc, &mmc->ios);
1030 mmc_detect_change(mmc, msecs_to_jiffies(100)); 1016 mmc_detect_change(mmc, msecs_to_jiffies(100));
1031 } 1017 }
1018 host->pm_global = false;
1032 1019
1033 return 0; 1020 return 0;
1034} 1021}
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index d4455ffbefd..2ec978bc32b 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -259,7 +259,7 @@ static int firmware_rom_wait_states = 0x04;
259static int firmware_rom_wait_states = 0x1C; 259static int firmware_rom_wait_states = 0x1C;
260#endif 260#endif
261 261
262module_param(firmware_rom_wait_states, bool, 0644); 262module_param(firmware_rom_wait_states, int, 0644);
263MODULE_PARM_DESC(firmware_rom_wait_states, 263MODULE_PARM_DESC(firmware_rom_wait_states,
264 "ROM wait states byte=RRRIIEEE (Reserved Internal External)"); 264 "ROM wait states byte=RRRIIEEE (Reserved Internal External)");
265 265
@@ -1625,8 +1625,8 @@ static void __vub300_command_response(struct vub300_mmc_host *vub300,
1625 cmd->error = respretval; 1625 cmd->error = respretval;
1626 } else if (cmd->error) { 1626 } else if (cmd->error) {
1627 /* 1627 /*
1628 * the error occured sending the command 1628 * the error occurred sending the command
1629 * or recieving the response 1629 * or receiving the response
1630 */ 1630 */
1631 } else if (vub300->command_out_urb->status) { 1631 } else if (vub300->command_out_urb->status) {
1632 vub300->usb_transport_fail = vub300->command_out_urb->status; 1632 vub300->usb_transport_fail = vub300->command_out_urb->status;