author	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit	c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree	ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/mmc/host/sdhci.c
parent	ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent	6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'drivers/mmc/host/sdhci.c')
-rw-r--r--  drivers/mmc/host/sdhci.c | 1016
1 file changed, 913 insertions(+), 103 deletions(-)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 401527d273b5..58d5436ff649 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -23,6 +23,7 @@
 
 #include <linux/leds.h>
 
+#include <linux/mmc/mmc.h>
 #include <linux/mmc/host.h>
 
 #include "sdhci.h"
@@ -37,17 +38,21 @@
 #define SDHCI_USE_LEDS_CLASS
 #endif
 
+#define MAX_TUNING_LOOP 40
+
 static unsigned int debug_quirks = 0;
 
-static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
 static void sdhci_finish_data(struct sdhci_host *);
 
 static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
 static void sdhci_finish_command(struct sdhci_host *);
+static int sdhci_execute_tuning(struct mmc_host *mmc);
+static void sdhci_tuning_timer(unsigned long data);
 
 static void sdhci_dumpregs(struct sdhci_host *host)
 {
-	printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");
+	printk(KERN_DEBUG DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
+		mmc_hostname(host->mmc));
 
 	printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
 		sdhci_readl(host, SDHCI_DMA_ADDRESS),
@@ -76,9 +81,14 @@ static void sdhci_dumpregs(struct sdhci_host *host)
 	printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
 		sdhci_readw(host, SDHCI_ACMD12_ERR),
 		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
-	printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Max curr: 0x%08x\n",
+	printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
 		sdhci_readl(host, SDHCI_CAPABILITIES),
+		sdhci_readl(host, SDHCI_CAPABILITIES_1));
+	printk(KERN_DEBUG DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
+		sdhci_readw(host, SDHCI_COMMAND),
 		sdhci_readl(host, SDHCI_MAX_CURRENT));
+	printk(KERN_DEBUG DRIVER_NAME ": Host ctl2: 0x%08x\n",
+		sdhci_readw(host, SDHCI_HOST_CONTROL2));
 
 	if (host->flags & SDHCI_USE_ADMA)
 		printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
@@ -152,6 +162,9 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
 	if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
 		ier = sdhci_readl(host, SDHCI_INT_ENABLE);
 
+	if (host->ops->platform_reset_enter)
+		host->ops->platform_reset_enter(host, mask);
+
 	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
 
 	if (mask & SDHCI_RESET_ALL)
@@ -172,6 +185,9 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
 		mdelay(1);
 	}
 
+	if (host->ops->platform_reset_exit)
+		host->ops->platform_reset_exit(host, mask);
+
 	if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
 		sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
 }
@@ -586,9 +602,10 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
 		data->sg_len, direction);
 }
 
-static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
+static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
 {
 	u8 count;
+	struct mmc_data *data = cmd->data;
 	unsigned target_timeout, current_timeout;
 
 	/*
@@ -600,9 +617,16 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
 	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
 		return 0xE;
 
+	/* Unspecified timeout, assume max */
+	if (!data && !cmd->cmd_timeout_ms)
+		return 0xE;
+
 	/* timeout in us */
-	target_timeout = data->timeout_ns / 1000 +
-		data->timeout_clks / host->clock;
+	if (!data)
+		target_timeout = cmd->cmd_timeout_ms * 1000;
+	else
+		target_timeout = data->timeout_ns / 1000 +
+			data->timeout_clks / host->clock;
 
 	if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
 		host->timeout_clk = host->clock / 1000;
@@ -617,6 +641,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
 	 * =>
 	 * (1) / (2) > 2^6
 	 */
+	BUG_ON(!host->timeout_clk);
 	count = 0;
 	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
 	while (current_timeout < target_timeout) {
@@ -627,8 +652,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
 	}
 
 	if (count >= 0xF) {
-		printk(KERN_WARNING "%s: Too large timeout requested!\n",
-			mmc_hostname(host->mmc));
+		printk(KERN_WARNING "%s: Too large timeout requested for CMD%d!\n",
+			mmc_hostname(host->mmc), cmd->opcode);
 		count = 0xE;
 	}
 
@@ -646,15 +671,21 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
 	sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
 }
 
-static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
+static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
 {
 	u8 count;
 	u8 ctrl;
+	struct mmc_data *data = cmd->data;
 	int ret;
 
 	WARN_ON(host->data);
 
-	if (data == NULL)
+	if (data || (cmd->flags & MMC_RSP_BUSY)) {
+		count = sdhci_calc_timeout(host, cmd);
+		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
+	}
+
+	if (!data)
 		return;
 
 	/* Sanity checks */
@@ -664,9 +695,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
 
 	host->data = data;
 	host->data_early = 0;
-
-	count = sdhci_calc_timeout(host, data);
-	sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
+	host->data->bytes_xfered = 0;
 
 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
 		host->flags |= SDHCI_REQ_USE_DMA;
@@ -802,15 +831,17 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
 
 	sdhci_set_transfer_irqs(host);
 
-	/* We do not handle DMA boundaries, so set it to max (512 KiB) */
-	sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, data->blksz), SDHCI_BLOCK_SIZE);
+	/* Set the DMA boundary value and block size */
+	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
+		data->blksz), SDHCI_BLOCK_SIZE);
 	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
 }
 
 static void sdhci_set_transfer_mode(struct sdhci_host *host,
-	struct mmc_data *data)
+	struct mmc_command *cmd)
 {
 	u16 mode;
+	struct mmc_data *data = cmd->data;
 
 	if (data == NULL)
 		return;
@@ -818,12 +849,20 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
 	WARN_ON(!host->data);
 
 	mode = SDHCI_TRNS_BLK_CNT_EN;
-	if (data->blocks > 1) {
-		if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
-			mode |= SDHCI_TRNS_MULTI | SDHCI_TRNS_ACMD12;
-		else
-			mode |= SDHCI_TRNS_MULTI;
+	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
+		mode |= SDHCI_TRNS_MULTI;
+		/*
+		 * If we are sending CMD23, CMD12 never gets sent
+		 * on successful completion (so no Auto-CMD12).
+		 */
+		if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12))
+			mode |= SDHCI_TRNS_AUTO_CMD12;
+		else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
+			mode |= SDHCI_TRNS_AUTO_CMD23;
+			sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
+		}
 	}
+
 	if (data->flags & MMC_DATA_READ)
 		mode |= SDHCI_TRNS_READ;
 	if (host->flags & SDHCI_REQ_USE_DMA)
@@ -863,7 +902,15 @@ static void sdhci_finish_data(struct sdhci_host *host)
 	else
 		data->bytes_xfered = data->blksz * data->blocks;
 
-	if (data->stop) {
+	/*
+	 * Need to send CMD12 if -
+	 * a) open-ended multiblock transfer (no CMD23)
+	 * b) error in multiblock transfer
+	 */
+	if (data->stop &&
+	    (data->error ||
+	     !host->mrq->sbc)) {
+
 		/*
 		 * The controller needs a reset of internal state machines
 		 * upon error conditions.
@@ -915,11 +962,11 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
 
 	host->cmd = cmd;
 
-	sdhci_prepare_data(host, cmd->data);
+	sdhci_prepare_data(host, cmd);
 
 	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
 
-	sdhci_set_transfer_mode(host, cmd->data);
+	sdhci_set_transfer_mode(host, cmd);
 
 	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
 		printk(KERN_ERR "%s: Unsupported response type!\n",
@@ -942,7 +989,9 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
 		flags |= SDHCI_CMD_CRC;
 	if (cmd->flags & MMC_RSP_OPCODE)
 		flags |= SDHCI_CMD_INDEX;
-	if (cmd->data)
+
+	/* CMD19 is special in that the Data Present Select should be set */
+	if (cmd->data || (cmd->opcode == MMC_SEND_TUNING_BLOCK))
 		flags |= SDHCI_CMD_DATA;
 
 	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
@@ -972,19 +1021,27 @@ static void sdhci_finish_command(struct sdhci_host *host)
 
 	host->cmd->error = 0;
 
-	if (host->data && host->data_early)
-		sdhci_finish_data(host);
+	/* Finished CMD23, now send actual command. */
+	if (host->cmd == host->mrq->sbc) {
+		host->cmd = NULL;
+		sdhci_send_command(host, host->mrq->cmd);
+	} else {
 
-	if (!host->cmd->data)
-		tasklet_schedule(&host->finish_tasklet);
+		/* Processed actual command. */
+		if (host->data && host->data_early)
+			sdhci_finish_data(host);
 
-	host->cmd = NULL;
+		if (!host->cmd->data)
+			tasklet_schedule(&host->finish_tasklet);
+
+		host->cmd = NULL;
+	}
 }
 
 static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
 {
-	int div;
-	u16 clk;
+	int div = 0; /* Initialized for compiler warning */
+	u16 clk = 0;
 	unsigned long timeout;
 
 	if (clock == host->clock)
@@ -1001,13 +1058,59 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
 	if (clock == 0)
 		goto out;
 
-	for (div = 1;div < 256;div *= 2) {
-		if ((host->max_clk / div) <= clock)
-			break;
+	if (host->version >= SDHCI_SPEC_300) {
+		/*
+		 * Check if the Host Controller supports Programmable Clock
+		 * Mode.
+		 */
+		if (host->clk_mul) {
+			u16 ctrl;
+
+			/*
+			 * We need to figure out whether the Host Driver needs
+			 * to select Programmable Clock Mode, or the value can
+			 * be set automatically by the Host Controller based on
+			 * the Preset Value registers.
+			 */
+			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+			if (!(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
+				for (div = 1; div <= 1024; div++) {
+					if (((host->max_clk * host->clk_mul) /
+					    div) <= clock)
+						break;
+				}
+				/*
+				 * Set Programmable Clock Mode in the Clock
+				 * Control register.
+				 */
+				clk = SDHCI_PROG_CLOCK_MODE;
+				div--;
+			}
+		} else {
+			/* Version 3.00 divisors must be a multiple of 2. */
+			if (host->max_clk <= clock)
+				div = 1;
+			else {
+				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
+				     div += 2) {
+					if ((host->max_clk / div) <= clock)
+						break;
+				}
+			}
+			div >>= 1;
+		}
+	} else {
+		/* Version 2.00 divisors must be a power of 2. */
+		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
+			if ((host->max_clk / div) <= clock)
+				break;
+		}
+		div >>= 1;
 	}
-	div >>= 1;
 
-	clk = div << SDHCI_DIVIDER_SHIFT;
+	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
+	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
+		<< SDHCI_DIVIDER_HI_SHIFT;
 	clk |= SDHCI_CLOCK_INT_EN;
 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
 
@@ -1034,11 +1137,9 @@ out:
 
 static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
 {
-	u8 pwr;
+	u8 pwr = 0;
 
-	if (power == (unsigned short)-1)
-		pwr = 0;
-	else {
+	if (power != (unsigned short)-1) {
 		switch (1 << power) {
 		case MMC_VDD_165_195:
 			pwr = SDHCI_POWER_180;
@@ -1113,7 +1214,12 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 #ifndef SDHCI_USE_LEDS_CLASS
 	sdhci_activate_led(host);
 #endif
-	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) {
+
+	/*
+	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
+	 * requests if Auto-CMD12 is enabled.
+	 */
+	if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
 		if (mrq->stop) {
 			mrq->data->stop = NULL;
 			mrq->stop = NULL;
@@ -1132,8 +1238,30 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
 		host->mrq->cmd->error = -ENOMEDIUM;
 		tasklet_schedule(&host->finish_tasklet);
-	} else
-		sdhci_send_command(host, mrq->cmd);
+	} else {
+		u32 present_state;
+
+		present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
+		/*
+		 * Check if the re-tuning timer has already expired and there
+		 * is no on-going data transfer. If so, we need to execute
+		 * tuning procedure before sending command.
+		 */
+		if ((host->flags & SDHCI_NEEDS_RETUNING) &&
+		    !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
+			spin_unlock_irqrestore(&host->lock, flags);
+			sdhci_execute_tuning(mmc);
+			spin_lock_irqsave(&host->lock, flags);
+
+			/* Restore original mmc_request structure */
+			host->mrq = mrq;
+		}
+
+		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
+			sdhci_send_command(host, mrq->sbc);
+		else
+			sdhci_send_command(host, mrq->cmd);
+	}
 
 	mmiowb();
 	spin_unlock_irqrestore(&host->lock, flags);
@@ -1168,25 +1296,120 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 	else
 		sdhci_set_power(host, ios->vdd);
 
-	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+	if (host->ops->platform_send_init_74_clocks)
+		host->ops->platform_send_init_74_clocks(host, ios->power_mode);
 
-	if (ios->bus_width == MMC_BUS_WIDTH_8)
-		ctrl |= SDHCI_CTRL_8BITBUS;
-	else
-		ctrl &= ~SDHCI_CTRL_8BITBUS;
+	/*
+	 * If your platform has 8-bit width support but is not a v3 controller,
+	 * or if it requires special setup code, you should implement that in
+	 * platform_8bit_width().
+	 */
+	if (host->ops->platform_8bit_width)
+		host->ops->platform_8bit_width(host, ios->bus_width);
+	else {
+		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+		if (ios->bus_width == MMC_BUS_WIDTH_8) {
+			ctrl &= ~SDHCI_CTRL_4BITBUS;
+			if (host->version >= SDHCI_SPEC_300)
+				ctrl |= SDHCI_CTRL_8BITBUS;
+		} else {
+			if (host->version >= SDHCI_SPEC_300)
+				ctrl &= ~SDHCI_CTRL_8BITBUS;
+			if (ios->bus_width == MMC_BUS_WIDTH_4)
+				ctrl |= SDHCI_CTRL_4BITBUS;
+			else
+				ctrl &= ~SDHCI_CTRL_4BITBUS;
+		}
+		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+	}
 
-	if (ios->bus_width == MMC_BUS_WIDTH_4)
-		ctrl |= SDHCI_CTRL_4BITBUS;
-	else
-		ctrl &= ~SDHCI_CTRL_4BITBUS;
+	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
 
-	if (ios->timing == MMC_TIMING_SD_HS &&
-		!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
+	if ((ios->timing == MMC_TIMING_SD_HS ||
+	     ios->timing == MMC_TIMING_MMC_HS)
+	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
 		ctrl |= SDHCI_CTRL_HISPD;
 	else
 		ctrl &= ~SDHCI_CTRL_HISPD;
 
-	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+	if (host->version >= SDHCI_SPEC_300) {
+		u16 clk, ctrl_2;
+		unsigned int clock;
+
+		/* In case of UHS-I modes, set High Speed Enable */
+		if ((ios->timing == MMC_TIMING_UHS_SDR50) ||
+		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
+		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
+		    (ios->timing == MMC_TIMING_UHS_SDR25) ||
+		    (ios->timing == MMC_TIMING_UHS_SDR12))
+			ctrl |= SDHCI_CTRL_HISPD;
+
+		ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+		if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
+			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+			/*
+			 * We only need to set Driver Strength if the
+			 * preset value enable is not set.
+			 */
+			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
+			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
+				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
+			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
+				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
+
+			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+		} else {
+			/*
+			 * According to SDHC Spec v3.00, if the Preset Value
+			 * Enable in the Host Control 2 register is set, we
+			 * need to reset SD Clock Enable before changing High
+			 * Speed Enable to avoid generating clock gliches.
+			 */
+
+			/* Reset SD Clock Enable */
+			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+			clk &= ~SDHCI_CLOCK_CARD_EN;
+			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+			/* Re-enable SD Clock */
+			clock = host->clock;
+			host->clock = 0;
+			sdhci_set_clock(host, clock);
+		}
+
+
+		/* Reset SD Clock Enable */
+		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+		clk &= ~SDHCI_CLOCK_CARD_EN;
+		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+		if (host->ops->set_uhs_signaling)
+			host->ops->set_uhs_signaling(host, ios->timing);
+		else {
+			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+			/* Select Bus Speed Mode for host */
+			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+			if (ios->timing == MMC_TIMING_UHS_SDR12)
+				ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+			else if (ios->timing == MMC_TIMING_UHS_SDR25)
+				ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+			else if (ios->timing == MMC_TIMING_UHS_SDR50)
+				ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+			else if (ios->timing == MMC_TIMING_UHS_SDR104)
+				ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+			else if (ios->timing == MMC_TIMING_UHS_DDR50)
+				ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+		}
+
+		/* Re-enable SD Clock */
+		clock = host->clock;
+		host->clock = 0;
+		sdhci_set_clock(host, clock);
+	} else
+		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
 
 	/*
 	 * Some (ENE) controllers go apeshit on some ios operation,
@@ -1201,26 +1424,49 @@ out:
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
-static int sdhci_get_ro(struct mmc_host *mmc)
+static int check_ro(struct sdhci_host *host)
 {
-	struct sdhci_host *host;
 	unsigned long flags;
-	int present;
-
-	host = mmc_priv(mmc);
+	int is_readonly;
 
 	spin_lock_irqsave(&host->lock, flags);
 
 	if (host->flags & SDHCI_DEVICE_DEAD)
-		present = 0;
+		is_readonly = 0;
+	else if (host->ops->get_ro)
+		is_readonly = host->ops->get_ro(host);
 	else
-		present = sdhci_readl(host, SDHCI_PRESENT_STATE);
+		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
+				& SDHCI_WRITE_PROTECT);
 
 	spin_unlock_irqrestore(&host->lock, flags);
 
-	if (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT)
-		return !!(present & SDHCI_WRITE_PROTECT);
-	return !(present & SDHCI_WRITE_PROTECT);
+	/* This quirk needs to be replaced by a callback-function later */
+	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
+		!is_readonly : is_readonly;
+}
+
+#define SAMPLE_COUNT 5
+
+static int sdhci_get_ro(struct mmc_host *mmc)
+{
+	struct sdhci_host *host;
+	int i, ro_count;
+
+	host = mmc_priv(mmc);
+
+	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
+		return check_ro(host);
+
+	ro_count = 0;
+	for (i = 0; i < SAMPLE_COUNT; i++) {
+		if (check_ro(host)) {
+			if (++ro_count > SAMPLE_COUNT / 2)
+				return 1;
+		}
+		msleep(30);
+	}
+	return 0;
 }
 
 static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -1245,11 +1491,322 @@ out:
1245 spin_unlock_irqrestore(&host->lock, flags); 1491 spin_unlock_irqrestore(&host->lock, flags);
1246} 1492}
1247 1493
1494static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1495 struct mmc_ios *ios)
1496{
1497 struct sdhci_host *host;
1498 u8 pwr;
1499 u16 clk, ctrl;
1500 u32 present_state;
1501
1502 host = mmc_priv(mmc);
1503
1504 /*
1505 * Signal Voltage Switching is only applicable for Host Controllers
1506 * v3.00 and above.
1507 */
1508 if (host->version < SDHCI_SPEC_300)
1509 return 0;
1510
1511 /*
1512 * We first check whether the request is to set signalling voltage
1513 * to 3.3V. If so, we change the voltage to 3.3V and return quickly.
1514 */
1515 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1516 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1517 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1518 ctrl &= ~SDHCI_CTRL_VDD_180;
1519 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1520
1521 /* Wait for 5ms */
1522 usleep_range(5000, 5500);
1523
1524 /* 3.3V regulator output should be stable within 5 ms */
1525 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1526 if (!(ctrl & SDHCI_CTRL_VDD_180))
1527 return 0;
1528 else {
1529 printk(KERN_INFO DRIVER_NAME ": Switching to 3.3V "
1530 "signalling voltage failed\n");
1531 return -EIO;
1532 }
1533 } else if (!(ctrl & SDHCI_CTRL_VDD_180) &&
1534 (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)) {
1535 /* Stop SDCLK */
1536 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1537 clk &= ~SDHCI_CLOCK_CARD_EN;
1538 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1539
1540 /* Check whether DAT[3:0] is 0000 */
1541 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1542 if (!((present_state & SDHCI_DATA_LVL_MASK) >>
1543 SDHCI_DATA_LVL_SHIFT)) {
1544 /*
1545 * Enable 1.8V Signal Enable in the Host Control2
1546 * register
1547 */
1548 ctrl |= SDHCI_CTRL_VDD_180;
1549 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1550
1551 /* Wait for 5ms */
1552 usleep_range(5000, 5500);
1553
1554 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1555 if (ctrl & SDHCI_CTRL_VDD_180) {
1556 /* Provide SDCLK again and wait for 1ms*/
1557 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1558 clk |= SDHCI_CLOCK_CARD_EN;
1559 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1560 usleep_range(1000, 1500);
1561
1562 /*
1563 * If DAT[3:0] level is 1111b, then the card
1564 * was successfully switched to 1.8V signaling.
1565 */
1566 present_state = sdhci_readl(host,
1567 SDHCI_PRESENT_STATE);
1568 if ((present_state & SDHCI_DATA_LVL_MASK) ==
1569 SDHCI_DATA_LVL_MASK)
1570 return 0;
1571 }
1572 }
1573
1574 /*
1575 * If we are here, that means the switch to 1.8V signaling
1576 * failed. We power cycle the card, and retry initialization
1577 * sequence by setting S18R to 0.
1578 */
1579 pwr = sdhci_readb(host, SDHCI_POWER_CONTROL);
1580 pwr &= ~SDHCI_POWER_ON;
1581 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1582
1583 /* Wait for 1ms as per the spec */
1584 usleep_range(1000, 1500);
1585 pwr |= SDHCI_POWER_ON;
1586 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1587
1588 printk(KERN_INFO DRIVER_NAME ": Switching to 1.8V signalling "
1589 "voltage failed, retrying with S18R set to 0\n");
1590 return -EAGAIN;
1591 } else
1592 /* No signal voltage switch required */
1593 return 0;
1594}
1595
1596static int sdhci_execute_tuning(struct mmc_host *mmc)
1597{
1598 struct sdhci_host *host;
1599 u16 ctrl;
1600 u32 ier;
1601 int tuning_loop_counter = MAX_TUNING_LOOP;
1602 unsigned long timeout;
1603 int err = 0;
1604
1605 host = mmc_priv(mmc);
1606
1607 disable_irq(host->irq);
1608 spin_lock(&host->lock);
1609
1610 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1611
1612 /*
1613 * Host Controller needs tuning only in case of SDR104 mode
1614 * and for SDR50 mode when Use Tuning for SDR50 is set in
1615 * Capabilities register.
1616 */
1617 if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
1618 (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
1619 (host->flags & SDHCI_SDR50_NEEDS_TUNING)))
1620 ctrl |= SDHCI_CTRL_EXEC_TUNING;
1621 else {
1622 spin_unlock(&host->lock);
1623 enable_irq(host->irq);
1624 return 0;
1625 }
1626
1627 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1628
1629 /*
1630 * As per the Host Controller spec v3.00, tuning command
1631 * generates Buffer Read Ready interrupt, so enable that.
1632 *
1633 * Note: The spec clearly says that when tuning sequence
1634 * is being performed, the controller does not generate
1635 * interrupts other than Buffer Read Ready interrupt. But
1636 * to make sure we don't hit a controller bug, we _only_
1637 * enable Buffer Read Ready interrupt here.
1638 */
1639 ier = sdhci_readl(host, SDHCI_INT_ENABLE);
1640 sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL);
1641
1642 /*
1643 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
1644 * of loops reaches 40 times or a timeout of 150ms occurs.
1645 */
1646 timeout = 150;
1647 do {
1648 struct mmc_command cmd = {0};
1649 struct mmc_request mrq = {0};
1650
1651 if (!tuning_loop_counter && !timeout)
1652 break;
1653
1654 cmd.opcode = MMC_SEND_TUNING_BLOCK;
1655 cmd.arg = 0;
1656 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
1657 cmd.retries = 0;
1658 cmd.data = NULL;
1659 cmd.error = 0;
1660
1661 mrq.cmd = &cmd;
1662 host->mrq = &mrq;
1663
1664 /*
1665 * In response to CMD19, the card sends 64 bytes of tuning
1666 * block to the Host Controller. So we set the block size
1667 * to 64 here.
1668 */
1669 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE);
1670
1671 /*
1672 * The tuning block is sent by the card to the host controller.
1673 * So we set the TRNS_READ bit in the Transfer Mode register.
1674 * This also takes care of setting DMA Enable and Multi Block
1675 * Select in the same register to 0.
1676 */
1677 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
1678
1679 sdhci_send_command(host, &cmd);
1680
1681 host->cmd = NULL;
1682 host->mrq = NULL;
1683
1684 spin_unlock(&host->lock);
1685 enable_irq(host->irq);
1686
1687 /* Wait for Buffer Read Ready interrupt */
1688 wait_event_interruptible_timeout(host->buf_ready_int,
1689 (host->tuning_done == 1),
1690 msecs_to_jiffies(50));
1691 disable_irq(host->irq);
1692 spin_lock(&host->lock);
1693
1694 if (!host->tuning_done) {
1695 printk(KERN_INFO DRIVER_NAME ": Timeout waiting for "
1696 "Buffer Read Ready interrupt during tuning "
1697 "procedure, falling back to fixed sampling "
1698 "clock\n");
1699 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1700 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
1701 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
1702 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1703
1704 err = -EIO;
1705 goto out;
1706 }
1707
1708 host->tuning_done = 0;
1709
1710 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1711 tuning_loop_counter--;
1712 timeout--;
1713 mdelay(1);
1714 } while (ctrl & SDHCI_CTRL_EXEC_TUNING);
1715
1716 /*
1717 * The Host Driver has exhausted the maximum number of loops allowed,
1718 * so use fixed sampling frequency.
1719 */
1720 if (!tuning_loop_counter || !timeout) {
1721 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
1722 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1723 } else {
1724 if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
1725 printk(KERN_INFO DRIVER_NAME ": Tuning procedure"
1726 " failed, falling back to fixed sampling"
1727 " clock\n");
1728 err = -EIO;
1729 }
1730 }
1731
1732out:
1733 /*
1734 * If this is the very first time we are here, we start the retuning
1735 * timer. Since only during the first time, SDHCI_NEEDS_RETUNING
1736 * flag won't be set, we check this condition before actually starting
1737 * the timer.
1738 */
1739 if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
1740 (host->tuning_mode == SDHCI_TUNING_MODE_1)) {
1741 mod_timer(&host->tuning_timer, jiffies +
1742 host->tuning_count * HZ);
1743 /* Tuning mode 1 limits the maximum data length to 4MB */
1744 mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
1745 } else {
1746 host->flags &= ~SDHCI_NEEDS_RETUNING;
1747 /* Reload the new initial value for timer */
1748 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
1749 mod_timer(&host->tuning_timer, jiffies +
1750 host->tuning_count * HZ);
1751 }
1752
1753 /*
1754 * In case tuning fails, host controllers which support re-tuning can
1755 * try tuning again at a later time, when the re-tuning timer expires.
1756 * So for these controllers, we return 0. Since there might be other
1757 * controllers who do not have this capability, we return error for
1758 * them.
1759 */
1760 if (err && host->tuning_count &&
1761 host->tuning_mode == SDHCI_TUNING_MODE_1)
1762 err = 0;
1763
1764 sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
1765 spin_unlock(&host->lock);
1766 enable_irq(host->irq);
1767
1768 return err;
1769}
1770
1771static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable)
1772{
1773 struct sdhci_host *host;
1774 u16 ctrl;
1775 unsigned long flags;
1776
1777 host = mmc_priv(mmc);
1778
1779 /* Host Controller v3.00 defines preset value registers */
1780 if (host->version < SDHCI_SPEC_300)
1781 return;
1782
1783 spin_lock_irqsave(&host->lock, flags);
1784
1785 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1786
1787 /*
1788 * We only enable or disable Preset Value if they are not already
1789 * enabled or disabled respectively. Otherwise, we bail out.
1790 */
1791 if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
1792 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
1793 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1794 } else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
1795 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
1796 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1797 }
1798
1799 spin_unlock_irqrestore(&host->lock, flags);
1800}
1801
1248static const struct mmc_host_ops sdhci_ops = { 1802static const struct mmc_host_ops sdhci_ops = {
1249 .request = sdhci_request, 1803 .request = sdhci_request,
1250 .set_ios = sdhci_set_ios, 1804 .set_ios = sdhci_set_ios,
1251 .get_ro = sdhci_get_ro, 1805 .get_ro = sdhci_get_ro,
1252 .enable_sdio_irq = sdhci_enable_sdio_irq, 1806 .enable_sdio_irq = sdhci_enable_sdio_irq,
1807 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
1808 .execute_tuning = sdhci_execute_tuning,
1809 .enable_preset_value = sdhci_enable_preset_value,
1253}; 1810};
1254 1811
1255/*****************************************************************************\ 1812/*****************************************************************************\
@@ -1295,10 +1852,20 @@ static void sdhci_tasklet_finish(unsigned long param)
 
 	host = (struct sdhci_host*)param;
 
+	/*
+	 * If this tasklet gets rescheduled while running, it will
+	 * be run again afterwards but without any active request.
+	 */
+	if (!host->mrq)
+		return;
+
 	spin_lock_irqsave(&host->lock, flags);
 
 	del_timer(&host->timer);
 
+	if (host->version >= SDHCI_SPEC_300)
+		del_timer(&host->tuning_timer);
+
 	mrq = host->mrq;
 
 	/*
@@ -1306,7 +1873,7 @@ static void sdhci_tasklet_finish(unsigned long param)
 	 * upon error conditions.
 	 */
 	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
-	    (mrq->cmd->error ||
+	    ((mrq->cmd && mrq->cmd->error) ||
 	    (mrq->data && (mrq->data->error ||
 	    (mrq->data->stop && mrq->data->stop->error))) ||
 	    (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
@@ -1372,6 +1939,20 @@ static void sdhci_timeout_timer(unsigned long data)
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
+static void sdhci_tuning_timer(unsigned long data)
+{
+	struct sdhci_host *host;
+	unsigned long flags;
+
+	host = (struct sdhci_host *)data;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	host->flags |= SDHCI_NEEDS_RETUNING;
+
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+
 /*****************************************************************************\
  *                                                                           *
  * Interrupt handling                                                        *
@@ -1427,7 +2008,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
 		sdhci_finish_command(host);
 }
 
-#ifdef DEBUG
+#ifdef CONFIG_MMC_DEBUG
 static void sdhci_show_adma_error(struct sdhci_host *host)
 {
 	const char *name = mmc_hostname(host->mmc);
@@ -1460,6 +2041,16 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
 {
 	BUG_ON(intmask == 0);
 
+	/* CMD19 generates _only_ Buffer Read Ready interrupt */
+	if (intmask & SDHCI_INT_DATA_AVAIL) {
+		if (SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) ==
+		    MMC_SEND_TUNING_BLOCK) {
+			host->tuning_done = 1;
+			wake_up(&host->buf_ready_int);
+			return;
+		}
+	}
+
 	if (!host->data) {
 		/*
 		 * The "data complete" interrupt is also used to
@@ -1483,7 +2074,11 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
 
 	if (intmask & SDHCI_INT_DATA_TIMEOUT)
 		host->data->error = -ETIMEDOUT;
-	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
+	else if (intmask & SDHCI_INT_DATA_END_BIT)
+		host->data->error = -EILSEQ;
+	else if ((intmask & SDHCI_INT_DATA_CRC) &&
+		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
+			!= MMC_BUS_TEST_R)
 		host->data->error = -EILSEQ;
 	else if (intmask & SDHCI_INT_ADMA_ERROR) {
 		printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
@@ -1501,10 +2096,28 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
 		 * We currently don't do anything fancy with DMA
 		 * boundaries, but as we can't disable the feature
 		 * we need to at least restart the transfer.
+		 *
+		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
+		 * should return a valid address to continue from, but as
+		 * some controllers are faulty, don't trust them.
 		 */
-		if (intmask & SDHCI_INT_DMA_END)
-			sdhci_writel(host, sdhci_readl(host, SDHCI_DMA_ADDRESS),
-				SDHCI_DMA_ADDRESS);
+		if (intmask & SDHCI_INT_DMA_END) {
+			u32 dmastart, dmanow;
+			dmastart = sg_dma_address(host->data->sg);
+			dmanow = dmastart + host->data->bytes_xfered;
+			/*
+			 * Force update to the next DMA block boundary.
+			 */
+			dmanow = (dmanow &
+				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
+				SDHCI_DEFAULT_BOUNDARY_SIZE;
+			host->data->bytes_xfered = dmanow - dmastart;
+			DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
+				" next 0x%08x\n",
+				mmc_hostname(host->mmc), dmastart,
+				host->data->bytes_xfered, dmanow);
+			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
+		}
 
 		if (intmask & SDHCI_INT_DATA_END) {
 			if (host->cmd) {
@@ -1614,6 +2227,14 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
 
 	sdhci_disable_card_detection(host);
 
+	/* Disable tuning since we are suspending */
+	if (host->version >= SDHCI_SPEC_300 && host->tuning_count &&
+	    host->tuning_mode == SDHCI_TUNING_MODE_1) {
+		host->flags &= ~SDHCI_NEEDS_RETUNING;
+		mod_timer(&host->tuning_timer, jiffies +
+			host->tuning_count * HZ);
+	}
+
 	ret = mmc_suspend_host(host->mmc);
 	if (ret)
 		return ret;
@@ -1655,11 +2276,26 @@ int sdhci_resume_host(struct sdhci_host *host)
 	ret = mmc_resume_host(host->mmc);
 	sdhci_enable_card_detection(host);
 
+	/* Set the re-tuning expiration flag */
+	if ((host->version >= SDHCI_SPEC_300) && host->tuning_count &&
+	    (host->tuning_mode == SDHCI_TUNING_MODE_1))
+		host->flags |= SDHCI_NEEDS_RETUNING;
+
 	return ret;
 }
 
 EXPORT_SYMBOL_GPL(sdhci_resume_host);
 
+void sdhci_enable_irq_wakeups(struct sdhci_host *host)
+{
+	u8 val;
+	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
+	val |= SDHCI_WAKE_ON_INT;
+	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+}
+
+EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
+
 #endif /* CONFIG_PM */
 
 /*****************************************************************************\
@@ -1691,7 +2327,9 @@ EXPORT_SYMBOL_GPL(sdhci_alloc_host);
 int sdhci_add_host(struct sdhci_host *host)
 {
 	struct mmc_host *mmc;
-	unsigned int caps;
+	u32 caps[2];
+	u32 max_current_caps;
+	unsigned int ocr_avail;
 	int ret;
 
 	WARN_ON(host == NULL);
@@ -1708,18 +2346,21 @@ int sdhci_add_host(struct sdhci_host *host)
 	host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
 	host->version = (host->version & SDHCI_SPEC_VER_MASK)
 				>> SDHCI_SPEC_VER_SHIFT;
-	if (host->version > SDHCI_SPEC_200) {
+	if (host->version > SDHCI_SPEC_300) {
 		printk(KERN_ERR "%s: Unknown controller version (%d). "
 			"You may experience problems.\n", mmc_hostname(mmc),
 			host->version);
 	}
 
-	caps = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
+	caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
 		sdhci_readl(host, SDHCI_CAPABILITIES);
 
+	caps[1] = (host->version >= SDHCI_SPEC_300) ?
+		sdhci_readl(host, SDHCI_CAPABILITIES_1) : 0;
+
 	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
 		host->flags |= SDHCI_USE_SDMA;
-	else if (!(caps & SDHCI_CAN_DO_SDMA))
+	else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
 		DBG("Controller doesn't have SDMA capability\n");
 	else
 		host->flags |= SDHCI_USE_SDMA;
@@ -1730,7 +2371,8 @@ int sdhci_add_host(struct sdhci_host *host)
 		host->flags &= ~SDHCI_USE_SDMA;
 	}
 
-	if ((host->version >= SDHCI_SPEC_200) && (caps & SDHCI_CAN_DO_ADMA2))
+	if ((host->version >= SDHCI_SPEC_200) &&
+			(caps[0] & SDHCI_CAN_DO_ADMA2))
 		host->flags |= SDHCI_USE_ADMA;
 
 	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
@@ -1779,8 +2421,13 @@ int sdhci_add_host(struct sdhci_host *host)
 		mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
 	}
 
-	host->max_clk =
-		(caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
+	if (host->version >= SDHCI_SPEC_300)
+		host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
+			>> SDHCI_CLOCK_BASE_SHIFT;
+	else
+		host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
+			>> SDHCI_CLOCK_BASE_SHIFT;
+
 	host->max_clk *= 1000000;
 	if (host->max_clk == 0 || host->quirks &
 		SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
@@ -1794,7 +2441,7 @@ int sdhci_add_host(struct sdhci_host *host)
 	}
 
 	host->timeout_clk =
-		(caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
+		(caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
 	if (host->timeout_clk == 0) {
 		if (host->ops->get_timeout_clock) {
 			host->timeout_clk = host->ops->get_timeout_clock(host);
@@ -1806,36 +2453,185 @@ int sdhci_add_host(struct sdhci_host *host)
1806 return -ENODEV; 2453 return -ENODEV;
1807 } 2454 }
1808 } 2455 }
1809 if (caps & SDHCI_TIMEOUT_CLK_UNIT) 2456 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
1810 host->timeout_clk *= 1000; 2457 host->timeout_clk *= 1000;
1811 2458
1812 /* 2459 /*
2460 * In case of Host Controller v3.00, find out whether clock
2461 * multiplier is supported.
2462 */
2463 host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
2464 SDHCI_CLOCK_MUL_SHIFT;
2465
2466 /*
2467 * In case the value in Clock Multiplier is 0, then programmable
2468 * clock mode is not supported, otherwise the actual clock
2469 * multiplier is one more than the value of Clock Multiplier
2470 * in the Capabilities Register.
2471 */
2472 if (host->clk_mul)
2473 host->clk_mul += 1;
2474
2475 /*
1813 * Set host parameters. 2476 * Set host parameters.
1814 */ 2477 */
1815 mmc->ops = &sdhci_ops; 2478 mmc->ops = &sdhci_ops;
2479 mmc->f_max = host->max_clk;
1816 if (host->ops->get_min_clock) 2480 if (host->ops->get_min_clock)
1817 mmc->f_min = host->ops->get_min_clock(host); 2481 mmc->f_min = host->ops->get_min_clock(host);
1818 else 2482 else if (host->version >= SDHCI_SPEC_300) {
1819 mmc->f_min = host->max_clk / 256; 2483 if (host->clk_mul) {
1820 mmc->f_max = host->max_clk; 2484 mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
1821 mmc->caps |= MMC_CAP_SDIO_IRQ; 2485 mmc->f_max = host->max_clk * host->clk_mul;
2486 } else
2487 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
2488 } else
2489 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
2490
2491 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
2492
2493 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
2494 host->flags |= SDHCI_AUTO_CMD12;
2495
2496 /* Auto-CMD23 stuff only works in ADMA or PIO. */
2497 if ((host->version >= SDHCI_SPEC_300) &&
2498 ((host->flags & SDHCI_USE_ADMA) ||
2499 !(host->flags & SDHCI_USE_SDMA))) {
2500 host->flags |= SDHCI_AUTO_CMD23;
2501 DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
2502 } else {
2503 DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
2504 }
1822 2505
2506 /*
2507 * A controller may support 8-bit width, but the board itself
2508 * might not have the pins brought out. Boards that support
2509 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
2510 * their platform code before calling sdhci_add_host(), and we
2511 * won't assume 8-bit width for hosts without that CAP.
2512 */
1823 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 2513 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
1824 mmc->caps |= MMC_CAP_4_BIT_DATA; 2514 mmc->caps |= MMC_CAP_4_BIT_DATA;
1825 2515
1826 if (caps & SDHCI_CAN_DO_HISPD) 2516 if (caps[0] & SDHCI_CAN_DO_HISPD)
1827 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 2517 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
1828 2518
1829 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 2519 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
2520 mmc_card_is_removable(mmc))
1830 mmc->caps |= MMC_CAP_NEEDS_POLL; 2521 mmc->caps |= MMC_CAP_NEEDS_POLL;
1831 2522
1832 mmc->ocr_avail = 0; 2523 /* UHS-I mode(s) supported by the host controller. */
1833 if (caps & SDHCI_CAN_VDD_330) 2524 if (host->version >= SDHCI_SPEC_300)
1834 mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; 2525 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
1835 if (caps & SDHCI_CAN_VDD_300) 2526
1836 mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31; 2527 /* SDR104 supports also implies SDR50 support */
1837 if (caps & SDHCI_CAN_VDD_180) 2528 if (caps[1] & SDHCI_SUPPORT_SDR104)
1838 mmc->ocr_avail |= MMC_VDD_165_195; 2529 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
2530 else if (caps[1] & SDHCI_SUPPORT_SDR50)
2531 mmc->caps |= MMC_CAP_UHS_SDR50;
2532
2533 if (caps[1] & SDHCI_SUPPORT_DDR50)
2534 mmc->caps |= MMC_CAP_UHS_DDR50;
2535
2536 /* Does the host needs tuning for SDR50? */
2537 if (caps[1] & SDHCI_USE_SDR50_TUNING)
2538 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
2539
2540 /* Driver Type(s) (A, C, D) supported by the host */
2541 if (caps[1] & SDHCI_DRIVER_TYPE_A)
2542 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
2543 if (caps[1] & SDHCI_DRIVER_TYPE_C)
2544 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
2545 if (caps[1] & SDHCI_DRIVER_TYPE_D)
2546 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
2547
2548 /* Initial value for re-tuning timer count */
2549 host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
2550 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
2551
2552 /*
2553 * In case Re-tuning Timer is not disabled, the actual value of
2554 * re-tuning timer will be 2 ^ (n - 1).
2555 */
2556 if (host->tuning_count)
2557 host->tuning_count = 1 << (host->tuning_count - 1);
2558
2559 /* Re-tuning mode supported by the Host Controller */
2560 host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
2561 SDHCI_RETUNING_MODE_SHIFT;
2562
2563 ocr_avail = 0;
2564 /*
2565 * According to SD Host Controller spec v3.00, if the Host System
2566 * can afford more than 150mA, Host Driver should set XPC to 1. Also
2567 * the value is meaningful only if Voltage Support in the Capabilities
2568 * register is set. The actual current value is 4 times the register
2569 * value.
2570 */
2571 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
2572
2573 if (caps[0] & SDHCI_CAN_VDD_330) {
2574 int max_current_330;
2575
2576 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
2577
2578 max_current_330 = ((max_current_caps &
2579 SDHCI_MAX_CURRENT_330_MASK) >>
2580 SDHCI_MAX_CURRENT_330_SHIFT) *
2581 SDHCI_MAX_CURRENT_MULTIPLIER;
2582
2583 if (max_current_330 > 150)
2584 mmc->caps |= MMC_CAP_SET_XPC_330;
2585 }
2586 if (caps[0] & SDHCI_CAN_VDD_300) {
2587 int max_current_300;
2588
2589 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
2590
2591 max_current_300 = ((max_current_caps &
2592 SDHCI_MAX_CURRENT_300_MASK) >>
2593 SDHCI_MAX_CURRENT_300_SHIFT) *
2594 SDHCI_MAX_CURRENT_MULTIPLIER;
2595
2596 if (max_current_300 > 150)
2597 mmc->caps |= MMC_CAP_SET_XPC_300;
2598 }
2599 if (caps[0] & SDHCI_CAN_VDD_180) {
2600 int max_current_180;
2601
2602 ocr_avail |= MMC_VDD_165_195;
2603
2604 max_current_180 = ((max_current_caps &
2605 SDHCI_MAX_CURRENT_180_MASK) >>
2606 SDHCI_MAX_CURRENT_180_SHIFT) *
2607 SDHCI_MAX_CURRENT_MULTIPLIER;
2608
2609 if (max_current_180 > 150)
2610 mmc->caps |= MMC_CAP_SET_XPC_180;
2611
2612 /* Maximum current capabilities of the host at 1.8V */
2613 if (max_current_180 >= 800)
2614 mmc->caps |= MMC_CAP_MAX_CURRENT_800;
2615 else if (max_current_180 >= 600)
2616 mmc->caps |= MMC_CAP_MAX_CURRENT_600;
2617 else if (max_current_180 >= 400)
2618 mmc->caps |= MMC_CAP_MAX_CURRENT_400;
2619 else
2620 mmc->caps |= MMC_CAP_MAX_CURRENT_200;
2621 }
2622
2623 mmc->ocr_avail = ocr_avail;
2624 mmc->ocr_avail_sdio = ocr_avail;
2625 if (host->ocr_avail_sdio)
2626 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
2627 mmc->ocr_avail_sd = ocr_avail;
2628 if (host->ocr_avail_sd)
2629 mmc->ocr_avail_sd &= host->ocr_avail_sd;
2630 else /* normal SD controllers don't support 1.8V */
2631 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
2632 mmc->ocr_avail_mmc = ocr_avail;
2633 if (host->ocr_avail_mmc)
2634 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
1839 2635
1840 if (mmc->ocr_avail == 0) { 2636 if (mmc->ocr_avail == 0) {
1841 printk(KERN_ERR "%s: Hardware doesn't report any " 2637 printk(KERN_ERR "%s: Hardware doesn't report any "
@@ -1850,12 +2646,11 @@ int sdhci_add_host(struct sdhci_host *host)
 	 * can do scatter/gather or not.
 	 */
 	if (host->flags & SDHCI_USE_ADMA)
-		mmc->max_hw_segs = 128;
+		mmc->max_segs = 128;
 	else if (host->flags & SDHCI_USE_SDMA)
-		mmc->max_hw_segs = 1;
+		mmc->max_segs = 1;
 	else /* PIO */
-		mmc->max_hw_segs = 128;
-	mmc->max_phys_segs = 128;
+		mmc->max_segs = 128;
 
 	/*
 	 * Maximum number of sectors in one transfer. Limited by DMA boundary
@@ -1868,10 +2663,14 @@ int sdhci_add_host(struct sdhci_host *host)
 	 * of bytes. When doing hardware scatter/gather, each entry cannot
 	 * be larger than 64 KiB though.
 	 */
-	if (host->flags & SDHCI_USE_ADMA)
-		mmc->max_seg_size = 65536;
-	else
+	if (host->flags & SDHCI_USE_ADMA) {
+		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
+			mmc->max_seg_size = 65535;
+		else
+			mmc->max_seg_size = 65536;
+	} else {
 		mmc->max_seg_size = mmc->max_req_size;
+	}
 
 	/*
 	 * Maximum block size. This varies from controller to controller and
@@ -1880,7 +2679,7 @@ int sdhci_add_host(struct sdhci_host *host)
 	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
 		mmc->max_blk_size = 2;
 	} else {
-		mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >>
+		mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
 				SDHCI_MAX_BLOCK_SHIFT;
 		if (mmc->max_blk_size >= 3) {
 			printk(KERN_WARNING "%s: Invalid maximum block size, "
@@ -1906,6 +2705,15 @@ int sdhci_add_host(struct sdhci_host *host)
 
 	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
 
+	if (host->version >= SDHCI_SPEC_300) {
+		init_waitqueue_head(&host->buf_ready_int);
+
+		/* Initialize re-tuning timer */
+		init_timer(&host->tuning_timer);
+		host->tuning_timer.data = (unsigned long)host;
+		host->tuning_timer.function = sdhci_tuning_timer;
+	}
+
 	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
 		mmc_hostname(mmc), host);
 	if (ret)
@@ -1999,6 +2807,8 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
 	free_irq(host->irq, host);
 
 	del_timer_sync(&host->timer);
+	if (host->version >= SDHCI_SPEC_300)
+		del_timer_sync(&host->tuning_timer);
 
 	tasklet_kill(&host->card_tasklet);
 	tasklet_kill(&host->finish_tasklet);