Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/core/core.c            |    3
-rw-r--r--  drivers/mmc/core/sd_ops.c          |    2
-rw-r--r--  drivers/mmc/core/sdio.c            |    6
-rw-r--r--  drivers/mmc/core/sdio_io.c         |   30
-rw-r--r--  drivers/mmc/host/Kconfig           |   20
-rw-r--r--  drivers/mmc/host/Makefile          |    2
-rw-r--r--  drivers/mmc/host/at91_mci.c        |    2
-rw-r--r--  drivers/mmc/host/atmel-mci.c       |   64
-rw-r--r--  drivers/mmc/host/au1xmmc.c         |    2
-rw-r--r--  drivers/mmc/host/bfin_sdh.c        |    2
-rw-r--r--  drivers/mmc/host/cb710-mmc.c       |    2
-rw-r--r--  drivers/mmc/host/davinci_mmc.c     |  111
-rw-r--r--  drivers/mmc/host/imxmmc.c          |    2
-rw-r--r--  drivers/mmc/host/mmci.c            |    2
-rw-r--r--  drivers/mmc/host/msm_sdcc.c        |    2
-rw-r--r--  drivers/mmc/host/mvsdio.c          |    2
-rw-r--r--  drivers/mmc/host/mxcmmc.c          |  116
-rw-r--r--  drivers/mmc/host/of_mmc_spi.c      |    4
-rw-r--r--  drivers/mmc/host/omap.c            |   64
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c      |  279
-rw-r--r--  drivers/mmc/host/pxamci.c          |    2
-rw-r--r--  drivers/mmc/host/s3cmci.c          |    3
-rw-r--r--  drivers/mmc/host/sdhci-of-core.c   |   11
-rw-r--r--  drivers/mmc/host/sdhci-of-esdhc.c  |   12
-rw-r--r--  drivers/mmc/host/sdhci-of-hlwd.c   |   12
-rw-r--r--  drivers/mmc/host/sdhci-pci.c       |    2
-rw-r--r--  drivers/mmc/host/sdhci-pltfm.c     |   26
-rw-r--r--  drivers/mmc/host/sdhci-s3c.c       |   10
-rw-r--r--  drivers/mmc/host/sdhci-spear.c     |  298
-rw-r--r--  drivers/mmc/host/sdhci.c           |   25
-rw-r--r--  drivers/mmc/host/sdhci.h           |   42
-rw-r--r--  drivers/mmc/host/sdricoh_cs.c      |    2
-rw-r--r--  drivers/mmc/host/sh_mmcif.c        |  965
-rw-r--r--  drivers/mmc/host/tifm_sd.c         |    2
-rw-r--r--  drivers/mmc/host/tmio_mmc.c        |  369
-rw-r--r--  drivers/mmc/host/tmio_mmc.h        |   13
-rw-r--r--  drivers/mmc/host/via-sdmmc.c       |    2
-rw-r--r--  drivers/mmc/host/wbsd.c            |    2
38 files changed, 2183 insertions, 332 deletions
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 3168ebd616b2..569e94da844c 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1252,9 +1252,8 @@ EXPORT_SYMBOL(mmc_card_can_sleep);
 /**
  * mmc_suspend_host - suspend a host
  * @host: mmc host
- * @state: suspend mode (PM_SUSPEND_xxx)
  */
-int mmc_suspend_host(struct mmc_host *host, pm_message_t state)
+int mmc_suspend_host(struct mmc_host *host)
 {
 	int err = 0;
 
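The follow-on hunks in this series update every host driver for the new one-argument form. As a rough sketch (driver and variable names hypothetical), a typical platform suspend hook simply stops forwarding the pm_message_t:

static int foo_mmc_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	/* the PM state is no longer passed down; the core picks the policy */
	return mmc ? mmc_suspend_host(mmc) : 0;
}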
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 0d96080d44b0..63772e7e7608 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -79,8 +79,6 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
 	 * we cannot use the retries field in mmc_command.
 	 */
 	for (i = 0;i <= retries;i++) {
-		memset(&mrq, 0, sizeof(struct mmc_request));
-
 		err = mmc_app_cmd(host, card);
 		if (err) {
 			/* no point in retrying; no APP commands allowed */
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 2dd4cfe7ca17..b9dee28ee7d0 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -296,6 +296,12 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
 	card->type = MMC_TYPE_SDIO;
 
 	/*
+	 * Call the optional HC's init_card function to handle quirks.
+	 */
+	if (host->ops->init_card)
+		host->ops->init_card(host, card);
+
+	/*
 	 * For native busses: set card RCA and quit open drain mode.
 	 */
 	if (!powered_resume && !mmc_host_is_spi(host)) {
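A host controller driver can hook the new init_card callback to apply per-card quirks before the core finishes initialization; the mxcmmc hunk further down in this diff does exactly that for an MX3 erratum. A minimal, hypothetical sketch:

static void foo_init_card(struct mmc_host *host, struct mmc_card *card)
{
	/* example quirk: restrict SDIO cards to 1-bit mode on this controller */
	if (card->type == MMC_TYPE_SDIO)
		host->caps &= ~MMC_CAP_4_BIT_DATA;
}

static const struct mmc_host_ops foo_ops = {
	/* .request, .set_ios, ... */
	.init_card	= foo_init_card,
};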
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index ff27c8c71355..0f687cdeb064 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -406,6 +406,36 @@ void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret)
 EXPORT_SYMBOL_GPL(sdio_writeb);
 
 /**
+ * sdio_writeb_readb - write and read a byte from SDIO function
+ * @func: SDIO function to access
+ * @write_byte: byte to write
+ * @addr: address to write to
+ * @err_ret: optional status value from transfer
+ *
+ * Performs a RAW (Read after Write) operation as defined by SDIO spec -
+ * single byte is written to address space of a given SDIO function and
+ * response is read back from the same address, both using single request.
+ * If there is a problem with the operation, 0xff is returned and
+ * @err_ret will contain the error code.
+ */
+u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte,
+	unsigned int addr, int *err_ret)
+{
+	int ret;
+	u8 val;
+
+	ret = mmc_io_rw_direct(func->card, 1, func->num, addr,
+			write_byte, &val);
+	if (err_ret)
+		*err_ret = ret;
+	if (ret)
+		val = 0xff;
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(sdio_writeb_readb);
+
+/**
  * sdio_memcpy_fromio - read a chunk of memory from a SDIO function
  * @func: SDIO function to access
  * @dst: buffer to store the data
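For an SDIO function driver, the new helper turns a separate write-then-read pair into one CMD52 read-after-write transaction. An illustrative call site (the register offset is made up), with the host claimed as for the other byte accessors:

	int err;
	u8 val;

	sdio_claim_host(func);
	/* write 0x01 and read back what the card latched at the same address */
	val = sdio_writeb_readb(func, 0x01, 0x10, &err);
	sdio_release_host(func);
	if (err)
		pr_err("RAW write/read failed: %d\n", err);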
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2e13b94769fd..e171e77f6129 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -136,6 +136,18 @@ config MMC_SDHCI_S3C
 
 	  If unsure, say N.
 
+config MMC_SDHCI_SPEAR
+	tristate "SDHCI support on ST SPEAr platform"
+	depends on MMC_SDHCI && PLAT_SPEAR
+	help
+	  This selects the Secure Digital Host Controller Interface (SDHCI)
+	  often referrered to as the HSMMC block in some of the ST SPEAR range
+	  of SoC
+
+	  If you have a controller with this interface, say Y or M here.
+
+	  If unsure, say N.
+
 config MMC_SDHCI_S3C_DMA
 	bool "DMA support on S3C SDHCI"
 	depends on MMC_SDHCI_S3C && EXPERIMENTAL
@@ -412,3 +424,11 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
 	depends on SDH_BFIN
 	help
 	  If you say yes here SD-Cards may work on the EZkit.
+
+config MMC_SH_MMCIF
+	tristate "SuperH Internal MMCIF support"
+	depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
+	help
+	  This selects the MMC Host Interface controler (MMCIF).
+
+	  This driver supports MMCIF in sh7724/sh7757/sh7372.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index f4803977dfce..e30c2ee48894 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_MMC_SDHCI) += sdhci.o
 obj-$(CONFIG_MMC_SDHCI_PCI)	+= sdhci-pci.o
 obj-$(CONFIG_MMC_SDHCI_PLTFM)	+= sdhci-pltfm.o
 obj-$(CONFIG_MMC_SDHCI_S3C)	+= sdhci-s3c.o
+obj-$(CONFIG_MMC_SDHCI_SPEAR)	+= sdhci-spear.o
 obj-$(CONFIG_MMC_WBSD)		+= wbsd.o
 obj-$(CONFIG_MMC_AU1X)		+= au1xmmc.o
 obj-$(CONFIG_MMC_OMAP)		+= omap.o
@@ -34,6 +35,7 @@ obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
 obj-$(CONFIG_MMC_CB710)		+= cb710-mmc.o
 obj-$(CONFIG_MMC_VIA_SDMMC)	+= via-sdmmc.o
 obj-$(CONFIG_SDH_BFIN)		+= bfin_sdh.o
+obj-$(CONFIG_MMC_SH_MMCIF)	+= sh_mmcif.o
 
 obj-$(CONFIG_MMC_SDHCI_OF)	+= sdhci-of.o
 sdhci-of-y			:= sdhci-of-core.o
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 336d9f553f3e..5f3a599ead07 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -1157,7 +1157,7 @@ static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
 		enable_irq_wake(host->board->det_pin);
 
 	if (mmc)
-		ret = mmc_suspend_host(mmc, state);
+		ret = mmc_suspend_host(mmc);
 
 	return ret;
 }
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index df0e8a88d85f..95ef864ad8f9 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -173,6 +173,7 @@ struct atmel_mci {
  * @mmc:		The mmc_host representing this slot.
  * @host:		The MMC controller this slot is using.
  * @sdc_reg:		Value of SDCR to be written before using this slot.
+ * @sdio_irq:		SDIO irq mask for this slot.
  * @mrq:		mmc_request currently being processed or waiting to be
  *			processed, or NULL when the slot is idle.
  * @queue_node:		List node for placing this node in the @queue list of
@@ -191,6 +192,7 @@ struct atmel_mci_slot {
 	struct atmel_mci	*host;
 
 	u32			sdc_reg;
+	u32			sdio_irq;
 
 	struct mmc_request	*mrq;
 	struct list_head	queue_node;
@@ -792,7 +794,7 @@ static void atmci_start_request(struct atmel_mci *host,
 	mci_writel(host, SDCR, slot->sdc_reg);
 
 	iflags = mci_readl(host, IMR);
-	if (iflags)
+	if (iflags & ~(MCI_SDIOIRQA | MCI_SDIOIRQB))
 		dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
 				iflags);
 
@@ -952,10 +954,21 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		if (mci_has_rwproof())
 			host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF);
 
-		if (list_empty(&host->queue))
+		if (atmci_is_mci2()) {
+			/* setup High Speed mode in relation with card capacity */
+			if (ios->timing == MMC_TIMING_SD_HS)
+				host->cfg_reg |= MCI_CFG_HSMODE;
+			else
+				host->cfg_reg &= ~MCI_CFG_HSMODE;
+		}
+
+		if (list_empty(&host->queue)) {
 			mci_writel(host, MR, host->mode_reg);
-		else
+			if (atmci_is_mci2())
+				mci_writel(host, CFG, host->cfg_reg);
+		} else {
 			host->need_clock_update = true;
+		}
 
 		spin_unlock_bh(&host->lock);
 	} else {
@@ -1030,11 +1043,23 @@ static int atmci_get_cd(struct mmc_host *mmc)
 	return present;
 }
 
+static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+	struct atmel_mci_slot	*slot = mmc_priv(mmc);
+	struct atmel_mci	*host = slot->host;
+
+	if (enable)
+		mci_writel(host, IER, slot->sdio_irq);
+	else
+		mci_writel(host, IDR, slot->sdio_irq);
+}
+
 static const struct mmc_host_ops atmci_ops = {
 	.request	= atmci_request,
 	.set_ios	= atmci_set_ios,
 	.get_ro		= atmci_get_ro,
 	.get_cd		= atmci_get_cd,
+	.enable_sdio_irq = atmci_enable_sdio_irq,
 };
 
 /* Called with host->lock held */
@@ -1052,8 +1077,11 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
 	 * necessary if set_ios() is called when a different slot is
 	 * busy transfering data.
 	 */
-	if (host->need_clock_update)
+	if (host->need_clock_update) {
 		mci_writel(host, MR, host->mode_reg);
+		if (atmci_is_mci2())
+			mci_writel(host, CFG, host->cfg_reg);
+	}
 
 	host->cur_slot->mrq = NULL;
 	host->mrq = NULL;
@@ -1483,6 +1511,19 @@ static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status)
 	tasklet_schedule(&host->tasklet);
 }
 
+static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
+{
+	int	i;
+
+	for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
+		struct atmel_mci_slot *slot = host->slot[i];
+		if (slot && (status & slot->sdio_irq)) {
+			mmc_signal_sdio_irq(slot->mmc);
+		}
+	}
+}
+
+
 static irqreturn_t atmci_interrupt(int irq, void *dev_id)
 {
 	struct atmel_mci	*host = dev_id;
@@ -1522,6 +1563,10 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
 
 		if (pending & MCI_CMDRDY)
 			atmci_cmd_interrupt(host, status);
+
+		if (pending & (MCI_SDIOIRQA | MCI_SDIOIRQB))
+			atmci_sdio_interrupt(host, status);
+
 	} while (pass_count++ < 5);
 
 	return pass_count ? IRQ_HANDLED : IRQ_NONE;
@@ -1544,7 +1589,7 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
 
 static int __init atmci_init_slot(struct atmel_mci *host,
 		struct mci_slot_pdata *slot_data, unsigned int id,
-		u32 sdc_reg)
+		u32 sdc_reg, u32 sdio_irq)
 {
 	struct mmc_host			*mmc;
 	struct atmel_mci_slot		*slot;
@@ -1560,11 +1605,16 @@ static int __init atmci_init_slot(struct atmel_mci *host,
 	slot->wp_pin = slot_data->wp_pin;
 	slot->detect_is_active_high = slot_data->detect_is_active_high;
 	slot->sdc_reg = sdc_reg;
+	slot->sdio_irq = sdio_irq;
 
 	mmc->ops = &atmci_ops;
 	mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
 	mmc->f_max = host->bus_hz / 2;
 	mmc->ocr_avail	= MMC_VDD_32_33 | MMC_VDD_33_34;
+	if (sdio_irq)
+		mmc->caps |= MMC_CAP_SDIO_IRQ;
+	if (atmci_is_mci2())
+		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
 	if (slot_data->bus_width >= 4)
 		mmc->caps |= MMC_CAP_4_BIT_DATA;
 
@@ -1753,13 +1803,13 @@ static int __init atmci_probe(struct platform_device *pdev)
 	ret = -ENODEV;
 	if (pdata->slot[0].bus_width) {
 		ret = atmci_init_slot(host, &pdata->slot[0],
-				0, MCI_SDCSEL_SLOT_A);
+				0, MCI_SDCSEL_SLOT_A, MCI_SDIOIRQA);
 		if (!ret)
 			nr_slots++;
 	}
 	if (pdata->slot[1].bus_width) {
 		ret = atmci_init_slot(host, &pdata->slot[1],
-				1, MCI_SDCSEL_SLOT_B);
+				1, MCI_SDCSEL_SLOT_B, MCI_SDIOIRQB);
 		if (!ret)
 			nr_slots++;
 	}
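The SDIO-IRQ plumbing added to atmel-mci follows the generic mmc host contract, which the mxcmmc hunks below repeat: advertise MMC_CAP_SDIO_IRQ, let the core mask and unmask the card interrupt through .enable_sdio_irq, and report it from the ISR with mmc_signal_sdio_irq(). A condensed skeleton with hypothetical helpers:

static void foo_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct foo_host *host = mmc_priv(mmc);

	if (enable)
		foo_unmask_card_irq(host);	/* hypothetical helper */
	else
		foo_mask_card_irq(host);	/* hypothetical helper */
}

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo_host *host = dev_id;

	if (foo_card_irq_pending(host))		/* hypothetical helper */
		mmc_signal_sdio_irq(host->mmc);

	return IRQ_HANDLED;
}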
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index f5834449400e..c8da5d30a861 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1142,7 +1142,7 @@ static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
 	struct au1xmmc_host *host = platform_get_drvdata(pdev);
 	int ret;
 
-	ret = mmc_suspend_host(host->mmc, state);
+	ret = mmc_suspend_host(host->mmc);
 	if (ret)
 		return ret;
 
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 6919e844072c..4b0e677d7295 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -576,7 +576,7 @@ static int sdh_suspend(struct platform_device *dev, pm_message_t state)
 	int ret = 0;
 
 	if (mmc)
-		ret = mmc_suspend_host(mmc, state);
+		ret = mmc_suspend_host(mmc);
 
 	bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON);
 	peripheral_free_list(drv_data->pin_req);
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 92a324f7417c..ca3bdc831900 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -675,7 +675,7 @@ static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state)
 	struct mmc_host *mmc = cb710_slot_to_mmc(slot);
 	int err;
 
-	err = mmc_suspend_host(mmc, state);
+	err = mmc_suspend_host(mmc);
 	if (err)
 		return err;
 
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 3bd0ba294e9d..33d9f1b00862 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -137,15 +137,15 @@
 
 /*
  * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
- * and we handle up to NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
+ * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
  * for drivers with max_hw_segs == 1, making the segments bigger (64KB)
- * than the page or two that's otherwise typical. NR_SG == 16 gives at
- * least the same throughput boost, using EDMA transfer linkage instead
- * of spending CPU time copying pages.
+ * than the page or two that's otherwise typical. nr_sg (passed from
+ * platform data) == 16 gives at least the same throughput boost, using
+ * EDMA transfer linkage instead of spending CPU time copying pages.
  */
 #define MAX_CCNT	((1 << 16) - 1)
 
-#define NR_SG		16
+#define MAX_NR_SG	16
 
 static unsigned rw_threshold = 32;
 module_param(rw_threshold, uint, S_IRUGO);
@@ -171,6 +171,7 @@ struct mmc_davinci_host {
 #define DAVINCI_MMC_DATADIR_READ	1
 #define DAVINCI_MMC_DATADIR_WRITE	2
 	unsigned char data_dir;
+	unsigned char suspended;
 
 	/* buffer is used during PIO of one scatterlist segment, and
 	 * is updated along with buffer_bytes_left. bytes_left applies
@@ -192,7 +193,7 @@ struct mmc_davinci_host {
 	struct edmacc_param	tx_template;
 	struct edmacc_param	rx_template;
 	unsigned		n_link;
-	u32			links[NR_SG - 1];
+	u32			links[MAX_NR_SG - 1];
 
 	/* For PIO we walk scatterlists one segment at a time. */
 	unsigned int		sg_len;
@@ -202,6 +203,8 @@ struct mmc_davinci_host {
 	u8 version;
 	/* for ns in one cycle calculation */
 	unsigned ns_in_one_cycle;
+	/* Number of sg segments */
+	u8 nr_sg;
 #ifdef CONFIG_CPU_FREQ
 	struct notifier_block	freq_transition;
 #endif
@@ -568,6 +571,7 @@ davinci_release_dma_channels(struct mmc_davinci_host *host)
 
 static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
 {
+	u32 link_size;
 	int r, i;
 
 	/* Acquire master DMA write channel */
@@ -593,7 +597,8 @@ static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
 	/* Allocate parameter RAM slots, which will later be bound to a
 	 * channel as needed to handle a scatterlist.
 	 */
-	for (i = 0; i < ARRAY_SIZE(host->links); i++) {
+	link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links));
+	for (i = 0; i < link_size; i++) {
 		r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY);
 		if (r < 0) {
 			dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n",
@@ -905,19 +910,26 @@ static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
 	}
 }
 
-static void
-davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
+static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host,
+					  int val)
 {
 	u32 temp;
 
-	/* reset command and data state machines */
 	temp = readl(host->base + DAVINCI_MMCCTL);
-	writel(temp | MMCCTL_CMDRST | MMCCTL_DATRST,
-		host->base + DAVINCI_MMCCTL);
+	if (val)	/* reset */
+		temp |= MMCCTL_CMDRST | MMCCTL_DATRST;
+	else		/* enable */
+		temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
 
-	temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
-	udelay(10);
 	writel(temp, host->base + DAVINCI_MMCCTL);
+	udelay(10);
+}
+
+static void
+davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
+{
+	mmc_davinci_reset_ctrl(host, 1);
+	mmc_davinci_reset_ctrl(host, 0);
 }
 
 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
@@ -1121,15 +1133,8 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
 #endif
 static void __init init_mmcsd_host(struct mmc_davinci_host *host)
 {
-	/* DAT line portion is diabled and in reset state */
-	writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_DATRST,
-		host->base + DAVINCI_MMCCTL);
-
-	/* CMD line portion is diabled and in reset state */
-	writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_CMDRST,
-		host->base + DAVINCI_MMCCTL);
 
-	udelay(10);
+	mmc_davinci_reset_ctrl(host, 1);
 
 	writel(0, host->base + DAVINCI_MMCCLK);
 	writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
@@ -1137,12 +1142,7 @@ static void __init init_mmcsd_host(struct mmc_davinci_host *host)
 	writel(0x1FFF, host->base + DAVINCI_MMCTOR);
 	writel(0xFFFF, host->base + DAVINCI_MMCTOD);
 
-	writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_DATRST,
-		host->base + DAVINCI_MMCCTL);
-	writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_CMDRST,
-		host->base + DAVINCI_MMCCTL);
-
-	udelay(10);
+	mmc_davinci_reset_ctrl(host, 0);
 }
 
 static int __init davinci_mmcsd_probe(struct platform_device *pdev)
@@ -1202,6 +1202,12 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
 
 	init_mmcsd_host(host);
 
+	if (pdata->nr_sg)
+		host->nr_sg = pdata->nr_sg - 1;
+
+	if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
+		host->nr_sg = MAX_NR_SG;
+
 	host->use_dma = use_dma;
 	host->irq = irq;
 
@@ -1327,32 +1333,65 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
 }
 
 #ifdef CONFIG_PM
-static int davinci_mmcsd_suspend(struct platform_device *pdev, pm_message_t msg)
+static int davinci_mmcsd_suspend(struct device *dev)
 {
+	struct platform_device *pdev = to_platform_device(dev);
 	struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+	int ret;
 
-	return mmc_suspend_host(host->mmc, msg);
+	mmc_host_enable(host->mmc);
+	ret = mmc_suspend_host(host->mmc);
+	if (!ret) {
+		writel(0, host->base + DAVINCI_MMCIM);
+		mmc_davinci_reset_ctrl(host, 1);
+		mmc_host_disable(host->mmc);
+		clk_disable(host->clk);
+		host->suspended = 1;
+	} else {
+		host->suspended = 0;
+		mmc_host_disable(host->mmc);
+	}
+
+	return ret;
 }
 
-static int davinci_mmcsd_resume(struct platform_device *pdev)
+static int davinci_mmcsd_resume(struct device *dev)
 {
+	struct platform_device *pdev = to_platform_device(dev);
 	struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+	int ret;
+
+	if (!host->suspended)
+		return 0;
 
-	return mmc_resume_host(host->mmc);
+	clk_enable(host->clk);
+	mmc_host_enable(host->mmc);
+
+	mmc_davinci_reset_ctrl(host, 0);
+	ret = mmc_resume_host(host->mmc);
+	if (!ret)
+		host->suspended = 0;
+
+	return ret;
 }
+
+static const struct dev_pm_ops davinci_mmcsd_pm = {
+	.suspend	= davinci_mmcsd_suspend,
+	.resume		= davinci_mmcsd_resume,
+};
+
+#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
 #else
-#define davinci_mmcsd_suspend	NULL
-#define davinci_mmcsd_resume	NULL
+#define davinci_mmcsd_pm_ops NULL
 #endif
 
 static struct platform_driver davinci_mmcsd_driver = {
 	.driver		= {
 		.name	= "davinci_mmc",
 		.owner	= THIS_MODULE,
+		.pm	= davinci_mmcsd_pm_ops,
 	},
 	.remove		= __exit_p(davinci_mmcsd_remove),
-	.suspend	= davinci_mmcsd_suspend,
-	.resume		= davinci_mmcsd_resume,
 };
 
 static int __init davinci_mmcsd_init(void)
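With this change the number of EDMA link slots comes from platform data instead of the fixed NR_SG, and the driver bounds the value to 1..MAX_NR_SG. A hedged sketch of the board-side counterpart (the instance name is hypothetical; nr_sg is the field read by the probe code above):

static struct davinci_mmc_config foo_evm_mmc_config = {
	.wires		= 4,
	.max_freq	= 50000000,
	.caps		= MMC_CAP_4_BIT_DATA,
	.nr_sg		= 16,	/* driver clamps this into 1..MAX_NR_SG */
};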
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index bf98d7cc928a..9a68ff4353a2 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -1115,7 +1115,7 @@ static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
 	int ret = 0;
 
 	if (mmc)
-		ret = mmc_suspend_host(mmc, state);
+		ret = mmc_suspend_host(mmc);
 
 	return ret;
 }
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index ff115d920888..4917af96bae1 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -824,7 +824,7 @@ static int mmci_suspend(struct amba_device *dev, pm_message_t state)
 	if (mmc) {
 		struct mmci_host *host = mmc_priv(mmc);
 
-		ret = mmc_suspend_host(mmc, state);
+		ret = mmc_suspend_host(mmc);
 		if (ret == 0)
 			writel(0, host->base + MMCIMASK0);
 	}
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 61f1d27fed3f..24e09454e522 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -1327,7 +1327,7 @@ msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
 		disable_irq(host->stat_irq);
 
 	if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
-		rc = mmc_suspend_host(mmc, state);
+		rc = mmc_suspend_host(mmc);
 	if (!rc)
 		msmsdcc_writel(host, 0, MMCIMASK0);
 	if (host->clks_on)
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 34e23489811a..366eefa77c5a 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -865,7 +865,7 @@ static int mvsd_suspend(struct platform_device *dev, pm_message_t state)
 	int ret = 0;
 
 	if (mmc)
-		ret = mmc_suspend_host(mmc, state);
+		ret = mmc_suspend_host(mmc);
 
 	return ret;
 }
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 2df90412abb5..d9d4a72e0ec7 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -119,6 +119,7 @@ struct mxcmci_host {
 	int			detect_irq;
 	int			dma;
 	int			do_dma;
+	int			use_sdio;
 	unsigned int		power_mode;
 	struct imxmmc_platform_data *pdata;
 
@@ -138,6 +139,7 @@ struct mxcmci_host {
 	int			clock;
 
 	struct work_struct	datawork;
+	spinlock_t		lock;
 };
 
 static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
@@ -151,6 +153,8 @@ static void mxcmci_softreset(struct mxcmci_host *host)
 {
 	int i;
 
+	dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");
+
 	/* reset sequence */
 	writew(STR_STP_CLK_RESET, host->base + MMC_REG_STR_STP_CLK);
 	writew(STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
@@ -224,6 +228,9 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
 		unsigned int cmdat)
 {
+	u32 int_cntr;
+	unsigned long flags;
+
 	WARN_ON(host->cmd != NULL);
 	host->cmd = cmd;
 
@@ -247,12 +254,16 @@ static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
 		return -EINVAL;
 	}
 
+	int_cntr = INT_END_CMD_RES_EN;
+
 	if (mxcmci_use_dma(host))
-		writel(INT_READ_OP_EN | INT_WRITE_OP_DONE_EN |
-				INT_END_CMD_RES_EN,
-				host->base + MMC_REG_INT_CNTR);
-	else
-		writel(INT_END_CMD_RES_EN, host->base + MMC_REG_INT_CNTR);
+		int_cntr |= INT_READ_OP_EN | INT_WRITE_OP_DONE_EN;
+
+	spin_lock_irqsave(&host->lock, flags);
+	if (host->use_sdio)
+		int_cntr |= INT_SDIO_IRQ_EN;
+	writel(int_cntr, host->base + MMC_REG_INT_CNTR);
+	spin_unlock_irqrestore(&host->lock, flags);
 
 	writew(cmd->opcode, host->base + MMC_REG_CMD);
 	writel(cmd->arg, host->base + MMC_REG_ARG);
@@ -264,7 +275,14 @@ static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
 static void mxcmci_finish_request(struct mxcmci_host *host,
 		struct mmc_request *req)
 {
-	writel(0, host->base + MMC_REG_INT_CNTR);
+	u32 int_cntr = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+	if (host->use_sdio)
+		int_cntr |= INT_SDIO_IRQ_EN;
+	writel(int_cntr, host->base + MMC_REG_INT_CNTR);
+	spin_unlock_irqrestore(&host->lock, flags);
 
 	host->req = NULL;
 	host->cmd = NULL;
@@ -290,16 +308,25 @@ static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
 		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
 				stat);
 		if (stat & STATUS_CRC_READ_ERR) {
+			dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__);
 			data->error = -EILSEQ;
 		} else if (stat & STATUS_CRC_WRITE_ERR) {
 			u32 err_code = (stat >> 9) & 0x3;
-			if (err_code == 2) /* No CRC response */
+			if (err_code == 2) { /* No CRC response */
+				dev_err(mmc_dev(host->mmc),
+					"%s: No CRC -ETIMEDOUT\n", __func__);
 				data->error = -ETIMEDOUT;
-			else
+			} else {
+				dev_err(mmc_dev(host->mmc),
+					"%s: -EILSEQ\n", __func__);
 				data->error = -EILSEQ;
+			}
 		} else if (stat & STATUS_TIME_OUT_READ) {
+			dev_err(mmc_dev(host->mmc),
+				"%s: read -ETIMEDOUT\n", __func__);
 			data->error = -ETIMEDOUT;
 		} else {
+			dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__);
 			data->error = -EIO;
 		}
 	} else {
@@ -433,8 +460,6 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
 	struct scatterlist *sg;
 	int stat, i;
 
-	host->datasize = 0;
-
 	host->data = data;
 	host->datasize = 0;
 
@@ -464,6 +489,9 @@ static void mxcmci_datawork(struct work_struct *work)
 	struct mxcmci_host *host = container_of(work, struct mxcmci_host,
 				  datawork);
 	int datastat = mxcmci_transfer_data(host);
+
+	writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
+		host->base + MMC_REG_STATUS);
 	mxcmci_finish_data(host, datastat);
 
 	if (host->req->stop) {
@@ -523,15 +551,35 @@ static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
 static irqreturn_t mxcmci_irq(int irq, void *devid)
 {
 	struct mxcmci_host *host = devid;
+	unsigned long flags;
+	bool sdio_irq;
 	u32 stat;
 
 	stat = readl(host->base + MMC_REG_STATUS);
-	writel(stat, host->base + MMC_REG_STATUS);
+	writel(stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE |
+			STATUS_WRITE_OP_DONE), host->base + MMC_REG_STATUS);
 
 	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
 
+	spin_lock_irqsave(&host->lock, flags);
+	sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
+	spin_unlock_irqrestore(&host->lock, flags);
+
+#ifdef HAS_DMA
+	if (mxcmci_use_dma(host) &&
+	    (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE)))
+		writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
+			host->base + MMC_REG_STATUS);
+#endif
+
+	if (sdio_irq) {
+		writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS);
+		mmc_signal_sdio_irq(host->mmc);
+	}
+
 	if (stat & STATUS_END_CMD_RESP)
 		mxcmci_cmd_done(host, stat);
+
 #ifdef HAS_DMA
 	if (mxcmci_use_dma(host) &&
 	    (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE)))
@@ -668,11 +716,46 @@ static int mxcmci_get_ro(struct mmc_host *mmc)
 	return -ENOSYS;
 }
 
+static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+	struct mxcmci_host *host = mmc_priv(mmc);
+	unsigned long flags;
+	u32 int_cntr;
+
+	spin_lock_irqsave(&host->lock, flags);
+	host->use_sdio = enable;
+	int_cntr = readl(host->base + MMC_REG_INT_CNTR);
+
+	if (enable)
+		int_cntr |= INT_SDIO_IRQ_EN;
+	else
+		int_cntr &= ~INT_SDIO_IRQ_EN;
+
+	writel(int_cntr, host->base + MMC_REG_INT_CNTR);
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
+{
+	/*
+	 * MX3 SoCs have a silicon bug which corrupts CRC calculation of
+	 * multi-block transfers when connected SDIO peripheral doesn't
+	 * drive the BUSY line as required by the specs.
+	 * One way to prevent this is to only allow 1-bit transfers.
+	 */
+
+	if (cpu_is_mx3() && card->type == MMC_TYPE_SDIO)
+		host->caps &= ~MMC_CAP_4_BIT_DATA;
+	else
+		host->caps |= MMC_CAP_4_BIT_DATA;
+}
 
 static const struct mmc_host_ops mxcmci_ops = {
 	.request		= mxcmci_request,
 	.set_ios		= mxcmci_set_ios,
 	.get_ro			= mxcmci_get_ro,
+	.enable_sdio_irq	= mxcmci_enable_sdio_irq,
+	.init_card		= mxcmci_init_card,
 };
 
 static int mxcmci_probe(struct platform_device *pdev)
@@ -700,7 +783,7 @@ static int mxcmci_probe(struct platform_device *pdev)
 	}
 
 	mmc->ops = &mxcmci_ops;
-	mmc->caps = MMC_CAP_4_BIT_DATA;
+	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
 
 	/* MMC core transfer sizes tunable parameters */
 	mmc->max_hw_segs = 64;
@@ -719,6 +802,7 @@ static int mxcmci_probe(struct platform_device *pdev)
 
 	host->mmc = mmc;
 	host->pdata = pdev->dev.platform_data;
+	spin_lock_init(&host->lock);
 
 	if (host->pdata && host->pdata->ocr_avail)
 		mmc->ocr_avail = host->pdata->ocr_avail;
@@ -848,7 +932,7 @@ static int mxcmci_suspend(struct platform_device *dev, pm_message_t state)
 	int ret = 0;
 
 	if (mmc)
-		ret = mmc_suspend_host(mmc, state);
+		ret = mmc_suspend_host(mmc);
 
 	return ret;
 }
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index bb6cc54b558e..1247e5de9faa 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -64,7 +64,7 @@ static int of_mmc_spi_get_ro(struct device *dev)
 struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
 {
 	struct device *dev = &spi->dev;
-	struct device_node *np = dev_archdata_get_node(&dev->archdata);
+	struct device_node *np = dev->of_node;
 	struct of_mmc_spi *oms;
 	const u32 *voltage_ranges;
 	int num_ranges;
@@ -135,7 +135,7 @@ EXPORT_SYMBOL(mmc_spi_get_pdata);
 void mmc_spi_put_pdata(struct spi_device *spi)
 {
 	struct device *dev = &spi->dev;
-	struct device_node *np = dev_archdata_get_node(&dev->archdata);
+	struct device_node *np = dev->of_node;
 	struct of_mmc_spi *oms = to_of_mmc_spi(dev);
 	int i;
 
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 84d280406341..2b281680e320 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -39,30 +39,30 @@
 #include <plat/fpga.h>
 
 #define	OMAP_MMC_REG_CMD	0x00
-#define	OMAP_MMC_REG_ARGL	0x04
-#define	OMAP_MMC_REG_ARGH	0x08
-#define	OMAP_MMC_REG_CON	0x0c
-#define	OMAP_MMC_REG_STAT	0x10
-#define	OMAP_MMC_REG_IE		0x14
-#define	OMAP_MMC_REG_CTO	0x18
-#define	OMAP_MMC_REG_DTO	0x1c
-#define	OMAP_MMC_REG_DATA	0x20
-#define	OMAP_MMC_REG_BLEN	0x24
-#define	OMAP_MMC_REG_NBLK	0x28
-#define	OMAP_MMC_REG_BUF	0x2c
-#define	OMAP_MMC_REG_SDIO	0x34
-#define	OMAP_MMC_REG_REV	0x3c
-#define	OMAP_MMC_REG_RSP0	0x40
-#define	OMAP_MMC_REG_RSP1	0x44
-#define	OMAP_MMC_REG_RSP2	0x48
-#define	OMAP_MMC_REG_RSP3	0x4c
-#define	OMAP_MMC_REG_RSP4	0x50
-#define	OMAP_MMC_REG_RSP5	0x54
-#define	OMAP_MMC_REG_RSP6	0x58
-#define	OMAP_MMC_REG_RSP7	0x5c
-#define	OMAP_MMC_REG_IOSR	0x60
-#define	OMAP_MMC_REG_SYSC	0x64
-#define	OMAP_MMC_REG_SYSS	0x68
+#define	OMAP_MMC_REG_ARGL	0x01
+#define	OMAP_MMC_REG_ARGH	0x02
+#define	OMAP_MMC_REG_CON	0x03
+#define	OMAP_MMC_REG_STAT	0x04
+#define	OMAP_MMC_REG_IE		0x05
+#define	OMAP_MMC_REG_CTO	0x06
+#define	OMAP_MMC_REG_DTO	0x07
+#define	OMAP_MMC_REG_DATA	0x08
+#define	OMAP_MMC_REG_BLEN	0x09
+#define	OMAP_MMC_REG_NBLK	0x0a
+#define	OMAP_MMC_REG_BUF	0x0b
+#define	OMAP_MMC_REG_SDIO	0x0d
+#define	OMAP_MMC_REG_REV	0x0f
+#define	OMAP_MMC_REG_RSP0	0x10
+#define	OMAP_MMC_REG_RSP1	0x11
+#define	OMAP_MMC_REG_RSP2	0x12
+#define	OMAP_MMC_REG_RSP3	0x13
+#define	OMAP_MMC_REG_RSP4	0x14
+#define	OMAP_MMC_REG_RSP5	0x15
+#define	OMAP_MMC_REG_RSP6	0x16
+#define	OMAP_MMC_REG_RSP7	0x17
+#define	OMAP_MMC_REG_IOSR	0x18
+#define	OMAP_MMC_REG_SYSC	0x19
+#define	OMAP_MMC_REG_SYSS	0x1a
 
 #define	OMAP_MMC_STAT_CARD_ERR		(1 << 14)
 #define	OMAP_MMC_STAT_CARD_IRQ		(1 << 13)
@@ -78,8 +78,9 @@
 #define	OMAP_MMC_STAT_CARD_BUSY		(1 << 2)
 #define	OMAP_MMC_STAT_END_OF_CMD	(1 << 0)
 
-#define OMAP_MMC_READ(host, reg)	__raw_readw((host)->virt_base + OMAP_MMC_REG_##reg)
-#define OMAP_MMC_WRITE(host, reg, val)	__raw_writew((val), (host)->virt_base + OMAP_MMC_REG_##reg)
+#define OMAP_MMC_REG(host, reg)		(OMAP_MMC_REG_##reg << (host)->reg_shift)
+#define OMAP_MMC_READ(host, reg)	__raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg))
+#define OMAP_MMC_WRITE(host, reg, val)	__raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg))
 
 /*
  * Command types
@@ -133,6 +134,7 @@ struct mmc_omap_host {
 	int			irq;
 	unsigned char		bus_mode;
 	unsigned char		hw_bus_mode;
+	unsigned int		reg_shift;
 
 	struct work_struct	cmd_abort_work;
 	unsigned		abort:1;
@@ -680,9 +682,9 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
 	host->data->bytes_xfered += n;
 
 	if (write) {
-		__raw_writesw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
+		__raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
 	} else {
-		__raw_readsw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
+		__raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
 	}
 }
 
@@ -900,7 +902,7 @@ mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
 	int dst_port = 0;
 	int sync_dev = 0;
 
-	data_addr = host->phys_base + OMAP_MMC_REG_DATA;
+	data_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
 	frame = data->blksz;
 	count = sg_dma_len(sg);
 
@@ -1493,6 +1495,8 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
 		}
 	}
 
+	host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
+
 	return 0;
 
 err_plat_cleanup:
@@ -1557,7 +1561,7 @@ static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
 		struct mmc_omap_slot *slot;
 
 		slot = host->slots[i];
-		ret = mmc_suspend_host(slot->mmc, mesg);
+		ret = mmc_suspend_host(slot->mmc);
 		if (ret < 0) {
 			while (--i >= 0) {
 				slot = host->slots[i];
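The register table above now holds word indices that get scaled per SoC, so a quick check of the new macro: with reg_shift = 2 (non-OMAP7xx), OMAP_MMC_REG_STAT (0x04) expands to byte offset 0x04 << 2 = 0x10, matching the old absolute value; on OMAP7xx, reg_shift = 1 puts the same register at 0x08, which corresponds to that controller's narrower register spacing.

	/* e.g. STAT: 0x04 << 2 == 0x10 on OMAP1/2/3, 0x04 << 1 == 0x08 on OMAP7xx */
	#define OMAP_MMC_REG(host, reg)	(OMAP_MMC_REG_##reg << (host)->reg_shift)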
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index e9caf694c59e..b032828c6126 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -157,12 +157,10 @@ struct omap_hsmmc_host {
 	 */
 	struct	regulator	*vcc;
 	struct	regulator	*vcc_aux;
-	struct	semaphore	sem;
 	struct	work_struct	mmc_carddetect_work;
 	void	__iomem		*base;
 	resource_size_t		mapbase;
 	spinlock_t		irq_lock; /* Prevent races with irq handler */
-	unsigned long		flags;
 	unsigned int		id;
 	unsigned int		dma_len;
 	unsigned int		dma_sg_idx;
@@ -183,6 +181,7 @@ struct omap_hsmmc_host {
 	int			protect_card;
 	int			reqs_blocked;
 	int			use_reg;
+	int			req_in_progress;
 
 	struct	omap_mmc_platform_data	*pdata;
 };
@@ -524,6 +523,27 @@ static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
 		dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n");
 }
 
+static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host)
+{
+	unsigned int irq_mask;
+
+	if (host->use_dma)
+		irq_mask = INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE);
+	else
+		irq_mask = INT_EN_MASK;
+
+	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
+	OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
+	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
+}
+
+static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
+{
+	OMAP_HSMMC_WRITE(host->base, ISE, 0);
+	OMAP_HSMMC_WRITE(host->base, IE, 0);
+	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
+}
+
 #ifdef CONFIG_PM
 
 /*
@@ -592,9 +612,7 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
 			&& time_before(jiffies, timeout))
 		;
 
-	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
-	OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
-	OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
+	omap_hsmmc_disable_irq(host);
 
 	/* Do not initialize card-specific things if the power is off */
 	if (host->power_mode == MMC_POWER_OFF)
@@ -697,6 +715,8 @@ static void send_init_stream(struct omap_hsmmc_host *host)
 		return;
 
 	disable_irq(host->irq);
+
+	OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
 	OMAP_HSMMC_WRITE(host->base, CON,
 		OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
 	OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);
@@ -762,17 +782,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
 		mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
 	host->cmd = cmd;
 
-	/*
-	 * Clear status bits and enable interrupts
-	 */
-	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
-	OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
-
-	if (host->use_dma)
-		OMAP_HSMMC_WRITE(host->base, IE,
-				INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE));
-	else
-		OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
+	omap_hsmmc_enable_irq(host);
 
 	host->response_busy = 0;
 	if (cmd->flags & MMC_RSP_PRESENT) {
@@ -806,13 +816,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
 	if (host->use_dma)
 		cmdreg |= DMA_EN;
 
-	/*
-	 * In an interrupt context (i.e. STOP command), the spinlock is unlocked
-	 * by the interrupt handler, otherwise (i.e. for a new request) it is
-	 * unlocked here.
-	 */
-	if (!in_interrupt())
-		spin_unlock_irqrestore(&host->irq_lock, host->flags);
+	host->req_in_progress = 1;
 
 	OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
 	OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
@@ -827,6 +831,23 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
 		return DMA_FROM_DEVICE;
 }
 
+static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
+{
+	int dma_ch;
+
+	spin_lock(&host->irq_lock);
+	host->req_in_progress = 0;
+	dma_ch = host->dma_ch;
+	spin_unlock(&host->irq_lock);
+
+	omap_hsmmc_disable_irq(host);
+	/* Do not complete the request if DMA is still in progress */
+	if (mrq->data && host->use_dma && dma_ch != -1)
+		return;
+	host->mrq = NULL;
+	mmc_request_done(host->mmc, mrq);
+}
+
 /*
  * Notify the transfer complete to MMC core
  */
@@ -843,25 +864,19 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
 			return;
 		}
 
-		host->mrq = NULL;
-		mmc_request_done(host->mmc, mrq);
+		omap_hsmmc_request_done(host, mrq);
 		return;
 	}
 
 	host->data = NULL;
 
-	if (host->use_dma && host->dma_ch != -1)
-		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
-			omap_hsmmc_get_dma_dir(host, data));
-
 	if (!data->error)
 		data->bytes_xfered += data->blocks * (data->blksz);
 	else
 		data->bytes_xfered = 0;
 
 	if (!data->stop) {
-		host->mrq = NULL;
-		mmc_request_done(host->mmc, data->mrq);
+		omap_hsmmc_request_done(host, data->mrq);
 		return;
 	}
 	omap_hsmmc_start_command(host, data->stop, NULL);
@@ -887,10 +902,8 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
 			cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
 		}
 	}
-	if ((host->data == NULL && !host->response_busy) || cmd->error) {
-		host->mrq = NULL;
-		mmc_request_done(host->mmc, cmd->mrq);
-	}
+	if ((host->data == NULL && !host->response_busy) || cmd->error)
+		omap_hsmmc_request_done(host, cmd->mrq);
 }
 
 /*
@@ -898,14 +911,19 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
  */
 static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 {
+	int dma_ch;
+
 	host->data->error = errno;
 
-	if (host->use_dma && host->dma_ch != -1) {
+	spin_lock(&host->irq_lock);
+	dma_ch = host->dma_ch;
+	host->dma_ch = -1;
+	spin_unlock(&host->irq_lock);
+
+	if (host->use_dma && dma_ch != -1) {
 		dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
 			omap_hsmmc_get_dma_dir(host, host->data));
-		omap_free_dma(host->dma_ch);
-		host->dma_ch = -1;
-		up(&host->sem);
+		omap_free_dma(dma_ch);
 	}
 	host->data = NULL;
 }
@@ -967,28 +985,21 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
 			__func__);
 }
 
-/*
- * MMC controller IRQ handler
- */
-static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
+static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
 {
-	struct omap_hsmmc_host *host = dev_id;
 	struct mmc_data *data;
-	int end_cmd = 0, end_trans = 0, status;
+	int end_cmd = 0, end_trans = 0;
 
-	spin_lock(&host->irq_lock);
-
-	if (host->mrq == NULL) {
-		OMAP_HSMMC_WRITE(host->base, STAT,
-			OMAP_HSMMC_READ(host->base, STAT));
-		/* Flush posted write */
-		OMAP_HSMMC_READ(host->base, STAT);
-		spin_unlock(&host->irq_lock);
-		return IRQ_HANDLED;
+	if (!host->req_in_progress) {
+		do {
+			OMAP_HSMMC_WRITE(host->base, STAT, status);
+			/* Flush posted write */
+			status = OMAP_HSMMC_READ(host->base, STAT);
+		} while (status & INT_EN_MASK);
+		return;
 	}
 
 	data = host->data;
-	status = OMAP_HSMMC_READ(host->base, STAT);
 	dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
 
 	if (status & ERR) {
@@ -1041,15 +1052,27 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
 	}
 
 	OMAP_HSMMC_WRITE(host->base, STAT, status);
-	/* Flush posted write */
-	OMAP_HSMMC_READ(host->base, STAT);
 
 	if (end_cmd || ((status & CC) && host->cmd))
 		omap_hsmmc_cmd_done(host, host->cmd);
 	if ((end_trans || (status & TC)) && host->mrq)
 		omap_hsmmc_xfer_done(host, data);
+}
 
-	spin_unlock(&host->irq_lock);
+/*
+ * MMC controller IRQ handler
+ */
+static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
+{
+	struct omap_hsmmc_host *host = dev_id;
+	int status;
+
+	status = OMAP_HSMMC_READ(host->base, STAT);
+	do {
+		omap_hsmmc_do_irq(host, status);
+		/* Flush posted write */
+		status = OMAP_HSMMC_READ(host->base, STAT);
+	} while (status & INT_EN_MASK);
 
 	return IRQ_HANDLED;
 }
@@ -1244,31 +1267,47 @@ static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
 /*
  * DMA call back function
  */
-static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data)
+static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
 {
-	struct omap_hsmmc_host *host = data;
+	struct omap_hsmmc_host *host = cb_data;
+	struct mmc_data *data = host->mrq->data;
+	int dma_ch, req_in_progress;
 
 	if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ)
 		dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n");
 
-	if (host->dma_ch < 0)
+	spin_lock(&host->irq_lock);
+	if (host->dma_ch < 0) {
+		spin_unlock(&host->irq_lock);
 		return;
+	}
 
 	host->dma_sg_idx++;
 	if (host->dma_sg_idx < host->dma_len) {
 		/* Fire up the next transfer. */
-		omap_hsmmc_config_dma_params(host, host->data,
-					host->data->sg + host->dma_sg_idx);
+		omap_hsmmc_config_dma_params(host, data,
+					data->sg + host->dma_sg_idx);
+		spin_unlock(&host->irq_lock);
 		return;
 	}
 
-	omap_free_dma(host->dma_ch);
+	dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
+		omap_hsmmc_get_dma_dir(host, data));
+
+	req_in_progress = host->req_in_progress;
+	dma_ch = host->dma_ch;
 	host->dma_ch = -1;
-	/*
-	 * DMA Callback: run in interrupt context.
-	 * mutex_unlock will throw a kernel warning if used.
-	 */
-	up(&host->sem);
+	spin_unlock(&host->irq_lock);
+
+	omap_free_dma(dma_ch);
+
+	/* If DMA has finished after TC, complete the request */
+	if (!req_in_progress) {
+		struct mmc_request *mrq = host->mrq;
+
+		host->mrq = NULL;
+		mmc_request_done(host->mmc, mrq);
+	}
 }
 
 /*
@@ -1277,7 +1316,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data)
1277static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, 1316static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
1278 struct mmc_request *req) 1317 struct mmc_request *req)
1279{ 1318{
1280 int dma_ch = 0, ret = 0, err = 1, i; 1319 int dma_ch = 0, ret = 0, i;
1281 struct mmc_data *data = req->data; 1320 struct mmc_data *data = req->data;
1282 1321
1283 /* Sanity check: all the SG entries must be aligned by block size. */ 1322 /* Sanity check: all the SG entries must be aligned by block size. */
@@ -1294,23 +1333,7 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
1294 */ 1333 */
1295 return -EINVAL; 1334 return -EINVAL;
1296 1335
1297 /* 1336 BUG_ON(host->dma_ch != -1);
1298 * If for some reason the DMA transfer is still active,
1299 * we wait for timeout period and free the dma
1300 */
1301 if (host->dma_ch != -1) {
1302 set_current_state(TASK_UNINTERRUPTIBLE);
1303 schedule_timeout(100);
1304 if (down_trylock(&host->sem)) {
1305 omap_free_dma(host->dma_ch);
1306 host->dma_ch = -1;
1307 up(&host->sem);
1308 return err;
1309 }
1310 } else {
1311 if (down_trylock(&host->sem))
1312 return err;
1313 }
1314 1337
1315 ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data), 1338 ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
1316 "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch); 1339 "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
@@ -1410,37 +1433,27 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
1410 struct omap_hsmmc_host *host = mmc_priv(mmc); 1433 struct omap_hsmmc_host *host = mmc_priv(mmc);
1411 int err; 1434 int err;
1412 1435
1413 /* 1436 BUG_ON(host->req_in_progress);
1414 * Prevent races with the interrupt handler because of unexpected 1437 BUG_ON(host->dma_ch != -1);
1415 * interrupts, but not if we are already in interrupt context i.e. 1438 if (host->protect_card) {
1416 * retries. 1439 if (host->reqs_blocked < 3) {
1417 */ 1440 /*
1418 if (!in_interrupt()) { 1441 * Ensure the controller is left in a consistent
1419 spin_lock_irqsave(&host->irq_lock, host->flags); 1442 * state by resetting the command and data state
1420 /* 1443 * machines.
1421 * Protect the card from I/O if there is a possibility 1444 */
1422 * it can be removed. 1445 omap_hsmmc_reset_controller_fsm(host, SRD);
1423 */ 1446 omap_hsmmc_reset_controller_fsm(host, SRC);
1424 if (host->protect_card) { 1447 host->reqs_blocked += 1;
1425 if (host->reqs_blocked < 3) { 1448 }
1426 /* 1449 req->cmd->error = -EBADF;
1427 * Ensure the controller is left in a consistent 1450 if (req->data)
1428 * state by resetting the command and data state 1451 req->data->error = -EBADF;
1429 * machines. 1452 req->cmd->retries = 0;
1430 */ 1453 mmc_request_done(mmc, req);
1431 omap_hsmmc_reset_controller_fsm(host, SRD); 1454 return;
1432 omap_hsmmc_reset_controller_fsm(host, SRC); 1455 } else if (host->reqs_blocked)
1433 host->reqs_blocked += 1; 1456 host->reqs_blocked = 0;
1434 }
1435 req->cmd->error = -EBADF;
1436 if (req->data)
1437 req->data->error = -EBADF;
1438 spin_unlock_irqrestore(&host->irq_lock, host->flags);
1439 mmc_request_done(mmc, req);
1440 return;
1441 } else if (host->reqs_blocked)
1442 host->reqs_blocked = 0;
1443 }
1444 WARN_ON(host->mrq != NULL); 1457 WARN_ON(host->mrq != NULL);
1445 host->mrq = req; 1458 host->mrq = req;
1446 err = omap_hsmmc_prepare_data(host, req); 1459 err = omap_hsmmc_prepare_data(host, req);
@@ -1449,8 +1462,6 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
1449 if (req->data) 1462 if (req->data)
1450 req->data->error = err; 1463 req->data->error = err;
1451 host->mrq = NULL; 1464 host->mrq = NULL;
1452 if (!in_interrupt())
1453 spin_unlock_irqrestore(&host->irq_lock, host->flags);
1454 mmc_request_done(mmc, req); 1465 mmc_request_done(mmc, req);
1455 return; 1466 return;
1456 } 1467 }
@@ -2019,7 +2030,6 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2019 mmc->f_min = 400000; 2030 mmc->f_min = 400000;
2020 mmc->f_max = 52000000; 2031 mmc->f_max = 52000000;
2021 2032
2022 sema_init(&host->sem, 1);
2023 spin_lock_init(&host->irq_lock); 2033 spin_lock_init(&host->irq_lock);
2024 2034
2025 host->iclk = clk_get(&pdev->dev, "ick"); 2035 host->iclk = clk_get(&pdev->dev, "ick");
@@ -2162,8 +2172,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2162 } 2172 }
2163 } 2173 }
2164 2174
2165 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); 2175 omap_hsmmc_disable_irq(host);
2166 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
2167 2176
2168 mmc_host_lazy_disable(host->mmc); 2177 mmc_host_lazy_disable(host->mmc);
2169 2178
@@ -2258,10 +2267,12 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2258} 2267}
2259 2268
2260#ifdef CONFIG_PM 2269#ifdef CONFIG_PM
2261static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state) 2270static int omap_hsmmc_suspend(struct device *dev)
2262{ 2271{
2263 int ret = 0; 2272 int ret = 0;
2273 struct platform_device *pdev = to_platform_device(dev);
2264 struct omap_hsmmc_host *host = platform_get_drvdata(pdev); 2274 struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
2275 pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
2265 2276
2266 if (host && host->suspended) 2277 if (host && host->suspended)
2267 return 0; 2278 return 0;
@@ -2281,12 +2292,9 @@ static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
2281 } 2292 }
2282 cancel_work_sync(&host->mmc_carddetect_work); 2293 cancel_work_sync(&host->mmc_carddetect_work);
2283 mmc_host_enable(host->mmc); 2294 mmc_host_enable(host->mmc);
2284 ret = mmc_suspend_host(host->mmc, state); 2295 ret = mmc_suspend_host(host->mmc);
2285 if (ret == 0) { 2296 if (ret == 0) {
2286 OMAP_HSMMC_WRITE(host->base, ISE, 0); 2297 omap_hsmmc_disable_irq(host);
2287 OMAP_HSMMC_WRITE(host->base, IE, 0);
2288
2289
2290 OMAP_HSMMC_WRITE(host->base, HCTL, 2298 OMAP_HSMMC_WRITE(host->base, HCTL,
2291 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); 2299 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
2292 mmc_host_disable(host->mmc); 2300 mmc_host_disable(host->mmc);
@@ -2310,9 +2318,10 @@ static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
2310} 2318}
2311 2319
2312/* Routine to resume the MMC device */ 2320/* Routine to resume the MMC device */
2313static int omap_hsmmc_resume(struct platform_device *pdev) 2321static int omap_hsmmc_resume(struct device *dev)
2314{ 2322{
2315 int ret = 0; 2323 int ret = 0;
2324 struct platform_device *pdev = to_platform_device(dev);
2316 struct omap_hsmmc_host *host = platform_get_drvdata(pdev); 2325 struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
2317 2326
2318 if (host && !host->suspended) 2327 if (host && !host->suspended)
@@ -2363,13 +2372,17 @@ clk_en_err:
2363#define omap_hsmmc_resume NULL 2372#define omap_hsmmc_resume NULL
2364#endif 2373#endif
2365 2374
2366static struct platform_driver omap_hsmmc_driver = { 2375static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
2367 .remove = omap_hsmmc_remove,
2368 .suspend = omap_hsmmc_suspend, 2376 .suspend = omap_hsmmc_suspend,
2369 .resume = omap_hsmmc_resume, 2377 .resume = omap_hsmmc_resume,
2378};
2379
2380static struct platform_driver omap_hsmmc_driver = {
2381 .remove = omap_hsmmc_remove,
2370 .driver = { 2382 .driver = {
2371 .name = DRIVER_NAME, 2383 .name = DRIVER_NAME,
2372 .owner = THIS_MODULE, 2384 .owner = THIS_MODULE,
2385 .pm = &omap_hsmmc_dev_pm_ops,
2373 }, 2386 },
2374}; 2387};
2375 2388
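The driver-model change above replaces the platform_driver suspend/resume methods with a dev_pm_ops table hung off .driver.pm, so the callbacks now take a struct device *. A minimal sketch of the same wiring for a hypothetical driver; all mydrv_* names are illustrative:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int mydrv_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	dev_dbg(&pdev->dev, "suspending\n");	/* quiesce the device here */
	return 0;
}

static int mydrv_resume(struct device *dev)
{
	return 0;
}

static struct dev_pm_ops mydrv_pm_ops = {
	.suspend = mydrv_suspend,
	.resume  = mydrv_resume,
};

static struct platform_driver mydrv_driver = {
	/* .probe and .remove omitted from this sketch */
	.driver = {
		.name  = "mydrv",
		.owner = THIS_MODULE,
		.pm    = &mydrv_pm_ops,
	},
};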
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index e4f00e70a749..0a4e43f37140 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -813,7 +813,7 @@ static int pxamci_suspend(struct device *dev)
813 int ret = 0; 813 int ret = 0;
814 814
815 if (mmc) 815 if (mmc)
816 ret = mmc_suspend_host(mmc, PMSG_SUSPEND); 816 ret = mmc_suspend_host(mmc);
817 817
818 return ret; 818 return ret;
819} 819}
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 2fdf7689ae6c..2e16e0a90a5e 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1881,9 +1881,8 @@ MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
1881static int s3cmci_suspend(struct device *dev) 1881static int s3cmci_suspend(struct device *dev)
1882{ 1882{
1883 struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev)); 1883 struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
1884 struct pm_message event = { PM_EVENT_SUSPEND };
1885 1884
1886 return mmc_suspend_host(mmc, event); 1885 return mmc_suspend_host(mmc);
1887} 1886}
1888 1887
1889static int s3cmci_resume(struct device *dev) 1888static int s3cmci_resume(struct device *dev)
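These call-site updates all follow the core change at the top of the patch: mmc_suspend_host() lost its pm_message_t argument. A host driver's suspend hook therefore reduces to something like the sketch below; the myhost_* name is illustrative:

#include <linux/mmc/host.h>
#include <linux/platform_device.h>

static int myhost_suspend(struct device *dev)
{
	struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
	int ret = 0;

	if (mmc)
		ret = mmc_suspend_host(mmc);	/* no pm_message_t any more */

	return ret;
}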
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index 55e33135edb4..a2e9820cd42f 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -89,7 +89,7 @@ static int sdhci_of_suspend(struct of_device *ofdev, pm_message_t state)
89{ 89{
90 struct sdhci_host *host = dev_get_drvdata(&ofdev->dev); 90 struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
91 91
92 return mmc_suspend_host(host->mmc, state); 92 return mmc_suspend_host(host->mmc);
93} 93}
94 94
95static int sdhci_of_resume(struct of_device *ofdev) 95static int sdhci_of_resume(struct of_device *ofdev)
@@ -118,7 +118,7 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
118static int __devinit sdhci_of_probe(struct of_device *ofdev, 118static int __devinit sdhci_of_probe(struct of_device *ofdev,
119 const struct of_device_id *match) 119 const struct of_device_id *match)
120{ 120{
121 struct device_node *np = ofdev->node; 121 struct device_node *np = ofdev->dev.of_node;
122 struct sdhci_of_data *sdhci_of_data = match->data; 122 struct sdhci_of_data *sdhci_of_data = match->data;
123 struct sdhci_host *host; 123 struct sdhci_host *host;
124 struct sdhci_of_host *of_host; 124 struct sdhci_of_host *of_host;
@@ -205,8 +205,11 @@ static const struct of_device_id sdhci_of_match[] = {
205MODULE_DEVICE_TABLE(of, sdhci_of_match); 205MODULE_DEVICE_TABLE(of, sdhci_of_match);
206 206
207static struct of_platform_driver sdhci_of_driver = { 207static struct of_platform_driver sdhci_of_driver = {
208 .driver.name = "sdhci-of", 208 .driver = {
209 .match_table = sdhci_of_match, 209 .name = "sdhci-of",
210 .owner = THIS_MODULE,
211 .of_match_table = sdhci_of_match,
212 },
210 .probe = sdhci_of_probe, 213 .probe = sdhci_of_probe,
211 .remove = __devexit_p(sdhci_of_remove), 214 .remove = __devexit_p(sdhci_of_remove),
212 .suspend = sdhci_of_suspend, 215 .suspend = sdhci_of_suspend,
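The sdhci-of hunk above follows the of_platform_driver conversion: the name, owner and match table now live in the embedded struct device_driver (.of_match_table) instead of the old top-level .match_table. For reference, a driver registering against the new layout looks roughly like this; the myof_* names and the compatible string are illustrative:

#include <linux/module.h>
#include <linux/of_platform.h>

static const struct of_device_id myof_sdhci_match[] = {
	{ .compatible = "example,myof-sdhci", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, myof_sdhci_match);

static struct of_platform_driver myof_sdhci_driver = {
	.driver = {
		.name		= "myof-sdhci",
		.owner		= THIS_MODULE,
		.of_match_table	= myof_sdhci_match,
	},
	/* .probe, .remove, .suspend and .resume hook up as before */
};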
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index d5b11a17e648..c8623de13af3 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -129,12 +129,12 @@ struct sdhci_of_data sdhci_esdhc = {
129 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | 129 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
130 SDHCI_QUIRK_NO_CARD_NO_RESET, 130 SDHCI_QUIRK_NO_CARD_NO_RESET,
131 .ops = { 131 .ops = {
132 .readl = sdhci_be32bs_readl, 132 .read_l = sdhci_be32bs_readl,
133 .readw = esdhc_readw, 133 .read_w = esdhc_readw,
134 .readb = sdhci_be32bs_readb, 134 .read_b = sdhci_be32bs_readb,
135 .writel = sdhci_be32bs_writel, 135 .write_l = sdhci_be32bs_writel,
136 .writew = esdhc_writew, 136 .write_w = esdhc_writew,
137 .writeb = esdhc_writeb, 137 .write_b = esdhc_writeb,
138 .set_clock = esdhc_set_clock, 138 .set_clock = esdhc_set_clock,
139 .enable_dma = esdhc_enable_dma, 139 .enable_dma = esdhc_enable_dma,
140 .get_max_clock = esdhc_get_max_clock, 140 .get_max_clock = esdhc_get_max_clock,
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 35117f3ed757..68ddb7546ae2 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -55,11 +55,11 @@ struct sdhci_of_data sdhci_hlwd = {
55 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | 55 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
56 SDHCI_QUIRK_32BIT_DMA_SIZE, 56 SDHCI_QUIRK_32BIT_DMA_SIZE,
57 .ops = { 57 .ops = {
58 .readl = sdhci_be32bs_readl, 58 .read_l = sdhci_be32bs_readl,
59 .readw = sdhci_be32bs_readw, 59 .read_w = sdhci_be32bs_readw,
60 .readb = sdhci_be32bs_readb, 60 .read_b = sdhci_be32bs_readb,
61 .writel = sdhci_hlwd_writel, 61 .write_l = sdhci_hlwd_writel,
62 .writew = sdhci_hlwd_writew, 62 .write_w = sdhci_hlwd_writew,
63 .writeb = sdhci_hlwd_writeb, 63 .write_b = sdhci_hlwd_writeb,
64 }, 64 },
65}; 65};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 6701af629c30..65483fdea45b 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -628,7 +628,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
628 host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot)); 628 host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot));
629 if (IS_ERR(host)) { 629 if (IS_ERR(host)) {
630 dev_err(&pdev->dev, "cannot allocate host\n"); 630 dev_err(&pdev->dev, "cannot allocate host\n");
631 return ERR_PTR(PTR_ERR(host)); 631 return ERR_CAST(host);
632 } 632 }
633 633
634 slot = sdhci_priv(host); 634 slot = sdhci_priv(host);
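ERR_CAST() from <linux/err.h> exists for exactly this case: handing an error-valued pointer of one type back as another without the ERR_PTR(PTR_ERR(...)) round trip. A self-contained sketch with illustrative types and helpers, not taken from sdhci:

#include <linux/err.h>
#include <linux/slab.h>

struct inner { int id; };		/* illustrative types */
struct outer { struct inner *in; };

static struct inner *alloc_inner(void)
{
	struct inner *in = kzalloc(sizeof(*in), GFP_KERNEL);

	return in ? in : ERR_PTR(-ENOMEM);
}

static struct outer *make_outer(void)
{
	struct outer *out;
	struct inner *in = alloc_inner();

	if (IS_ERR(in))
		return ERR_CAST(in);	/* instead of ERR_PTR(PTR_ERR(in)) */

	out = kzalloc(sizeof(*out), GFP_KERNEL);
	if (!out) {
		kfree(in);
		return ERR_PTR(-ENOMEM);
	}
	out->in = in;
	return out;
}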
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 297f40ae6ad5..b6ee0d719698 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -29,6 +29,7 @@
29#include <linux/mmc/host.h> 29#include <linux/mmc/host.h>
30 30
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/sdhci-pltfm.h>
32 33
33#include "sdhci.h" 34#include "sdhci.h"
34 35
@@ -49,19 +50,18 @@ static struct sdhci_ops sdhci_pltfm_ops = {
49 50
50static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) 51static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
51{ 52{
53 struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
52 struct sdhci_host *host; 54 struct sdhci_host *host;
53 struct resource *iomem; 55 struct resource *iomem;
54 int ret; 56 int ret;
55 57
56 BUG_ON(pdev == NULL);
57
58 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 58 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
59 if (!iomem) { 59 if (!iomem) {
60 ret = -ENOMEM; 60 ret = -ENOMEM;
61 goto err; 61 goto err;
62 } 62 }
63 63
64 if (resource_size(iomem) != 0x100) 64 if (resource_size(iomem) < 0x100)
65 dev_err(&pdev->dev, "Invalid iomem size. You may " 65 dev_err(&pdev->dev, "Invalid iomem size. You may "
66 "experience problems.\n"); 66 "experience problems.\n");
67 67
@@ -76,7 +76,12 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
76 } 76 }
77 77
78 host->hw_name = "platform"; 78 host->hw_name = "platform";
79 host->ops = &sdhci_pltfm_ops; 79 if (pdata && pdata->ops)
80 host->ops = pdata->ops;
81 else
82 host->ops = &sdhci_pltfm_ops;
83 if (pdata)
84 host->quirks = pdata->quirks;
80 host->irq = platform_get_irq(pdev, 0); 85 host->irq = platform_get_irq(pdev, 0);
81 86
82 if (!request_mem_region(iomem->start, resource_size(iomem), 87 if (!request_mem_region(iomem->start, resource_size(iomem),
@@ -93,6 +98,12 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
93 goto err_remap; 98 goto err_remap;
94 } 99 }
95 100
101 if (pdata && pdata->init) {
102 ret = pdata->init(host);
103 if (ret)
104 goto err_plat_init;
105 }
106
96 ret = sdhci_add_host(host); 107 ret = sdhci_add_host(host);
97 if (ret) 108 if (ret)
98 goto err_add_host; 109 goto err_add_host;
@@ -102,6 +113,9 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
102 return 0; 113 return 0;
103 114
104err_add_host: 115err_add_host:
116 if (pdata && pdata->exit)
117 pdata->exit(host);
118err_plat_init:
105 iounmap(host->ioaddr); 119 iounmap(host->ioaddr);
106err_remap: 120err_remap:
107 release_mem_region(iomem->start, resource_size(iomem)); 121 release_mem_region(iomem->start, resource_size(iomem));
@@ -114,6 +128,7 @@ err:
114 128
115static int __devexit sdhci_pltfm_remove(struct platform_device *pdev) 129static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
116{ 130{
131 struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
117 struct sdhci_host *host = platform_get_drvdata(pdev); 132 struct sdhci_host *host = platform_get_drvdata(pdev);
118 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 133 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
119 int dead; 134 int dead;
@@ -125,6 +140,8 @@ static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
125 dead = 1; 140 dead = 1;
126 141
127 sdhci_remove_host(host, dead); 142 sdhci_remove_host(host, dead);
143 if (pdata && pdata->exit)
144 pdata->exit(host);
128 iounmap(host->ioaddr); 145 iounmap(host->ioaddr);
129 release_mem_region(iomem->start, resource_size(iomem)); 146 release_mem_region(iomem->start, resource_size(iomem));
130 sdhci_free_host(host); 147 sdhci_free_host(host);
@@ -165,4 +182,3 @@ MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
165MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>"); 182MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
166MODULE_LICENSE("GPL v2"); 183MODULE_LICENSE("GPL v2");
167MODULE_ALIAS("platform:sdhci"); 184MODULE_ALIAS("platform:sdhci");
168
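With the hooks added above, a board file can hand the generic sdhci-pltfm driver its own ops, quirks and init/exit callbacks through platform data. A hedged sketch, assuming only the sdhci_pltfm_data members this patch dereferences (ops, quirks, init, exit) and the <linux/sdhci-pltfm.h> header it includes; the myboard_* hooks are illustrative:

#include <linux/sdhci-pltfm.h>

static int myboard_sdhci_init(struct sdhci_host *host)
{
	/* e.g. enable a board clock or regulator before sdhci_add_host() */
	return 0;
}

static void myboard_sdhci_exit(struct sdhci_host *host)
{
	/* undo whatever myboard_sdhci_init() set up */
}

static struct sdhci_pltfm_data myboard_sdhci_pdata = {
	.quirks	= 0,			/* or a combination of SDHCI_QUIRK_* flags */
	.init	= myboard_sdhci_init,
	.exit	= myboard_sdhci_exit,
	/* .ops may point at a custom struct sdhci_ops when needed */
};

The board then attaches &myboard_sdhci_pdata as the platform device's dev.platform_data, which is where sdhci_pltfm_probe() picks it up in the hunk above.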
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 2136794c0cfa..af217924a76e 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -317,12 +317,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
317 host->irq = irq; 317 host->irq = irq;
318 318
319 /* Setup quirks for the controller */ 319 /* Setup quirks for the controller */
320 320 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
321 /* Currently with ADMA enabled we are getting some length
322 * interrupts that are not being dealt with, do disable
323 * ADMA until this is sorted out. */
324 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
325 host->quirks |= SDHCI_QUIRK_32BIT_ADMA_SIZE;
326 321
327#ifndef CONFIG_MMC_SDHCI_S3C_DMA 322#ifndef CONFIG_MMC_SDHCI_S3C_DMA
328 323
@@ -330,9 +325,6 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
330 * support as well. */ 325 * support as well. */
331 host->quirks |= SDHCI_QUIRK_BROKEN_DMA; 326 host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
332 327
333 /* PIO currently has problems with multi-block IO */
334 host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
335
336#endif /* CONFIG_MMC_SDHCI_S3C_DMA */ 328#endif /* CONFIG_MMC_SDHCI_S3C_DMA */
337 329
338 /* It seems we do not get an DATA transfer complete on non-busy 330 /* It seems we do not get an DATA transfer complete on non-busy
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
new file mode 100644
index 000000000000..d70c54c7b70a
--- /dev/null
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -0,0 +1,298 @@
1/*
2 * drivers/mmc/host/sdhci-spear.c
3 *
4 * Support of SDHCI platform devices for spear soc family
5 *
6 * Copyright (C) 2010 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com>
8 *
9 * Inspired by sdhci-pltfm.c
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 */
15
16#include <linux/clk.h>
17#include <linux/delay.h>
18#include <linux/gpio.h>
19#include <linux/highmem.h>
20#include <linux/interrupt.h>
21#include <linux/irq.h>
22#include <linux/platform_device.h>
23#include <linux/slab.h>
24#include <linux/mmc/host.h>
25#include <linux/mmc/sdhci-spear.h>
26#include <linux/io.h>
27#include "sdhci.h"
28
29struct spear_sdhci {
30 struct clk *clk;
31 struct sdhci_plat_data *data;
32};
33
34/* sdhci ops */
35static struct sdhci_ops sdhci_pltfm_ops = {
36 /* Nothing to do for now. */
37};
38
39/* gpio card detection interrupt handler */
40static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id)
41{
42 struct platform_device *pdev = dev_id;
43 struct sdhci_host *host = platform_get_drvdata(pdev);
44 struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
45 unsigned long gpio_irq_type;
46 int val;
47
48 val = gpio_get_value(sdhci->data->card_int_gpio);
49
50 /* val == 1 -> card removed, val == 0 -> card inserted */
51 /* if card removed - set irq for low level, else vice versa */
52 gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH;
53 set_irq_type(irq, gpio_irq_type);
54
55 if (sdhci->data->card_power_gpio >= 0) {
56 if (!sdhci->data->power_always_enb) {
57 /* if card inserted, give power, otherwise remove it */
58 val = sdhci->data->power_active_high ? !val : val ;
59 gpio_set_value(sdhci->data->card_power_gpio, val);
60 }
61 }
62
63 /* inform sdhci driver about card insertion/removal */
64 tasklet_schedule(&host->card_tasklet);
65
66 return IRQ_HANDLED;
67}
68
69static int __devinit sdhci_probe(struct platform_device *pdev)
70{
71 struct sdhci_host *host;
72 struct resource *iomem;
73 struct spear_sdhci *sdhci;
74 int ret;
75
76 BUG_ON(pdev == NULL);
77
78 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
79 if (!iomem) {
80 ret = -ENOMEM;
81 dev_dbg(&pdev->dev, "memory resource not defined\n");
82 goto err;
83 }
84
85 if (!request_mem_region(iomem->start, resource_size(iomem),
86 "spear-sdhci")) {
87 ret = -EBUSY;
88 dev_dbg(&pdev->dev, "cannot request region\n");
89 goto err;
90 }
91
92 sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL);
93 if (!sdhci) {
94 ret = -ENOMEM;
95 dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");
96 goto err_kzalloc;
97 }
98
99 /* clk enable */
100 sdhci->clk = clk_get(&pdev->dev, NULL);
101 if (IS_ERR(sdhci->clk)) {
102 ret = PTR_ERR(sdhci->clk);
103 dev_dbg(&pdev->dev, "Error getting clock\n");
104 goto err_clk_get;
105 }
106
107 ret = clk_enable(sdhci->clk);
108 if (ret) {
109 dev_dbg(&pdev->dev, "Error enabling clock\n");
110 goto err_clk_enb;
111 }
112
113 /* overwrite platform_data */
114 sdhci->data = dev_get_platdata(&pdev->dev);
115 pdev->dev.platform_data = sdhci;
116
117 if (pdev->dev.parent)
118 host = sdhci_alloc_host(pdev->dev.parent, 0);
119 else
120 host = sdhci_alloc_host(&pdev->dev, 0);
121
122 if (IS_ERR(host)) {
123 ret = PTR_ERR(host);
124 dev_dbg(&pdev->dev, "error allocating host\n");
125 goto err_alloc_host;
126 }
127
128 host->hw_name = "sdhci";
129 host->ops = &sdhci_pltfm_ops;
130 host->irq = platform_get_irq(pdev, 0);
131 host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
132
133 host->ioaddr = ioremap(iomem->start, resource_size(iomem));
134 if (!host->ioaddr) {
135 ret = -ENOMEM;
136 dev_dbg(&pdev->dev, "failed to remap registers\n");
137 goto err_ioremap;
138 }
139
140 ret = sdhci_add_host(host);
141 if (ret) {
142 dev_dbg(&pdev->dev, "error adding host\n");
143 goto err_add_host;
144 }
145
146 platform_set_drvdata(pdev, host);
147
148 /*
149 * It is optional to use GPIOs for sdhci Power control & sdhci card
150 * interrupt detection. If sdhci->data is NULL, then use original sdhci
151 * lines otherwise GPIO lines.
152 * If GPIO is selected for power control, then power should be disabled
153 * after card removal and should be enabled when card insertion
154 * interrupt occurs
155 */
156 if (!sdhci->data)
157 return 0;
158
159 if (sdhci->data->card_power_gpio >= 0) {
160 int val = 0;
161
162 ret = gpio_request(sdhci->data->card_power_gpio, "sdhci");
163 if (ret < 0) {
164 dev_dbg(&pdev->dev, "gpio request fail: %d\n",
165 sdhci->data->card_power_gpio);
166 goto err_pgpio_request;
167 }
168
169 if (sdhci->data->power_always_enb)
170 val = sdhci->data->power_active_high;
171 else
172 val = !sdhci->data->power_active_high;
173
174 ret = gpio_direction_output(sdhci->data->card_power_gpio, val);
175 if (ret) {
176 dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
177 sdhci->data->card_power_gpio);
178 goto err_pgpio_direction;
179 }
180
181 gpio_set_value(sdhci->data->card_power_gpio, 1);
182 }
183
184 if (sdhci->data->card_int_gpio >= 0) {
185 ret = gpio_request(sdhci->data->card_int_gpio, "sdhci");
186 if (ret < 0) {
187 dev_dbg(&pdev->dev, "gpio request fail: %d\n",
188 sdhci->data->card_int_gpio);
189 goto err_igpio_request;
190 }
191
192 ret = gpio_direction_input(sdhci->data->card_int_gpio);
193 if (ret) {
194 dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
195 sdhci->data->card_int_gpio);
196 goto err_igpio_direction;
197 }
198 ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio),
199 sdhci_gpio_irq, IRQF_TRIGGER_LOW,
200 mmc_hostname(host->mmc), pdev);
201 if (ret) {
202 dev_dbg(&pdev->dev, "gpio request irq fail: %d\n",
203 sdhci->data->card_int_gpio);
204 goto err_igpio_request_irq;
205 }
206
207 }
208
209 return 0;
210
211err_igpio_request_irq:
212err_igpio_direction:
213 if (sdhci->data->card_int_gpio >= 0)
214 gpio_free(sdhci->data->card_int_gpio);
215err_igpio_request:
216err_pgpio_direction:
217 if (sdhci->data->card_power_gpio >= 0)
218 gpio_free(sdhci->data->card_power_gpio);
219err_pgpio_request:
220 platform_set_drvdata(pdev, NULL);
221 sdhci_remove_host(host, 1);
222err_add_host:
223 iounmap(host->ioaddr);
224err_ioremap:
225 sdhci_free_host(host);
226err_alloc_host:
227 clk_disable(sdhci->clk);
228err_clk_enb:
229 clk_put(sdhci->clk);
230err_clk_get:
231 kfree(sdhci);
232err_kzalloc:
233 release_mem_region(iomem->start, resource_size(iomem));
234err:
235 dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);
236 return ret;
237}
238
239static int __devexit sdhci_remove(struct platform_device *pdev)
240{
241 struct sdhci_host *host = platform_get_drvdata(pdev);
242 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
243 struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
244 int dead;
245 u32 scratch;
246
247 if (sdhci->data) {
248 if (sdhci->data->card_int_gpio >= 0) {
249 free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev);
250 gpio_free(sdhci->data->card_int_gpio);
251 }
252
253 if (sdhci->data->card_power_gpio >= 0)
254 gpio_free(sdhci->data->card_power_gpio);
255 }
256
257 platform_set_drvdata(pdev, NULL);
258 dead = 0;
259 scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
260 if (scratch == (u32)-1)
261 dead = 1;
262
263 sdhci_remove_host(host, dead);
264 iounmap(host->ioaddr);
265 sdhci_free_host(host);
266 clk_disable(sdhci->clk);
267 clk_put(sdhci->clk);
268 kfree(sdhci);
269 if (iomem)
270 release_mem_region(iomem->start, resource_size(iomem));
271
272 return 0;
273}
274
275static struct platform_driver sdhci_driver = {
276 .driver = {
277 .name = "sdhci",
278 .owner = THIS_MODULE,
279 },
280 .probe = sdhci_probe,
281 .remove = __devexit_p(sdhci_remove),
282};
283
284static int __init sdhci_init(void)
285{
286 return platform_driver_register(&sdhci_driver);
287}
288module_init(sdhci_init);
289
290static void __exit sdhci_exit(void)
291{
292 platform_driver_unregister(&sdhci_driver);
293}
294module_exit(sdhci_exit);
295
296MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
297MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
298MODULE_LICENSE("GPL v2");
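The new SPEAr glue takes all of its GPIO wiring from platform data. A hedged board-side sketch, assuming only the sdhci_plat_data fields the driver dereferences (card_int_gpio, card_power_gpio, power_always_enb, power_active_high) and the <linux/mmc/sdhci-spear.h> header it includes; the GPIO numbers are placeholders:

#include <linux/mmc/sdhci-spear.h>

static struct sdhci_plat_data myboard_sdhci_data = {
	.card_int_gpio		= 34,	/* placeholder card-detect GPIO */
	.card_power_gpio	= 35,	/* placeholder slot-power GPIO */
	.power_always_enb	= 0,	/* gate power on insertion/removal */
	.power_active_high	= 1,	/* drive the power GPIO high to enable */
};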
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9d4fdfa685e5..c6d1bd8d4ac4 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -496,12 +496,22 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
496 WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4); 496 WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
497 } 497 }
498 498
499 /* 499 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
500 * Add a terminating entry. 500 /*
501 */ 501 * Mark the last descriptor as the terminating descriptor
502 */
503 if (desc != host->adma_desc) {
504 desc -= 8;
505 desc[0] |= 0x2; /* end */
506 }
507 } else {
508 /*
509 * Add a terminating entry.
510 */
502 511
503 /* nop, end, valid */ 512 /* nop, end, valid */
504 sdhci_set_adma_desc(desc, 0, 0, 0x3); 513 sdhci_set_adma_desc(desc, 0, 0, 0x3);
514 }
505 515
506 /* 516 /*
507 * Resync align buffer as we might have changed it. 517 * Resync align buffer as we might have changed it.
@@ -1587,7 +1597,7 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
1587 1597
1588 sdhci_disable_card_detection(host); 1598 sdhci_disable_card_detection(host);
1589 1599
1590 ret = mmc_suspend_host(host->mmc, state); 1600 ret = mmc_suspend_host(host->mmc);
1591 if (ret) 1601 if (ret)
1592 return ret; 1602 return ret;
1593 1603
@@ -1744,7 +1754,8 @@ int sdhci_add_host(struct sdhci_host *host)
1744 host->max_clk = 1754 host->max_clk =
1745 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; 1755 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
1746 host->max_clk *= 1000000; 1756 host->max_clk *= 1000000;
1747 if (host->max_clk == 0) { 1757 if (host->max_clk == 0 || host->quirks &
1758 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
1748 if (!host->ops->get_max_clock) { 1759 if (!host->ops->get_max_clock) {
1749 printk(KERN_ERR 1760 printk(KERN_ERR
1750 "%s: Hardware doesn't specify base clock " 1761 "%s: Hardware doesn't specify base clock "
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 842f46f94284..c8468134adc9 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -127,7 +127,7 @@
127#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \ 127#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
128 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \ 128 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
129 SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \ 129 SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
130 SDHCI_INT_DATA_END_BIT | SDHCI_ADMA_ERROR) 130 SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR)
131#define SDHCI_INT_ALL_MASK ((unsigned int)-1) 131#define SDHCI_INT_ALL_MASK ((unsigned int)-1)
132 132
133#define SDHCI_ACMD12_ERR 0x3C 133#define SDHCI_ACMD12_ERR 0x3C
@@ -236,6 +236,10 @@ struct sdhci_host {
236#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23) 236#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
237/* Controller uses SDCLK instead of TMCLK for data timeouts */ 237/* Controller uses SDCLK instead of TMCLK for data timeouts */
238#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24) 238#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24)
239/* Controller reports wrong base clock capability */
240#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25)
241/* Controller cannot support End Attribute in NOP ADMA descriptor */
242#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26)
239 243
240 int irq; /* Device IRQ */ 244 int irq; /* Device IRQ */
241 void __iomem * ioaddr; /* Mapped address */ 245 void __iomem * ioaddr; /* Mapped address */
@@ -294,12 +298,12 @@ struct sdhci_host {
294 298
295struct sdhci_ops { 299struct sdhci_ops {
296#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS 300#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
297 u32 (*readl)(struct sdhci_host *host, int reg); 301 u32 (*read_l)(struct sdhci_host *host, int reg);
298 u16 (*readw)(struct sdhci_host *host, int reg); 302 u16 (*read_w)(struct sdhci_host *host, int reg);
299 u8 (*readb)(struct sdhci_host *host, int reg); 303 u8 (*read_b)(struct sdhci_host *host, int reg);
300 void (*writel)(struct sdhci_host *host, u32 val, int reg); 304 void (*write_l)(struct sdhci_host *host, u32 val, int reg);
301 void (*writew)(struct sdhci_host *host, u16 val, int reg); 305 void (*write_w)(struct sdhci_host *host, u16 val, int reg);
302 void (*writeb)(struct sdhci_host *host, u8 val, int reg); 306 void (*write_b)(struct sdhci_host *host, u8 val, int reg);
303#endif 307#endif
304 308
305 void (*set_clock)(struct sdhci_host *host, unsigned int clock); 309 void (*set_clock)(struct sdhci_host *host, unsigned int clock);
@@ -314,48 +318,48 @@ struct sdhci_ops {
314 318
315static inline void sdhci_writel(struct sdhci_host *host, u32 val, int reg) 319static inline void sdhci_writel(struct sdhci_host *host, u32 val, int reg)
316{ 320{
317 if (unlikely(host->ops->writel)) 321 if (unlikely(host->ops->write_l))
318 host->ops->writel(host, val, reg); 322 host->ops->write_l(host, val, reg);
319 else 323 else
320 writel(val, host->ioaddr + reg); 324 writel(val, host->ioaddr + reg);
321} 325}
322 326
323static inline void sdhci_writew(struct sdhci_host *host, u16 val, int reg) 327static inline void sdhci_writew(struct sdhci_host *host, u16 val, int reg)
324{ 328{
325 if (unlikely(host->ops->writew)) 329 if (unlikely(host->ops->write_w))
326 host->ops->writew(host, val, reg); 330 host->ops->write_w(host, val, reg);
327 else 331 else
328 writew(val, host->ioaddr + reg); 332 writew(val, host->ioaddr + reg);
329} 333}
330 334
331static inline void sdhci_writeb(struct sdhci_host *host, u8 val, int reg) 335static inline void sdhci_writeb(struct sdhci_host *host, u8 val, int reg)
332{ 336{
333 if (unlikely(host->ops->writeb)) 337 if (unlikely(host->ops->write_b))
334 host->ops->writeb(host, val, reg); 338 host->ops->write_b(host, val, reg);
335 else 339 else
336 writeb(val, host->ioaddr + reg); 340 writeb(val, host->ioaddr + reg);
337} 341}
338 342
339static inline u32 sdhci_readl(struct sdhci_host *host, int reg) 343static inline u32 sdhci_readl(struct sdhci_host *host, int reg)
340{ 344{
341 if (unlikely(host->ops->readl)) 345 if (unlikely(host->ops->read_l))
342 return host->ops->readl(host, reg); 346 return host->ops->read_l(host, reg);
343 else 347 else
344 return readl(host->ioaddr + reg); 348 return readl(host->ioaddr + reg);
345} 349}
346 350
347static inline u16 sdhci_readw(struct sdhci_host *host, int reg) 351static inline u16 sdhci_readw(struct sdhci_host *host, int reg)
348{ 352{
349 if (unlikely(host->ops->readw)) 353 if (unlikely(host->ops->read_w))
350 return host->ops->readw(host, reg); 354 return host->ops->read_w(host, reg);
351 else 355 else
352 return readw(host->ioaddr + reg); 356 return readw(host->ioaddr + reg);
353} 357}
354 358
355static inline u8 sdhci_readb(struct sdhci_host *host, int reg) 359static inline u8 sdhci_readb(struct sdhci_host *host, int reg)
356{ 360{
357 if (unlikely(host->ops->readb)) 361 if (unlikely(host->ops->read_b))
358 return host->ops->readb(host, reg); 362 return host->ops->read_b(host, reg);
359 else 363 else
360 return readb(host->ioaddr + reg); 364 return readb(host->ioaddr + reg);
361} 365}
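The accessor rename above (readl/readw/readb to read_l/read_w/read_b, and the write counterparts) is mechanical, but any host driver that supplies its own register accessors (built with CONFIG_MMC_SDHCI_IO_ACCESSORS) has to adopt the new field names. A minimal sketch of a driver-private ops table; the myhc_* helpers are illustrative and apply no chip-specific fixups:

#include <linux/io.h>
#include "sdhci.h"	/* driver-private header for struct sdhci_ops */

static u16 myhc_read_w(struct sdhci_host *host, int reg)
{
	return readw(host->ioaddr + reg);
}

static void myhc_write_w(struct sdhci_host *host, u16 val, int reg)
{
	writew(val, host->ioaddr + reg);
}

static struct sdhci_ops myhc_sdhci_ops = {
	.read_w	 = myhc_read_w,
	.write_w = myhc_write_w,
};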
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index cb41e9c3ac07..e7507af3856e 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -519,7 +519,7 @@ static int sdricoh_pcmcia_suspend(struct pcmcia_device *link)
519{ 519{
520 struct mmc_host *mmc = link->priv; 520 struct mmc_host *mmc = link->priv;
521 dev_dbg(&link->dev, "suspend\n"); 521 dev_dbg(&link->dev, "suspend\n");
522 mmc_suspend_host(mmc, PMSG_SUSPEND); 522 mmc_suspend_host(mmc);
523 return 0; 523 return 0;
524} 524}
525 525
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
new file mode 100644
index 000000000000..eb97830c0344
--- /dev/null
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -0,0 +1,965 @@
1/*
2 * MMCIF eMMC driver.
3 *
4 * Copyright (C) 2010 Renesas Solutions Corp.
5 * Yusuke Goda <yusuke.goda.sx@renesas.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License.
10 *
11 *
12 * TODO
13 * 1. DMA
14 * 2. Power management
15 * 3. Handle MMC errors better
16 *
17 */
18
19#include <linux/dma-mapping.h>
20#include <linux/mmc/host.h>
21#include <linux/mmc/card.h>
22#include <linux/mmc/core.h>
23#include <linux/mmc/mmc.h>
24#include <linux/mmc/sdio.h>
25#include <linux/delay.h>
26#include <linux/platform_device.h>
27#include <linux/clk.h>
28#include <linux/mmc/sh_mmcif.h>
29
30#define DRIVER_NAME "sh_mmcif"
31#define DRIVER_VERSION "2010-04-28"
32
33#define MMCIF_CE_CMD_SET 0x00000000
34#define MMCIF_CE_ARG 0x00000008
35#define MMCIF_CE_ARG_CMD12 0x0000000C
36#define MMCIF_CE_CMD_CTRL 0x00000010
37#define MMCIF_CE_BLOCK_SET 0x00000014
38#define MMCIF_CE_CLK_CTRL 0x00000018
39#define MMCIF_CE_BUF_ACC 0x0000001C
40#define MMCIF_CE_RESP3 0x00000020
41#define MMCIF_CE_RESP2 0x00000024
42#define MMCIF_CE_RESP1 0x00000028
43#define MMCIF_CE_RESP0 0x0000002C
44#define MMCIF_CE_RESP_CMD12 0x00000030
45#define MMCIF_CE_DATA 0x00000034
46#define MMCIF_CE_INT 0x00000040
47#define MMCIF_CE_INT_MASK 0x00000044
48#define MMCIF_CE_HOST_STS1 0x00000048
49#define MMCIF_CE_HOST_STS2 0x0000004C
50#define MMCIF_CE_VERSION 0x0000007C
51
52/* CE_CMD_SET */
53#define CMD_MASK 0x3f000000
54#define CMD_SET_RTYP_NO ((0 << 23) | (0 << 22))
55#define CMD_SET_RTYP_6B ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
56#define CMD_SET_RTYP_17B ((1 << 23) | (0 << 22)) /* R2 */
57#define CMD_SET_RBSY (1 << 21) /* R1b */
58#define CMD_SET_CCSEN (1 << 20)
59#define CMD_SET_WDAT (1 << 19) /* 1: on data, 0: no data */
60#define CMD_SET_DWEN (1 << 18) /* 1: write, 0: read */
61#define CMD_SET_CMLTE (1 << 17) /* 1: multi block trans, 0: single */
62#define CMD_SET_CMD12EN (1 << 16) /* 1: CMD12 auto issue */
63#define CMD_SET_RIDXC_INDEX ((0 << 15) | (0 << 14)) /* index check */
64#define CMD_SET_RIDXC_BITS ((0 << 15) | (1 << 14)) /* check bits check */
65#define CMD_SET_RIDXC_NO ((1 << 15) | (0 << 14)) /* no check */
66#define CMD_SET_CRC7C ((0 << 13) | (0 << 12)) /* CRC7 check*/
67#define CMD_SET_CRC7C_BITS ((0 << 13) | (1 << 12)) /* check bits check*/
68#define CMD_SET_CRC7C_INTERNAL ((1 << 13) | (0 << 12)) /* internal CRC7 check*/
69#define CMD_SET_CRC16C (1 << 10) /* 0: CRC16 check*/
70#define CMD_SET_CRCSTE (1 << 8) /* 1: not receive CRC status */
71#define CMD_SET_TBIT (1 << 7) /* 1: transmission bit "Low" */
72#define CMD_SET_OPDM (1 << 6) /* 1: open/drain */
73#define CMD_SET_CCSH (1 << 5)
74#define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */
75#define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */
76#define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */
77
78/* CE_CMD_CTRL */
79#define CMD_CTRL_BREAK (1 << 0)
80
81/* CE_BLOCK_SET */
82#define BLOCK_SIZE_MASK 0x0000ffff
83
84/* CE_CLK_CTRL */
85#define CLK_ENABLE (1 << 24) /* 1: output mmc clock */
86#define CLK_CLEAR ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
87#define CLK_SUP_PCLK ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
88#define SRSPTO_256 ((1 << 13) | (0 << 12)) /* resp timeout */
89#define SRBSYTO_29 ((1 << 11) | (1 << 10) | \
90 (1 << 9) | (1 << 8)) /* resp busy timeout */
91#define SRWDTO_29 ((1 << 7) | (1 << 6) | \
92 (1 << 5) | (1 << 4)) /* read/write timeout */
93#define SCCSTO_29 ((1 << 3) | (1 << 2) | \
94 (1 << 1) | (1 << 0)) /* ccs timeout */
95
96/* CE_BUF_ACC */
97#define BUF_ACC_DMAWEN (1 << 25)
98#define BUF_ACC_DMAREN (1 << 24)
99#define BUF_ACC_BUSW_32 (0 << 17)
100#define BUF_ACC_BUSW_16 (1 << 17)
101#define BUF_ACC_ATYP (1 << 16)
102
103/* CE_INT */
104#define INT_CCSDE (1 << 29)
105#define INT_CMD12DRE (1 << 26)
106#define INT_CMD12RBE (1 << 25)
107#define INT_CMD12CRE (1 << 24)
108#define INT_DTRANE (1 << 23)
109#define INT_BUFRE (1 << 22)
110#define INT_BUFWEN (1 << 21)
111#define INT_BUFREN (1 << 20)
112#define INT_CCSRCV (1 << 19)
113#define INT_RBSYE (1 << 17)
114#define INT_CRSPE (1 << 16)
115#define INT_CMDVIO (1 << 15)
116#define INT_BUFVIO (1 << 14)
117#define INT_WDATERR (1 << 11)
118#define INT_RDATERR (1 << 10)
119#define INT_RIDXERR (1 << 9)
120#define INT_RSPERR (1 << 8)
121#define INT_CCSTO (1 << 5)
122#define INT_CRCSTO (1 << 4)
123#define INT_WDATTO (1 << 3)
124#define INT_RDATTO (1 << 2)
125#define INT_RBSYTO (1 << 1)
126#define INT_RSPTO (1 << 0)
127#define INT_ERR_STS (INT_CMDVIO | INT_BUFVIO | INT_WDATERR | \
128 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
129 INT_CCSTO | INT_CRCSTO | INT_WDATTO | \
130 INT_RDATTO | INT_RBSYTO | INT_RSPTO)
131
132/* CE_INT_MASK */
133#define MASK_ALL 0x00000000
134#define MASK_MCCSDE (1 << 29)
135#define MASK_MCMD12DRE (1 << 26)
136#define MASK_MCMD12RBE (1 << 25)
137#define MASK_MCMD12CRE (1 << 24)
138#define MASK_MDTRANE (1 << 23)
139#define MASK_MBUFRE (1 << 22)
140#define MASK_MBUFWEN (1 << 21)
141#define MASK_MBUFREN (1 << 20)
142#define MASK_MCCSRCV (1 << 19)
143#define MASK_MRBSYE (1 << 17)
144#define MASK_MCRSPE (1 << 16)
145#define MASK_MCMDVIO (1 << 15)
146#define MASK_MBUFVIO (1 << 14)
147#define MASK_MWDATERR (1 << 11)
148#define MASK_MRDATERR (1 << 10)
149#define MASK_MRIDXERR (1 << 9)
150#define MASK_MRSPERR (1 << 8)
151#define MASK_MCCSTO (1 << 5)
152#define MASK_MCRCSTO (1 << 4)
153#define MASK_MWDATTO (1 << 3)
154#define MASK_MRDATTO (1 << 2)
155#define MASK_MRBSYTO (1 << 1)
156#define MASK_MRSPTO (1 << 0)
157
158/* CE_HOST_STS1 */
159#define STS1_CMDSEQ (1 << 31)
160
161/* CE_HOST_STS2 */
162#define STS2_CRCSTE (1 << 31)
163#define STS2_CRC16E (1 << 30)
164#define STS2_AC12CRCE (1 << 29)
165#define STS2_RSPCRC7E (1 << 28)
166#define STS2_CRCSTEBE (1 << 27)
167#define STS2_RDATEBE (1 << 26)
168#define STS2_AC12REBE (1 << 25)
169#define STS2_RSPEBE (1 << 24)
170#define STS2_AC12IDXE (1 << 23)
171#define STS2_RSPIDXE (1 << 22)
172#define STS2_CCSTO (1 << 15)
173#define STS2_RDATTO (1 << 14)
174#define STS2_DATBSYTO (1 << 13)
175#define STS2_CRCSTTO (1 << 12)
176#define STS2_AC12BSYTO (1 << 11)
177#define STS2_RSPBSYTO (1 << 10)
178#define STS2_AC12RSPTO (1 << 9)
179#define STS2_RSPTO (1 << 8)
180#define STS2_CRC_ERR (STS2_CRCSTE | STS2_CRC16E | \
181 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
182#define STS2_TIMEOUT_ERR (STS2_CCSTO | STS2_RDATTO | \
183 STS2_DATBSYTO | STS2_CRCSTTO | \
184 STS2_AC12BSYTO | STS2_RSPBSYTO | \
185 STS2_AC12RSPTO | STS2_RSPTO)
186
187/* CE_VERSION */
188#define SOFT_RST_ON (1 << 31)
189#define SOFT_RST_OFF (0 << 31)
190
191#define CLKDEV_EMMC_DATA 52000000 /* 52MHz */
192#define CLKDEV_MMC_DATA 20000000 /* 20MHz */
193#define CLKDEV_INIT 400000 /* 400 KHz */
194
195struct sh_mmcif_host {
196 struct mmc_host *mmc;
197 struct mmc_data *data;
198 struct mmc_command *cmd;
199 struct platform_device *pd;
200 struct clk *hclk;
201 unsigned int clk;
202 int bus_width;
203 u16 wait_int;
204 u16 sd_error;
205 long timeout;
206 void __iomem *addr;
207 wait_queue_head_t intr_wait;
208};
209
210static inline u32 sh_mmcif_readl(struct sh_mmcif_host *host, unsigned int reg)
211{
212 return readl(host->addr + reg);
213}
214
215static inline void sh_mmcif_writel(struct sh_mmcif_host *host,
216 unsigned int reg, u32 val)
217{
218 writel(val, host->addr + reg);
219}
220
221static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
222 unsigned int reg, u32 val)
223{
224 writel(val | sh_mmcif_readl(host, reg), host->addr + reg);
225}
226
227static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
228 unsigned int reg, u32 val)
229{
230 writel(~val & sh_mmcif_readl(host, reg), host->addr + reg);
231}
232
233
234static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
235{
236 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
237
238 sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
239 sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);
240
241 if (!clk)
242 return;
243 if (p->sup_pclk && clk == host->clk)
244 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
245 else
246 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
247 (ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16));
248
249 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
250}
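For the non-SUP_PCLK branch just above, a worked example of the divider expression may help; the source and target rates are illustrative, and the mask simply mirrors the CLK_CLEAR bits defined earlier in this file:

#include <linux/log2.h>

#define EX_CLKDIV_MASK	0x000f0000	/* same bits [19:16] as CLK_CLEAR */

static u32 example_clkdiv_bits(unsigned int src, unsigned int target)
{
	/*
	 * src = 104000000, target = 20000000:
	 *   src / target              = 5
	 *   __rounddown_pow_of_two(5) = 4
	 *   ilog2(4)                  = 2
	 * so bits [19:16] of CE_CLK_CTRL end up holding 2 for this ratio.
	 */
	return EX_CLKDIV_MASK & (ilog2(__rounddown_pow_of_two(src / target)) << 16);
}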
251
252static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
253{
254 u32 tmp;
255
256 tmp = 0x010f0000 & sh_mmcif_readl(host, MMCIF_CE_CLK_CTRL);
257
258 sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_ON);
259 sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_OFF);
260 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
261 SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
262 /* byte swap on */
263 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
264}
265
266static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
267{
268 u32 state1, state2;
269 int ret, timeout = 10000000;
270
271 host->sd_error = 0;
272 host->wait_int = 0;
273
274 state1 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS1);
275 state2 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS2);
276 pr_debug("%s: ERR HOST_STS1 = %08x\n", \
277 DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS1));
278 pr_debug("%s: ERR HOST_STS2 = %08x\n", \
279 DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS2));
280
281 if (state1 & STS1_CMDSEQ) {
282 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
283 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
284 while (1) {
285 timeout--;
286 if (timeout < 0) {
287 pr_err(DRIVER_NAME": Forced end of " \
288 "command sequence timeout err\n");
289 return -EIO;
290 }
291 if (!(sh_mmcif_readl(host, MMCIF_CE_HOST_STS1)
292 & STS1_CMDSEQ))
293 break;
294 mdelay(1);
295 }
296 sh_mmcif_sync_reset(host);
297 pr_debug(DRIVER_NAME": Forced end of command sequence\n");
298 return -EIO;
299 }
300
301 if (state2 & STS2_CRC_ERR) {
302 pr_debug(DRIVER_NAME": Happened CRC error\n");
303 ret = -EIO;
304 } else if (state2 & STS2_TIMEOUT_ERR) {
305 pr_debug(DRIVER_NAME": Happened Timeout error\n");
306 ret = -ETIMEDOUT;
307 } else {
308 pr_debug(DRIVER_NAME": Happened End/Index error\n");
309 ret = -EIO;
310 }
311 return ret;
312}
313
314static int sh_mmcif_single_read(struct sh_mmcif_host *host,
315 struct mmc_request *mrq)
316{
317 struct mmc_data *data = mrq->data;
318 long time;
319 u32 blocksize, i, *p = sg_virt(data->sg);
320
321 host->wait_int = 0;
322
323 /* buf read enable */
324 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
325 time = wait_event_interruptible_timeout(host->intr_wait,
326 host->wait_int == 1 ||
327 host->sd_error == 1, host->timeout);
328 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
329 return sh_mmcif_error_manage(host);
330
331 host->wait_int = 0;
332 blocksize = (BLOCK_SIZE_MASK &
333 sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3;
334 for (i = 0; i < blocksize / 4; i++)
335 *p++ = sh_mmcif_readl(host, MMCIF_CE_DATA);
336
337 /* buffer read end */
338 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
339 time = wait_event_interruptible_timeout(host->intr_wait,
340 host->wait_int == 1 ||
341 host->sd_error == 1, host->timeout);
342 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
343 return sh_mmcif_error_manage(host);
344
345 host->wait_int = 0;
346 return 0;
347}
348
349static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
350 struct mmc_request *mrq)
351{
352 struct mmc_data *data = mrq->data;
353 long time;
354 u32 blocksize, i, j, sec, *p;
355
356 blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET);
357 for (j = 0; j < data->sg_len; j++) {
358 p = sg_virt(data->sg);
359 host->wait_int = 0;
360 for (sec = 0; sec < data->sg->length / blocksize; sec++) {
361 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
362 /* buf read enable */
363 time = wait_event_interruptible_timeout(host->intr_wait,
364 host->wait_int == 1 ||
365 host->sd_error == 1, host->timeout);
366
367 if (host->wait_int != 1 &&
368 (time == 0 || host->sd_error != 0))
369 return sh_mmcif_error_manage(host);
370
371 host->wait_int = 0;
372 for (i = 0; i < blocksize / 4; i++)
373 *p++ = sh_mmcif_readl(host, MMCIF_CE_DATA);
374 }
375 if (j < data->sg_len - 1)
376 data->sg++;
377 }
378 return 0;
379}
380
381static int sh_mmcif_single_write(struct sh_mmcif_host *host,
382 struct mmc_request *mrq)
383{
384 struct mmc_data *data = mrq->data;
385 long time;
386 u32 blocksize, i, *p = sg_virt(data->sg);
387
388 host->wait_int = 0;
389 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
390
391 /* buf write enable */
392 time = wait_event_interruptible_timeout(host->intr_wait,
393 host->wait_int == 1 ||
394 host->sd_error == 1, host->timeout);
395 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
396 return sh_mmcif_error_manage(host);
397
398 host->wait_int = 0;
399 blocksize = (BLOCK_SIZE_MASK &
400 sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3;
401 for (i = 0; i < blocksize / 4; i++)
402 sh_mmcif_writel(host, MMCIF_CE_DATA, *p++);
403
404 /* buffer write end */
405 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
406
407 time = wait_event_interruptible_timeout(host->intr_wait,
408 host->wait_int == 1 ||
409 host->sd_error == 1, host->timeout);
410 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
411 return sh_mmcif_error_manage(host);
412
413 host->wait_int = 0;
414 return 0;
415}
416
417static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
418 struct mmc_request *mrq)
419{
420 struct mmc_data *data = mrq->data;
421 long time;
422 u32 i, sec, j, blocksize, *p;
423
424 blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET);
425
426 for (j = 0; j < data->sg_len; j++) {
427 p = sg_virt(data->sg);
428 host->wait_int = 0;
429 for (sec = 0; sec < data->sg->length / blocksize; sec++) {
430 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
431 /* buf write enable*/
432 time = wait_event_interruptible_timeout(host->intr_wait,
433 host->wait_int == 1 ||
434 host->sd_error == 1, host->timeout);
435
436 if (host->wait_int != 1 &&
437 (time == 0 || host->sd_error != 0))
438 return sh_mmcif_error_manage(host);
439
440 host->wait_int = 0;
441 for (i = 0; i < blocksize / 4; i++)
442 sh_mmcif_writel(host, MMCIF_CE_DATA, *p++);
443 }
444 if (j < data->sg_len - 1)
445 data->sg++;
446 }
447 return 0;
448}
449
450static void sh_mmcif_get_response(struct sh_mmcif_host *host,
451 struct mmc_command *cmd)
452{
453 if (cmd->flags & MMC_RSP_136) {
454 cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP3);
455 cmd->resp[1] = sh_mmcif_readl(host, MMCIF_CE_RESP2);
456 cmd->resp[2] = sh_mmcif_readl(host, MMCIF_CE_RESP1);
457 cmd->resp[3] = sh_mmcif_readl(host, MMCIF_CE_RESP0);
458 } else
459 cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP0);
460}
461
462static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
463 struct mmc_command *cmd)
464{
465 cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP_CMD12);
466}
467
468static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
469 struct mmc_request *mrq, struct mmc_command *cmd, u32 opc)
470{
471 u32 tmp = 0;
472
473 /* Response Type check */
474 switch (mmc_resp_type(cmd)) {
475 case MMC_RSP_NONE:
476 tmp |= CMD_SET_RTYP_NO;
477 break;
478 case MMC_RSP_R1:
479 case MMC_RSP_R1B:
480 case MMC_RSP_R3:
481 tmp |= CMD_SET_RTYP_6B;
482 break;
483 case MMC_RSP_R2:
484 tmp |= CMD_SET_RTYP_17B;
485 break;
486 default:
487 pr_err(DRIVER_NAME": Not support type response.\n");
488 break;
489 }
490 switch (opc) {
491 /* RBSY */
492 case MMC_SWITCH:
493 case MMC_STOP_TRANSMISSION:
494 case MMC_SET_WRITE_PROT:
495 case MMC_CLR_WRITE_PROT:
496 case MMC_ERASE:
497 case MMC_GEN_CMD:
498 tmp |= CMD_SET_RBSY;
499 break;
500 }
501 /* WDAT / DATW */
502 if (host->data) {
503 tmp |= CMD_SET_WDAT;
504 switch (host->bus_width) {
505 case MMC_BUS_WIDTH_1:
506 tmp |= CMD_SET_DATW_1;
507 break;
508 case MMC_BUS_WIDTH_4:
509 tmp |= CMD_SET_DATW_4;
510 break;
511 case MMC_BUS_WIDTH_8:
512 tmp |= CMD_SET_DATW_8;
513 break;
514 default:
515 pr_err(DRIVER_NAME": Not support bus width.\n");
516 break;
517 }
518 }
519 /* DWEN */
520 if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
521 tmp |= CMD_SET_DWEN;
522 /* CMLTE/CMD12EN */
523 if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
524 tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
525 sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
526 mrq->data->blocks << 16);
527 }
528 /* RIDXC[1:0] check bits */
529 if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
530 opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
531 tmp |= CMD_SET_RIDXC_BITS;
532 /* RCRC7C[1:0] check bits */
533 if (opc == MMC_SEND_OP_COND)
534 tmp |= CMD_SET_CRC7C_BITS;
535 /* RCRC7C[1:0] internal CRC7 */
536 if (opc == MMC_ALL_SEND_CID ||
537 opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
538 tmp |= CMD_SET_CRC7C_INTERNAL;
539
540 return opc = ((opc << 24) | tmp);
541}
542
543static u32 sh_mmcif_data_trans(struct sh_mmcif_host *host,
544 struct mmc_request *mrq, u32 opc)
545{
546 u32 ret;
547
548 switch (opc) {
549 case MMC_READ_MULTIPLE_BLOCK:
550 ret = sh_mmcif_multi_read(host, mrq);
551 break;
552 case MMC_WRITE_MULTIPLE_BLOCK:
553 ret = sh_mmcif_multi_write(host, mrq);
554 break;
555 case MMC_WRITE_BLOCK:
556 ret = sh_mmcif_single_write(host, mrq);
557 break;
558 case MMC_READ_SINGLE_BLOCK:
559 case MMC_SEND_EXT_CSD:
560 ret = sh_mmcif_single_read(host, mrq);
561 break;
562 default:
563 pr_err(DRIVER_NAME": NOT SUPPORT CMD = d'%08d\n", opc);
564 ret = -EINVAL;
565 break;
566 }
567 return ret;
568}
569
570static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
571 struct mmc_request *mrq, struct mmc_command *cmd)
572{
573 long time;
574 int ret = 0, mask = 0;
575 u32 opc = cmd->opcode;
576
577 host->cmd = cmd;
578
579 switch (opc) {
580 /* response busy check */
581 case MMC_SWITCH:
582 case MMC_STOP_TRANSMISSION:
583 case MMC_SET_WRITE_PROT:
584 case MMC_CLR_WRITE_PROT:
585 case MMC_ERASE:
586 case MMC_GEN_CMD:
587 mask = MASK_MRBSYE;
588 break;
589 default:
590 mask = MASK_MCRSPE;
591 break;
592 }
593 mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
594 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
595 MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
596 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;
597
598 if (host->data) {
599 sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, 0);
600 sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, mrq->data->blksz);
601 }
602 opc = sh_mmcif_set_cmd(host, mrq, cmd, opc);
603
604 sh_mmcif_writel(host, MMCIF_CE_INT, 0xD80430C0);
605 sh_mmcif_writel(host, MMCIF_CE_INT_MASK, mask);
606 /* set arg */
607 sh_mmcif_writel(host, MMCIF_CE_ARG, cmd->arg);
608 host->wait_int = 0;
609 /* set cmd */
610 sh_mmcif_writel(host, MMCIF_CE_CMD_SET, opc);
611
612 time = wait_event_interruptible_timeout(host->intr_wait,
613 host->wait_int == 1 || host->sd_error == 1, host->timeout);
614 if (host->wait_int != 1 && time == 0) {
615 cmd->error = sh_mmcif_error_manage(host);
616 return;
617 }
618 if (host->sd_error) {
619 switch (cmd->opcode) {
620 case MMC_ALL_SEND_CID:
621 case MMC_SELECT_CARD:
622 case MMC_APP_CMD:
623 cmd->error = -ETIMEDOUT;
624 break;
625 default:
626 pr_debug("%s: Cmd(d'%d) err\n",
627 DRIVER_NAME, cmd->opcode);
628 cmd->error = sh_mmcif_error_manage(host);
629 break;
630 }
631 host->sd_error = 0;
632 host->wait_int = 0;
633 return;
634 }
635 if (!(cmd->flags & MMC_RSP_PRESENT)) {
636 cmd->error = ret;
637 host->wait_int = 0;
638 return;
639 }
640 if (host->wait_int == 1) {
641 sh_mmcif_get_response(host, cmd);
642 host->wait_int = 0;
643 }
644 if (host->data) {
645 ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
646 if (ret < 0)
647 mrq->data->bytes_xfered = 0;
648 else
649 mrq->data->bytes_xfered =
650 mrq->data->blocks * mrq->data->blksz;
651 }
652 cmd->error = ret;
653}
654
655static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
656 struct mmc_request *mrq, struct mmc_command *cmd)
657{
658 long time;
659
660 if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
661 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
662 else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
663 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
664 else {
665		pr_err(DRIVER_NAME": unsupported stop cmd\n");
666 cmd->error = sh_mmcif_error_manage(host);
667 return;
668 }
669
670 time = wait_event_interruptible_timeout(host->intr_wait,
671 host->wait_int == 1 ||
672 host->sd_error == 1, host->timeout);
673 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) {
674 cmd->error = sh_mmcif_error_manage(host);
675 return;
676 }
677 sh_mmcif_get_cmd12response(host, cmd);
678 host->wait_int = 0;
679 cmd->error = 0;
680}
681
682static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
683{
684 struct sh_mmcif_host *host = mmc_priv(mmc);
685
686 switch (mrq->cmd->opcode) {
687	/* MMCIF does not support SD/SDIO commands */
688 case SD_IO_SEND_OP_COND:
689 case MMC_APP_CMD:
690 mrq->cmd->error = -ETIMEDOUT;
691 mmc_request_done(mmc, mrq);
692 return;
693 case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
694 if (!mrq->data) {
695			/* send_if_cond cmd (not supported) */
696 mrq->cmd->error = -ETIMEDOUT;
697 mmc_request_done(mmc, mrq);
698 return;
699 }
700 break;
701 default:
702 break;
703 }
704 host->data = mrq->data;
705 sh_mmcif_start_cmd(host, mrq, mrq->cmd);
706 host->data = NULL;
707
708 if (mrq->cmd->error != 0) {
709 mmc_request_done(mmc, mrq);
710 return;
711 }
712 if (mrq->stop)
713 sh_mmcif_stop_cmd(host, mrq, mrq->stop);
714 mmc_request_done(mmc, mrq);
715}
716
717static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
718{
719 struct sh_mmcif_host *host = mmc_priv(mmc);
720 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
721
722 if (ios->power_mode == MMC_POWER_OFF) {
723 /* clock stop */
724 sh_mmcif_clock_control(host, 0);
725 if (p->down_pwr)
726 p->down_pwr(host->pd);
727 return;
728 } else if (ios->power_mode == MMC_POWER_UP) {
729 if (p->set_pwr)
730 p->set_pwr(host->pd, ios->power_mode);
731 }
732
733 if (ios->clock)
734 sh_mmcif_clock_control(host, ios->clock);
735
736 host->bus_width = ios->bus_width;
737}
738
739static struct mmc_host_ops sh_mmcif_ops = {
740 .request = sh_mmcif_request,
741 .set_ios = sh_mmcif_set_ios,
742};
743
744static void sh_mmcif_detect(struct mmc_host *mmc)
745{
746 mmc_detect_change(mmc, 0);
747}
748
749static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
750{
751 struct sh_mmcif_host *host = dev_id;
752 u32 state = 0;
753 int err = 0;
754
755 state = sh_mmcif_readl(host, MMCIF_CE_INT);
756
757 if (state & INT_RBSYE) {
758 sh_mmcif_writel(host, MMCIF_CE_INT, ~(INT_RBSYE | INT_CRSPE));
759 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
760 } else if (state & INT_CRSPE) {
761 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_CRSPE);
762 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
763 } else if (state & INT_BUFREN) {
764 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFREN);
765 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
766 } else if (state & INT_BUFWEN) {
767 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFWEN);
768 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
769 } else if (state & INT_CMD12DRE) {
770 sh_mmcif_writel(host, MMCIF_CE_INT,
771 ~(INT_CMD12DRE | INT_CMD12RBE |
772 INT_CMD12CRE | INT_BUFRE));
773 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
774 } else if (state & INT_BUFRE) {
775 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFRE);
776 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
777 } else if (state & INT_DTRANE) {
778 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_DTRANE);
779 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
780 } else if (state & INT_CMD12RBE) {
781 sh_mmcif_writel(host, MMCIF_CE_INT,
782 ~(INT_CMD12RBE | INT_CMD12CRE));
783 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
784 } else if (state & INT_ERR_STS) {
785 /* err interrupts */
786 sh_mmcif_writel(host, MMCIF_CE_INT, ~state);
787 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
788 err = 1;
789 } else {
790		pr_debug("%s: unsupported interrupt\n", DRIVER_NAME);
791 sh_mmcif_writel(host, MMCIF_CE_INT, ~state);
792 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
793 err = 1;
794 }
795 if (err) {
796 host->sd_error = 1;
797 pr_debug("%s: int err state = %08x\n", DRIVER_NAME, state);
798 }
799 host->wait_int = 1;
800 wake_up(&host->intr_wait);
801
802 return IRQ_HANDLED;
803}
804
805static int __devinit sh_mmcif_probe(struct platform_device *pdev)
806{
807 int ret = 0, irq[2];
808 struct mmc_host *mmc;
809 struct sh_mmcif_host *host = NULL;
810 struct sh_mmcif_plat_data *pd = NULL;
811 struct resource *res;
812 void __iomem *reg;
813 char clk_name[8];
814
815 irq[0] = platform_get_irq(pdev, 0);
816 irq[1] = platform_get_irq(pdev, 1);
817 if (irq[0] < 0 || irq[1] < 0) {
818		pr_err(DRIVER_NAME": failed to get irq\n");
819 return -ENXIO;
820 }
821 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
822 if (!res) {
823 dev_err(&pdev->dev, "platform_get_resource error.\n");
824 return -ENXIO;
825 }
826 reg = ioremap(res->start, resource_size(res));
827 if (!reg) {
828 dev_err(&pdev->dev, "ioremap error.\n");
829 return -ENOMEM;
830 }
831	pd = pdev->dev.platform_data;
832 if (!pd) {
833 dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
834 ret = -ENXIO;
835 goto clean_up;
836 }
837 mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
838 if (!mmc) {
839 ret = -ENOMEM;
840 goto clean_up;
841 }
842 host = mmc_priv(mmc);
843 host->mmc = mmc;
844 host->addr = reg;
845 host->timeout = 1000;
846
847 snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
848 host->hclk = clk_get(&pdev->dev, clk_name);
849 if (IS_ERR(host->hclk)) {
850 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
851 ret = PTR_ERR(host->hclk);
852 goto clean_up1;
853 }
854 clk_enable(host->hclk);
855 host->clk = clk_get_rate(host->hclk);
856 host->pd = pdev;
857
858 init_waitqueue_head(&host->intr_wait);
859
860 mmc->ops = &sh_mmcif_ops;
861 mmc->f_max = host->clk;
862	/* close to 400 kHz */
863 if (mmc->f_max < 51200000)
864 mmc->f_min = mmc->f_max / 128;
865 else if (mmc->f_max < 102400000)
866 mmc->f_min = mmc->f_max / 256;
867 else
868 mmc->f_min = mmc->f_max / 512;
869 if (pd->ocr)
870 mmc->ocr_avail = pd->ocr;
871 mmc->caps = MMC_CAP_MMC_HIGHSPEED;
872 if (pd->caps)
873 mmc->caps |= pd->caps;
874 mmc->max_phys_segs = 128;
875 mmc->max_hw_segs = 128;
876 mmc->max_blk_size = 512;
877 mmc->max_blk_count = 65535;
878 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
879 mmc->max_seg_size = mmc->max_req_size;
880
881 sh_mmcif_sync_reset(host);
882 platform_set_drvdata(pdev, host);
883 mmc_add_host(mmc);
884
885 ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
886 if (ret) {
887 pr_err(DRIVER_NAME": request_irq error (sh_mmc:error)\n");
888 goto clean_up2;
889 }
890 ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
891 if (ret) {
892 free_irq(irq[0], host);
893 pr_err(DRIVER_NAME": request_irq error (sh_mmc:int)\n");
894 goto clean_up2;
895 }
896
897 sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL);
898 sh_mmcif_detect(host->mmc);
899
900 pr_info("%s: driver version %s\n", DRIVER_NAME, DRIVER_VERSION);
901 pr_debug("%s: chip ver H'%04x\n", DRIVER_NAME,
902 sh_mmcif_readl(host, MMCIF_CE_VERSION) & 0x0000ffff);
903 return ret;
904
905clean_up2:
906 clk_disable(host->hclk);
907clean_up1:
908 mmc_free_host(mmc);
909clean_up:
910 if (reg)
911 iounmap(reg);
912 return ret;
913}
914
915static int __devexit sh_mmcif_remove(struct platform_device *pdev)
916{
917 struct sh_mmcif_host *host = platform_get_drvdata(pdev);
918 int irq[2];
919
920 sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL);
921
922 irq[0] = platform_get_irq(pdev, 0);
923 irq[1] = platform_get_irq(pdev, 1);
924
925 if (host->addr)
926 iounmap(host->addr);
927
928 platform_set_drvdata(pdev, NULL);
929 mmc_remove_host(host->mmc);
930
931 free_irq(irq[0], host);
932 free_irq(irq[1], host);
933
934 clk_disable(host->hclk);
935 mmc_free_host(host->mmc);
936
937 return 0;
938}
939
940static struct platform_driver sh_mmcif_driver = {
941 .probe = sh_mmcif_probe,
942 .remove = sh_mmcif_remove,
943 .driver = {
944 .name = DRIVER_NAME,
945 },
946};
947
948static int __init sh_mmcif_init(void)
949{
950 return platform_driver_register(&sh_mmcif_driver);
951}
952
953static void __exit sh_mmcif_exit(void)
954{
955 platform_driver_unregister(&sh_mmcif_driver);
956}
957
958module_init(sh_mmcif_init);
959module_exit(sh_mmcif_exit);
960
961
962MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
963MODULE_LICENSE("GPL");
964MODULE_ALIAS(DRIVER_NAME);
965MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");
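
The sh_mmcif driver above takes its board configuration from sh_mmcif_plat_data: the set_pwr/down_pwr callbacks used in sh_mmcif_set_ios() and the ocr/caps fields read in probe. A minimal, hypothetical sketch of the board-side registration follows; the header path, the "myboard_" names, the register base, the IRQ numbers and the extra capability bit are assumptions for illustration, not taken from this patch.

/*
 * Hypothetical board glue for the sh_mmcif driver above.  Only the
 * sh_mmcif_plat_data fields and callback signatures are taken from
 * the driver code; everything prefixed "myboard_" is made up.
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sh_mmcif.h>	/* assumed home of sh_mmcif_plat_data */

static void myboard_mmcif_set_pwr(struct platform_device *pdev, int state)
{
	/* switch the card's supply regulator on here */
}

static void myboard_mmcif_down_pwr(struct platform_device *pdev)
{
	/* switch the card's supply regulator off here */
}

static struct sh_mmcif_plat_data myboard_mmcif_plat = {
	.set_pwr	= myboard_mmcif_set_pwr,
	.down_pwr	= myboard_mmcif_down_pwr,
	.ocr		= MMC_VDD_32_33 | MMC_VDD_33_34,
	.caps		= MMC_CAP_8_BIT_DATA,	/* ORed into MMC_CAP_MMC_HIGHSPEED by probe */
};

static struct resource myboard_mmcif_resources[] = {
	{
		.start	= 0xa4ca0000,		/* assumed base address */
		.end	= 0xa4ca00ff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 114,			/* assumed error IRQ */
		.flags	= IORESOURCE_IRQ,
	}, {
		.start	= 115,			/* assumed normal IRQ */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device myboard_mmcif_device = {
	.name		= "sh_mmcif",		/* must match DRIVER_NAME */
	.id		= 0,
	.dev		= {
		.platform_data	= &myboard_mmcif_plat,
	},
	.num_resources	= ARRAY_SIZE(myboard_mmcif_resources),
	.resource	= myboard_mmcif_resources,
};

Note that probe also does clk_get(&pdev->dev, "mmc0") for .id = 0, so the board's clock framework has to export a clock under that name; whether .name really expands from DRIVER_NAME to "sh_mmcif" is not visible in this hunk.
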
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 82554ddec6b3..cec99958b652 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -1032,7 +1032,7 @@ static void tifm_sd_remove(struct tifm_dev *sock)
1032 1032
1033static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state) 1033static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
1034{ 1034{
1035 return mmc_suspend_host(tifm_get_drvdata(sock), state); 1035 return mmc_suspend_host(tifm_get_drvdata(sock));
1036} 1036}
1037 1037
1038static int tifm_sd_resume(struct tifm_dev *sock) 1038static int tifm_sd_resume(struct tifm_dev *sock)
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index b2b577f6afd4..ee7d0a5a51c4 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -29,6 +29,7 @@
29#include <linux/irq.h> 29#include <linux/irq.h>
30#include <linux/device.h> 30#include <linux/device.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/dmaengine.h>
32#include <linux/mmc/host.h> 33#include <linux/mmc/host.h>
33#include <linux/mfd/core.h> 34#include <linux/mfd/core.h>
34#include <linux/mfd/tmio.h> 35#include <linux/mfd/tmio.h>
@@ -131,8 +132,8 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
131 132
132 host->cmd = cmd; 133 host->cmd = cmd;
133 134
134/* FIXME - this seems to be ok comented out but the spec suggest this bit should 135/* FIXME - this seems to be ok commented out but the spec suggest this bit
135 * be set when issuing app commands. 136 * should be set when issuing app commands.
136 * if(cmd->flags & MMC_FLAG_ACMD) 137 * if(cmd->flags & MMC_FLAG_ACMD)
137 * c |= APP_CMD; 138 * c |= APP_CMD;
138 */ 139 */
@@ -155,12 +156,12 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
155 return 0; 156 return 0;
156} 157}
157 158
158/* This chip always returns (at least?) as much data as you ask for. 159/*
160 * This chip always returns (at least?) as much data as you ask for.
159 * I'm unsure what happens if you ask for less than a block. This should be 161 * I'm unsure what happens if you ask for less than a block. This should be
160 * looked into to ensure that a funny length read doesnt hose the controller. 162 * looked into to ensure that a funny length read doesnt hose the controller.
161 *
162 */ 163 */
163static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host) 164static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
164{ 165{
165 struct mmc_data *data = host->data; 166 struct mmc_data *data = host->data;
166 unsigned short *buf; 167 unsigned short *buf;
@@ -180,7 +181,7 @@ static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
180 count = data->blksz; 181 count = data->blksz;
181 182
182 pr_debug("count: %08x offset: %08x flags %08x\n", 183 pr_debug("count: %08x offset: %08x flags %08x\n",
183 count, host->sg_off, data->flags); 184 count, host->sg_off, data->flags);
184 185
185 /* Transfer the data */ 186 /* Transfer the data */
186 if (data->flags & MMC_DATA_READ) 187 if (data->flags & MMC_DATA_READ)
@@ -198,7 +199,7 @@ static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
198 return; 199 return;
199} 200}
200 201
201static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host) 202static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
202{ 203{
203 struct mmc_data *data = host->data; 204 struct mmc_data *data = host->data;
204 struct mmc_command *stop; 205 struct mmc_command *stop;
@@ -206,7 +207,7 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
206 host->data = NULL; 207 host->data = NULL;
207 208
208 if (!data) { 209 if (!data) {
209 pr_debug("Spurious data end IRQ\n"); 210 dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
210 return; 211 return;
211 } 212 }
212 stop = data->stop; 213 stop = data->stop;
@@ -219,7 +220,8 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
219 220
220 pr_debug("Completed data request\n"); 221 pr_debug("Completed data request\n");
221 222
222 /*FIXME - other drivers allow an optional stop command of any given type 223 /*
224 * FIXME: other drivers allow an optional stop command of any given type
223 * which we dont do, as the chip can auto generate them. 225 * which we dont do, as the chip can auto generate them.
224 * Perhaps we can be smarter about when to use auto CMD12 and 226 * Perhaps we can be smarter about when to use auto CMD12 and
225 * only issue the auto request when we know this is the desired 227 * only issue the auto request when we know this is the desired
@@ -227,10 +229,17 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
227 * upper layers expect. For now, we do what works. 229 * upper layers expect. For now, we do what works.
228 */ 230 */
229 231
230 if (data->flags & MMC_DATA_READ) 232 if (data->flags & MMC_DATA_READ) {
231 disable_mmc_irqs(host, TMIO_MASK_READOP); 233 if (!host->chan_rx)
232 else 234 disable_mmc_irqs(host, TMIO_MASK_READOP);
233 disable_mmc_irqs(host, TMIO_MASK_WRITEOP); 235 dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
236 host->mrq);
237 } else {
238 if (!host->chan_tx)
239 disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
240 dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
241 host->mrq);
242 }
234 243
235 if (stop) { 244 if (stop) {
236 if (stop->opcode == 12 && !stop->arg) 245 if (stop->opcode == 12 && !stop->arg)
@@ -242,7 +251,35 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
242 tmio_mmc_finish_request(host); 251 tmio_mmc_finish_request(host);
243} 252}
244 253
245static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, 254static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
255{
256 struct mmc_data *data = host->data;
257
258 if (!data)
259 return;
260
261 if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
262 /*
263 * Has all data been written out yet? Testing on SuperH showed,
264 * that in most cases the first interrupt comes already with the
265 * BUSY status bit clear, but on some operations, like mount or
266 * in the beginning of a write / sync / umount, there is one
267 * DATAEND interrupt with the BUSY bit set, in this cases
268 * waiting for one more interrupt fixes the problem.
269 */
270 if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
271 disable_mmc_irqs(host, TMIO_STAT_DATAEND);
272 tasklet_schedule(&host->dma_complete);
273 }
274 } else if (host->chan_rx && (data->flags & MMC_DATA_READ)) {
275 disable_mmc_irqs(host, TMIO_STAT_DATAEND);
276 tasklet_schedule(&host->dma_complete);
277 } else {
278 tmio_mmc_do_data_irq(host);
279 }
280}
281
282static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
246 unsigned int stat) 283 unsigned int stat)
247{ 284{
248 struct mmc_command *cmd = host->cmd; 285 struct mmc_command *cmd = host->cmd;
@@ -282,10 +319,16 @@ static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
282 * If theres no data or we encountered an error, finish now. 319 * If theres no data or we encountered an error, finish now.
283 */ 320 */
284 if (host->data && !cmd->error) { 321 if (host->data && !cmd->error) {
285 if (host->data->flags & MMC_DATA_READ) 322 if (host->data->flags & MMC_DATA_READ) {
286 enable_mmc_irqs(host, TMIO_MASK_READOP); 323 if (!host->chan_rx)
287 else 324 enable_mmc_irqs(host, TMIO_MASK_READOP);
288 enable_mmc_irqs(host, TMIO_MASK_WRITEOP); 325 } else {
326 struct dma_chan *chan = host->chan_tx;
327 if (!chan)
328 enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
329 else
330 tasklet_schedule(&host->dma_issue);
331 }
289 } else { 332 } else {
290 tmio_mmc_finish_request(host); 333 tmio_mmc_finish_request(host);
291 } 334 }
@@ -293,7 +336,6 @@ static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
293 return; 336 return;
294} 337}
295 338
296
297static irqreturn_t tmio_mmc_irq(int irq, void *devid) 339static irqreturn_t tmio_mmc_irq(int irq, void *devid)
298{ 340{
299 struct tmio_mmc_host *host = devid; 341 struct tmio_mmc_host *host = devid;
@@ -311,7 +353,7 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
311 if (!ireg) { 353 if (!ireg) {
312 disable_mmc_irqs(host, status & ~irq_mask); 354 disable_mmc_irqs(host, status & ~irq_mask);
313 355
314 pr_debug("tmio_mmc: Spurious irq, disabling! " 356 pr_warning("tmio_mmc: Spurious irq, disabling! "
315 "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); 357 "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
316 pr_debug_status(status); 358 pr_debug_status(status);
317 359
@@ -363,16 +405,265 @@ out:
363 return IRQ_HANDLED; 405 return IRQ_HANDLED;
364} 406}
365 407
408#ifdef CONFIG_TMIO_MMC_DMA
409static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
410{
411#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
412 /* Switch DMA mode on or off - SuperH specific? */
413 sd_ctrl_write16(host, 0xd8, enable ? 2 : 0);
414#endif
415}
416
417static void tmio_dma_complete(void *arg)
418{
419 struct tmio_mmc_host *host = arg;
420
421 dev_dbg(&host->pdev->dev, "Command completed\n");
422
423 if (!host->data)
424 dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
425 else
426 enable_mmc_irqs(host, TMIO_STAT_DATAEND);
427}
428
429static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
430{
431 struct scatterlist *sg = host->sg_ptr;
432 struct dma_async_tx_descriptor *desc = NULL;
433 struct dma_chan *chan = host->chan_rx;
434 int ret;
435
436 ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
437 if (ret > 0) {
438 host->dma_sglen = ret;
439 desc = chan->device->device_prep_slave_sg(chan, sg, ret,
440 DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
441 }
442
443 if (desc) {
444 host->desc = desc;
445 desc->callback = tmio_dma_complete;
446 desc->callback_param = host;
447 host->cookie = desc->tx_submit(desc);
448 if (host->cookie < 0) {
449 host->desc = NULL;
450 ret = host->cookie;
451 } else {
452 chan->device->device_issue_pending(chan);
453 }
454 }
455 dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
456 __func__, host->sg_len, ret, host->cookie, host->mrq);
457
458 if (!host->desc) {
459 /* DMA failed, fall back to PIO */
460 if (ret >= 0)
461 ret = -EIO;
462 host->chan_rx = NULL;
463 dma_release_channel(chan);
464 /* Free the Tx channel too */
465 chan = host->chan_tx;
466 if (chan) {
467 host->chan_tx = NULL;
468 dma_release_channel(chan);
469 }
470 dev_warn(&host->pdev->dev,
471 "DMA failed: %d, falling back to PIO\n", ret);
472 tmio_mmc_enable_dma(host, false);
473 reset(host);
474 /* Fail this request, let above layers recover */
475 host->mrq->cmd->error = ret;
476 tmio_mmc_finish_request(host);
477 }
478
479 dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
480 desc, host->cookie, host->sg_len);
481
482 return ret > 0 ? 0 : ret;
483}
484
485static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
486{
487 struct scatterlist *sg = host->sg_ptr;
488 struct dma_async_tx_descriptor *desc = NULL;
489 struct dma_chan *chan = host->chan_tx;
490 int ret;
491
492 ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
493 if (ret > 0) {
494 host->dma_sglen = ret;
495 desc = chan->device->device_prep_slave_sg(chan, sg, ret,
496 DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
497 }
498
499 if (desc) {
500 host->desc = desc;
501 desc->callback = tmio_dma_complete;
502 desc->callback_param = host;
503 host->cookie = desc->tx_submit(desc);
504 if (host->cookie < 0) {
505 host->desc = NULL;
506 ret = host->cookie;
507 }
508 }
509 dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
510 __func__, host->sg_len, ret, host->cookie, host->mrq);
511
512 if (!host->desc) {
513 /* DMA failed, fall back to PIO */
514 if (ret >= 0)
515 ret = -EIO;
516 host->chan_tx = NULL;
517 dma_release_channel(chan);
518 /* Free the Rx channel too */
519 chan = host->chan_rx;
520 if (chan) {
521 host->chan_rx = NULL;
522 dma_release_channel(chan);
523 }
524 dev_warn(&host->pdev->dev,
525 "DMA failed: %d, falling back to PIO\n", ret);
526 tmio_mmc_enable_dma(host, false);
527 reset(host);
528 /* Fail this request, let above layers recover */
529 host->mrq->cmd->error = ret;
530 tmio_mmc_finish_request(host);
531 }
532
533 dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
534 desc, host->cookie);
535
536 return ret > 0 ? 0 : ret;
537}
538
539static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
540 struct mmc_data *data)
541{
542 if (data->flags & MMC_DATA_READ) {
543 if (host->chan_rx)
544 return tmio_mmc_start_dma_rx(host);
545 } else {
546 if (host->chan_tx)
547 return tmio_mmc_start_dma_tx(host);
548 }
549
550 return 0;
551}
552
553static void tmio_issue_tasklet_fn(unsigned long priv)
554{
555 struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
556 struct dma_chan *chan = host->chan_tx;
557
558 chan->device->device_issue_pending(chan);
559}
560
561static void tmio_tasklet_fn(unsigned long arg)
562{
563 struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
564
565 if (host->data->flags & MMC_DATA_READ)
566 dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
567 DMA_FROM_DEVICE);
568 else
569 dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
570 DMA_TO_DEVICE);
571
572 tmio_mmc_do_data_irq(host);
573}
574
575/* It might be necessary to make filter MFD specific */
575/* It might be necessary to make the filter MFD-specific */
576static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
577{
578 dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
579 chan->private = arg;
580 return true;
581}
582
583static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
584 struct tmio_mmc_data *pdata)
585{
586 host->cookie = -EINVAL;
587 host->desc = NULL;
588
589 /* We can only either use DMA for both Tx and Rx or not use it at all */
590 if (pdata->dma) {
591 dma_cap_mask_t mask;
592
593 dma_cap_zero(mask);
594 dma_cap_set(DMA_SLAVE, mask);
595
596 host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
597 pdata->dma->chan_priv_tx);
598 dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
599 host->chan_tx);
600
601 if (!host->chan_tx)
602 return;
603
604 host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
605 pdata->dma->chan_priv_rx);
606 dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
607 host->chan_rx);
608
609 if (!host->chan_rx) {
610 dma_release_channel(host->chan_tx);
611 host->chan_tx = NULL;
612 return;
613 }
614
615 tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host);
616 tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host);
617
618 tmio_mmc_enable_dma(host, true);
619 }
620}
621
622static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
623{
624 if (host->chan_tx) {
625 struct dma_chan *chan = host->chan_tx;
626 host->chan_tx = NULL;
627 dma_release_channel(chan);
628 }
629 if (host->chan_rx) {
630 struct dma_chan *chan = host->chan_rx;
631 host->chan_rx = NULL;
632 dma_release_channel(chan);
633 }
634
635 host->cookie = -EINVAL;
636 host->desc = NULL;
637}
638#else
639static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
640 struct mmc_data *data)
641{
642 return 0;
643}
644
645static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
646 struct tmio_mmc_data *pdata)
647{
648 host->chan_tx = NULL;
649 host->chan_rx = NULL;
650}
651
652static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
653{
654}
655#endif
656
366static int tmio_mmc_start_data(struct tmio_mmc_host *host, 657static int tmio_mmc_start_data(struct tmio_mmc_host *host,
367 struct mmc_data *data) 658 struct mmc_data *data)
368{ 659{
369 pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n", 660 pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
370 data->blksz, data->blocks); 661 data->blksz, data->blocks);
371 662
372 /* Hardware cannot perform 1 and 2 byte requests in 4 bit mode */ 663 /* Hardware cannot perform 1 and 2 byte requests in 4 bit mode */
373 if (data->blksz < 4 && host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) { 664 if (data->blksz < 4 && host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
374 printk(KERN_ERR "%s: %d byte block unsupported in 4 bit mode\n", 665 pr_err("%s: %d byte block unsupported in 4 bit mode\n",
375 mmc_hostname(host->mmc), data->blksz); 666 mmc_hostname(host->mmc), data->blksz);
376 return -EINVAL; 667 return -EINVAL;
377 } 668 }
378 669
@@ -383,7 +674,7 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
383 sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); 674 sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
384 sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); 675 sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
385 676
386 return 0; 677 return tmio_mmc_start_dma(host, data);
387} 678}
388 679
389/* Process requests from the MMC layer */ 680/* Process requests from the MMC layer */
@@ -404,7 +695,6 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
404 } 695 }
405 696
406 ret = tmio_mmc_start_command(host, mrq->cmd); 697 ret = tmio_mmc_start_command(host, mrq->cmd);
407
408 if (!ret) 698 if (!ret)
409 return; 699 return;
410 700
@@ -458,11 +748,14 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
458static int tmio_mmc_get_ro(struct mmc_host *mmc) 748static int tmio_mmc_get_ro(struct mmc_host *mmc)
459{ 749{
460 struct tmio_mmc_host *host = mmc_priv(mmc); 750 struct tmio_mmc_host *host = mmc_priv(mmc);
751 struct mfd_cell *cell = host->pdev->dev.platform_data;
752 struct tmio_mmc_data *pdata = cell->driver_data;
461 753
462 return (sd_ctrl_read16(host, CTL_STATUS) & TMIO_STAT_WRPROTECT) ? 0 : 1; 754 return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
755 (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1;
463} 756}
464 757
465static struct mmc_host_ops tmio_mmc_ops = { 758static const struct mmc_host_ops tmio_mmc_ops = {
466 .request = tmio_mmc_request, 759 .request = tmio_mmc_request,
467 .set_ios = tmio_mmc_set_ios, 760 .set_ios = tmio_mmc_set_ios,
468 .get_ro = tmio_mmc_get_ro, 761 .get_ro = tmio_mmc_get_ro,
@@ -475,7 +768,7 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
475 struct mmc_host *mmc = platform_get_drvdata(dev); 768 struct mmc_host *mmc = platform_get_drvdata(dev);
476 int ret; 769 int ret;
477 770
478 ret = mmc_suspend_host(mmc, state); 771 ret = mmc_suspend_host(mmc);
479 772
480 /* Tell MFD core it can disable us now.*/ 773 /* Tell MFD core it can disable us now.*/
481 if (!ret && cell->disable) 774 if (!ret && cell->disable)
@@ -515,6 +808,7 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
515 struct tmio_mmc_host *host; 808 struct tmio_mmc_host *host;
516 struct mmc_host *mmc; 809 struct mmc_host *mmc;
517 int ret = -EINVAL; 810 int ret = -EINVAL;
811 u32 irq_mask = TMIO_MASK_CMD;
518 812
519 if (dev->num_resources != 2) 813 if (dev->num_resources != 2)
520 goto out; 814 goto out;
@@ -553,7 +847,10 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
553 mmc->caps |= pdata->capabilities; 847 mmc->caps |= pdata->capabilities;
554 mmc->f_max = pdata->hclk; 848 mmc->f_max = pdata->hclk;
555 mmc->f_min = mmc->f_max / 512; 849 mmc->f_min = mmc->f_max / 512;
556 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 850 if (pdata->ocr_mask)
851 mmc->ocr_avail = pdata->ocr_mask;
852 else
853 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
557 854
558 /* Tell the MFD core we are ready to be enabled */ 855 /* Tell the MFD core we are ready to be enabled */
559 if (cell->enable) { 856 if (cell->enable) {
@@ -578,13 +875,20 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
578 if (ret) 875 if (ret)
579 goto cell_disable; 876 goto cell_disable;
580 877
878 /* See if we also get DMA */
879 tmio_mmc_request_dma(host, pdata);
880
581 mmc_add_host(mmc); 881 mmc_add_host(mmc);
582 882
583 printk(KERN_INFO "%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), 883 pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
584 (unsigned long)host->ctl, host->irq); 884 (unsigned long)host->ctl, host->irq);
585 885
586 /* Unmask the IRQs we want to know about */ 886 /* Unmask the IRQs we want to know about */
587 enable_mmc_irqs(host, TMIO_MASK_IRQ); 887 if (!host->chan_rx)
888 irq_mask |= TMIO_MASK_READOP;
889 if (!host->chan_tx)
890 irq_mask |= TMIO_MASK_WRITEOP;
891 enable_mmc_irqs(host, irq_mask);
588 892
589 return 0; 893 return 0;
590 894
@@ -609,6 +913,7 @@ static int __devexit tmio_mmc_remove(struct platform_device *dev)
609 if (mmc) { 913 if (mmc) {
610 struct tmio_mmc_host *host = mmc_priv(mmc); 914 struct tmio_mmc_host *host = mmc_priv(mmc);
611 mmc_remove_host(mmc); 915 mmc_remove_host(mmc);
916 tmio_mmc_release_dma(host);
612 free_irq(host->irq, host); 917 free_irq(host->irq, host);
613 if (cell->disable) 918 if (cell->disable)
614 cell->disable(dev); 919 cell->disable(dev);
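
tmio_mmc_request_dma() above only switches DMA on when the platform's tmio_mmc_data carries a dma descriptor whose chan_priv_tx/chan_priv_rx cookies are handed to the dmaengine filter as chan->private. A rough board-side sketch, assuming <linux/mfd/tmio.h> gains a struct tmio_mmc_dma holding just those two pointers (only the field names are visible in the hunk) and using a made-up myboard_slave_cfg as the DMA-driver-specific cookie:

/*
 * Hypothetical board glue for the tmio_mmc DMA path above.  What the
 * chan_priv_tx/chan_priv_rx pointers reference is private to the
 * platform's dmaengine driver, which sees them as chan->private via
 * tmio_mmc_filter(); myboard_slave_cfg is only a stand-in for that
 * driver-specific slave descriptor.
 */
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>

struct myboard_slave_cfg {		/* placeholder slave descriptor */
	unsigned int slave_id;
};

static struct myboard_slave_cfg myboard_sdhi_tx = { .slave_id = 1 };
static struct myboard_slave_cfg myboard_sdhi_rx = { .slave_id = 2 };

static struct tmio_mmc_dma myboard_sdhi_dma = {
	.chan_priv_tx	= &myboard_sdhi_tx,
	.chan_priv_rx	= &myboard_sdhi_rx,
};

static struct tmio_mmc_data myboard_sdhi_data = {
	.hclk		= 12500000,		/* assumed bus clock in Hz */
	.capabilities	= MMC_CAP_SD_HIGHSPEED,
	.ocr_mask	= MMC_VDD_32_33,
	.dma		= &myboard_sdhi_dma,
};

With .dma left NULL the driver stays on PIO: tmio_mmc_request_dma() returns without claiming channels, and probe then unmasks TMIO_MASK_READOP/WRITEOP as before.
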
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index dafecfbcd91a..64f7d5dfc106 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -10,6 +10,8 @@
10 */ 10 */
11 11
12#include <linux/highmem.h> 12#include <linux/highmem.h>
13#include <linux/interrupt.h>
14#include <linux/dmaengine.h>
13 15
14#define CTL_SD_CMD 0x00 16#define CTL_SD_CMD 0x00
15#define CTL_ARG_REG 0x04 17#define CTL_ARG_REG 0x04
@@ -106,6 +108,17 @@ struct tmio_mmc_host {
106 unsigned int sg_off; 108 unsigned int sg_off;
107 109
108 struct platform_device *pdev; 110 struct platform_device *pdev;
111
112 /* DMA support */
113 struct dma_chan *chan_rx;
114 struct dma_chan *chan_tx;
115 struct tasklet_struct dma_complete;
116 struct tasklet_struct dma_issue;
117#ifdef CONFIG_TMIO_MMC_DMA
118 struct dma_async_tx_descriptor *desc;
119 unsigned int dma_sglen;
120 dma_cookie_t cookie;
121#endif
109}; 122};
110 123
111#include <linux/io.h> 124#include <linux/io.h>
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 632858a94376..19f2d72dbca5 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1280,7 +1280,7 @@ static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state)
1280 via_save_pcictrlreg(host); 1280 via_save_pcictrlreg(host);
1281 via_save_sdcreg(host); 1281 via_save_sdcreg(host);
1282 1282
1283 ret = mmc_suspend_host(host->mmc, state); 1283 ret = mmc_suspend_host(host->mmc);
1284 1284
1285 pci_save_state(pcidev); 1285 pci_save_state(pcidev);
1286 pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0); 1286 pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 69efe01eece8..0012f5d13d28 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1819,7 +1819,7 @@ static int wbsd_suspend(struct wbsd_host *host, pm_message_t state)
1819{ 1819{
1820 BUG_ON(host == NULL); 1820 BUG_ON(host == NULL);
1821 1821
1822 return mmc_suspend_host(host->mmc, state); 1822 return mmc_suspend_host(host->mmc);
1823} 1823}
1824 1824
1825static int wbsd_resume(struct wbsd_host *host) 1825static int wbsd_resume(struct wbsd_host *host)
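
Several of the hunks above (tifm_sd, tmio_mmc, via-sdmmc, wbsd) adapt to the core change that drops the pm_message_t argument from mmc_suspend_host(). A minimal sketch of a converted platform suspend/resume pair after this series; the foo_* names are hypothetical, only the mmc_suspend_host()/mmc_resume_host() call pattern is taken from the hunks:

#include <linux/platform_device.h>
#include <linux/mmc/host.h>

/* foo_* is a made-up driver used only to show the new call pattern. */
static int foo_mmc_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	/* The suspend mode is no longer forwarded to the MMC core. */
	return mmc_suspend_host(mmc);
}

static int foo_mmc_resume(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	return mmc_resume_host(mmc);
}
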