Diffstat (limited to 'drivers/mmc/host')
 drivers/mmc/host/Kconfig           |   14
 drivers/mmc/host/Makefile          |    8
 drivers/mmc/host/dw_mmc.c          |    4
 drivers/mmc/host/mmci.c            |   13
 drivers/mmc/host/of_mmc_spi.c      |    2
 drivers/mmc/host/sdhci-esdhc-imx.c |   86
 drivers/mmc/host/sdhci-esdhc.h     |    3
 drivers/mmc/host/sdhci-of-esdhc.c  |    3
 drivers/mmc/host/sdhci-pci.c       |    6
 drivers/mmc/host/sdhci-pltfm.h     |    2
 drivers/mmc/host/sdhci.h           |    1
 drivers/mmc/host/sh_mobile_sdhi.c  |  171
 drivers/mmc/host/tmio_mmc.c        | 1285
 drivers/mmc/host/tmio_mmc.h        |  123
 drivers/mmc/host/tmio_mmc_dma.c    |  317
 drivers/mmc/host/tmio_mmc_pio.c    |  897
 drivers/mmc/host/via-sdmmc.c       |    5
 17 files changed, 1650 insertions(+), 1290 deletions(-)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 1a21c6427a19..94df40531c38 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -439,13 +439,25 @@ config MMC_SDRICOH_CS
439 | To compile this driver as a module, choose M here: the | 439 | To compile this driver as a module, choose M here: the |
440 | module will be called sdricoh_cs. | 440 | module will be called sdricoh_cs. |
441 | 441 | ||
442 | config MMC_TMIO_CORE | ||
443 | tristate | ||
444 | |||
442 | config MMC_TMIO | 445 | config MMC_TMIO |
443 | tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support" | 446 | tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support" |
444 | depends on MFD_TMIO || MFD_ASIC3 || MFD_SH_MOBILE_SDHI | 447 | depends on MFD_TMIO || MFD_ASIC3 |
448 | select MMC_TMIO_CORE | ||
445 | help | 449 | help |
446 | This provides support for the SD/MMC cell found in TC6393XB, | 450 | This provides support for the SD/MMC cell found in TC6393XB, |
447 | T7L66XB and also HTC ASIC3 | 451 | T7L66XB and also HTC ASIC3 |
448 | 452 | ||
453 | config MMC_SDHI | ||
454 | tristate "SH-Mobile SDHI SD/SDIO controller support" | ||
455 | depends on SUPERH || ARCH_SHMOBILE | ||
456 | select MMC_TMIO_CORE | ||
457 | help | ||
458 | This provides support for the SDHI SD/SDIO controller found in | ||
459 | SuperH and ARM SH-Mobile SoCs | ||
460 | |||
449 | config MMC_CB710 | 461 | config MMC_CB710 |
450 | tristate "ENE CB710 MMC/SD Interface support" | 462 | tristate "ENE CB710 MMC/SD Interface support" |
451 | depends on PCI | 463 | depends on PCI |
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 30aa6867745f..4f1df0aae574 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -29,7 +29,13 @@ endif
29 | obj-$(CONFIG_MMC_S3C) += s3cmci.o | 29 | obj-$(CONFIG_MMC_S3C) += s3cmci.o |
30 | obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o | 30 | obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o |
31 | obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o | 31 | obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o |
32 | obj-$(CONFIG_MMC_CB710) += cb710-mmc.o | 32 | obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o |
33 | tmio_mmc_core-y := tmio_mmc_pio.o | ||
34 | ifneq ($(CONFIG_MMC_SDHI),n) | ||
35 | tmio_mmc_core-y += tmio_mmc_dma.o | ||
36 | endif | ||
37 | obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o | ||
38 | obj-$(CONFIG_MMC_CB710) += cb710-mmc.o | ||
33 | obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o | 39 | obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o |
34 | obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o | 40 | obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o |
35 | obj-$(CONFIG_MMC_DW) += dw_mmc.o | 41 | obj-$(CONFIG_MMC_DW) += dw_mmc.o |
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 5a614069cb00..87e1f57ec9ba 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -316,7 +316,7 @@ static void dw_mci_idmac_stop_dma(struct dw_mci *host)
316 | 316 | ||
317 | /* Stop the IDMAC running */ | 317 | /* Stop the IDMAC running */ |
318 | temp = mci_readl(host, BMOD); | 318 | temp = mci_readl(host, BMOD); |
319 | temp &= ~SDMMC_IDMAC_ENABLE; | 319 | temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); |
320 | mci_writel(host, BMOD, temp); | 320 | mci_writel(host, BMOD, temp); |
321 | } | 321 | } |
322 | 322 | ||
@@ -385,7 +385,7 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
385 | 385 | ||
386 | /* Enable the IDMAC */ | 386 | /* Enable the IDMAC */ |
387 | temp = mci_readl(host, BMOD); | 387 | temp = mci_readl(host, BMOD); |
388 | temp |= SDMMC_IDMAC_ENABLE; | 388 | temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB; |
389 | mci_writel(host, BMOD, temp); | 389 | mci_writel(host, BMOD, temp); |
390 | 390 | ||
391 | /* Start it running */ | 391 | /* Start it running */ |
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 5bbb87d10251..b4a7e4fba90f 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -68,6 +68,12 @@ static struct variant_data variant_arm = {
68 | .datalength_bits = 16, | 68 | .datalength_bits = 16, |
69 | }; | 69 | }; |
70 | 70 | ||
71 | static struct variant_data variant_arm_extended_fifo = { | ||
72 | .fifosize = 128 * 4, | ||
73 | .fifohalfsize = 64 * 4, | ||
74 | .datalength_bits = 16, | ||
75 | }; | ||
76 | |||
71 | static struct variant_data variant_u300 = { | 77 | static struct variant_data variant_u300 = { |
72 | .fifosize = 16 * 4, | 78 | .fifosize = 16 * 4, |
73 | .fifohalfsize = 8 * 4, | 79 | .fifohalfsize = 8 * 4, |
@@ -1277,10 +1283,15 @@ static int mmci_resume(struct amba_device *dev)
1277 | static struct amba_id mmci_ids[] = { | 1283 | static struct amba_id mmci_ids[] = { |
1278 | { | 1284 | { |
1279 | .id = 0x00041180, | 1285 | .id = 0x00041180, |
1280 | .mask = 0x000fffff, | 1286 | .mask = 0xff0fffff, |
1281 | .data = &variant_arm, | 1287 | .data = &variant_arm, |
1282 | }, | 1288 | }, |
1283 | { | 1289 | { |
1290 | .id = 0x01041180, | ||
1291 | .mask = 0xff0fffff, | ||
1292 | .data = &variant_arm_extended_fifo, | ||
1293 | }, | ||
1294 | { | ||
1284 | .id = 0x00041181, | 1295 | .id = 0x00041181, |
1285 | .mask = 0x000fffff, | 1296 | .mask = 0x000fffff, |
1286 | .data = &variant_arm, | 1297 | .data = &variant_arm, |
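As an illustration (not part of this patch): the hunk above relies on the AMBA bus matching a device's peripheral id against each table entry under that entry's mask, so widening the mask to 0xff0fffff is what lets id 0x01041180 reach the new extended-FIFO entry instead of falling through to variant_arm. A minimal sketch of that id/mask lookup, assuming the usual "(periphid & mask) == id" comparison; the struct and helper names here are illustrative only:

struct amba_id_example {
	unsigned int id;
	unsigned int mask;
	void *data;		/* e.g. &variant_arm */
};

static void *lookup_variant(const struct amba_id_example *table, int n,
			    unsigned int periphid)
{
	int i;

	/* First entry whose masked periphid matches wins. */
	for (i = 0; i < n; i++)
		if ((periphid & table[i].mask) == table[i].id)
			return table[i].data;
	return NULL;
}

With the old 0x000fffff mask, periphid 0x01041180 would have matched the first entry; with 0xff0fffff it selects variant_arm_extended_fifo and its larger FIFO parameters.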
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index 5530def54e5b..e2aecb7f1d5c 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -15,9 +15,11 @@
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/irq.h> | ||
18 | #include <linux/gpio.h> | 19 | #include <linux/gpio.h> |
19 | #include <linux/of.h> | 20 | #include <linux/of.h> |
20 | #include <linux/of_gpio.h> | 21 | #include <linux/of_gpio.h> |
22 | #include <linux/of_irq.h> | ||
21 | #include <linux/spi/spi.h> | 23 | #include <linux/spi/spi.h> |
22 | #include <linux/spi/mmc_spi.h> | 24 | #include <linux/spi/mmc_spi.h> |
23 | #include <linux/mmc/core.h> | 25 | #include <linux/mmc/core.h> |
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 3b5248567973..a19967d0bfc4 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -16,14 +16,40 @@
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/clk.h> | 17 | #include <linux/clk.h> |
18 | #include <linux/gpio.h> | 18 | #include <linux/gpio.h> |
19 | #include <linux/slab.h> | ||
19 | #include <linux/mmc/host.h> | 20 | #include <linux/mmc/host.h> |
20 | #include <linux/mmc/sdhci-pltfm.h> | 21 | #include <linux/mmc/sdhci-pltfm.h> |
22 | #include <linux/mmc/mmc.h> | ||
23 | #include <linux/mmc/sdio.h> | ||
21 | #include <mach/hardware.h> | 24 | #include <mach/hardware.h> |
22 | #include <mach/esdhc.h> | 25 | #include <mach/esdhc.h> |
23 | #include "sdhci.h" | 26 | #include "sdhci.h" |
24 | #include "sdhci-pltfm.h" | 27 | #include "sdhci-pltfm.h" |
25 | #include "sdhci-esdhc.h" | 28 | #include "sdhci-esdhc.h" |
26 | 29 | ||
30 | /* VENDOR SPEC register */ | ||
31 | #define SDHCI_VENDOR_SPEC 0xC0 | ||
32 | #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 | ||
33 | |||
34 | #define ESDHC_FLAG_GPIO_FOR_CD_WP (1 << 0) | ||
35 | /* | ||
36 | * The CMDTYPE of the CMD register (offset 0xE) should be set to | ||
37 | * "11" when the STOP CMD12 is issued on imx53 to abort one | ||
38 | * open ended multi-blk IO. Otherwise the TC INT wouldn't | ||
39 | * be generated. | ||
40 | * In exact block transfer, the controller doesn't complete the | ||
41 | * operations automatically as required at the end of the | ||
42 | * transfer and remains on hold if the abort command is not sent. | ||
43 | * As a result, the TC flag is not asserted and SW receives a timeout | ||
44 | * exception. Bit 1 of the Vendor Spec register is used to fix it. | ||
45 | */ | ||
46 | #define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1) | ||
47 | |||
48 | struct pltfm_imx_data { | ||
49 | int flags; | ||
50 | u32 scratchpad; | ||
51 | }; | ||
52 | |||
27 | static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) | 53 | static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) |
28 | { | 54 | { |
29 | void __iomem *base = host->ioaddr + (reg & ~0x3); | 55 | void __iomem *base = host->ioaddr + (reg & ~0x3); |
@@ -34,10 +60,14 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i | |||
34 | 60 | ||
35 | static u32 esdhc_readl_le(struct sdhci_host *host, int reg) | 61 | static u32 esdhc_readl_le(struct sdhci_host *host, int reg) |
36 | { | 62 | { |
63 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
64 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | ||
65 | |||
37 | /* fake CARD_PRESENT flag on mx25/35 */ | 66 | /* fake CARD_PRESENT flag on mx25/35 */ |
38 | u32 val = readl(host->ioaddr + reg); | 67 | u32 val = readl(host->ioaddr + reg); |
39 | 68 | ||
40 | if (unlikely(reg == SDHCI_PRESENT_STATE)) { | 69 | if (unlikely((reg == SDHCI_PRESENT_STATE) |
70 | && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) { | ||
41 | struct esdhc_platform_data *boarddata = | 71 | struct esdhc_platform_data *boarddata = |
42 | host->mmc->parent->platform_data; | 72 | host->mmc->parent->platform_data; |
43 | 73 | ||
@@ -55,13 +85,26 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
55 | 85 | ||
56 | static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) | 86 | static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) |
57 | { | 87 | { |
58 | if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) | 88 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
89 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | ||
90 | |||
91 | if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE) | ||
92 | && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) | ||
59 | /* | 93 | /* |
60 | * these interrupts won't work with a custom card_detect gpio | 94 | * these interrupts won't work with a custom card_detect gpio |
61 | * (only applied to mx25/35) | 95 | * (only applied to mx25/35) |
62 | */ | 96 | */ |
63 | val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); | 97 | val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); |
64 | 98 | ||
99 | if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) | ||
100 | && (reg == SDHCI_INT_STATUS) | ||
101 | && (val & SDHCI_INT_DATA_END))) { | ||
102 | u32 v; | ||
103 | v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); | ||
104 | v &= ~SDHCI_VENDOR_SPEC_SDIO_QUIRK; | ||
105 | writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); | ||
106 | } | ||
107 | |||
65 | writel(val, host->ioaddr + reg); | 108 | writel(val, host->ioaddr + reg); |
66 | } | 109 | } |
67 | 110 | ||
@@ -76,6 +119,7 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
76 | static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) | 119 | static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) |
77 | { | 120 | { |
78 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 121 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
122 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | ||
79 | 123 | ||
80 | switch (reg) { | 124 | switch (reg) { |
81 | case SDHCI_TRANSFER_MODE: | 125 | case SDHCI_TRANSFER_MODE: |
@@ -83,10 +127,22 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
83 | * Postpone this write, we must do it together with a | 127 | * Postpone this write, we must do it together with a |
84 | * command write that is down below. | 128 | * command write that is down below. |
85 | */ | 129 | */ |
86 | pltfm_host->scratchpad = val; | 130 | if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) |
131 | && (host->cmd->opcode == SD_IO_RW_EXTENDED) | ||
132 | && (host->cmd->data->blocks > 1) | ||
133 | && (host->cmd->data->flags & MMC_DATA_READ)) { | ||
134 | u32 v; | ||
135 | v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); | ||
136 | v |= SDHCI_VENDOR_SPEC_SDIO_QUIRK; | ||
137 | writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); | ||
138 | } | ||
139 | imx_data->scratchpad = val; | ||
87 | return; | 140 | return; |
88 | case SDHCI_COMMAND: | 141 | case SDHCI_COMMAND: |
89 | writel(val << 16 | pltfm_host->scratchpad, | 142 | if ((host->cmd->opcode == MMC_STOP_TRANSMISSION) |
143 | && (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) | ||
144 | val |= SDHCI_CMD_ABORTCMD; | ||
145 | writel(val << 16 | imx_data->scratchpad, | ||
90 | host->ioaddr + SDHCI_TRANSFER_MODE); | 146 | host->ioaddr + SDHCI_TRANSFER_MODE); |
91 | return; | 147 | return; |
92 | case SDHCI_BLOCK_SIZE: | 148 | case SDHCI_BLOCK_SIZE: |
@@ -146,7 +202,9 @@ static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
146 | } | 202 | } |
147 | 203 | ||
148 | static struct sdhci_ops sdhci_esdhc_ops = { | 204 | static struct sdhci_ops sdhci_esdhc_ops = { |
205 | .read_l = esdhc_readl_le, | ||
149 | .read_w = esdhc_readw_le, | 206 | .read_w = esdhc_readw_le, |
207 | .write_l = esdhc_writel_le, | ||
150 | .write_w = esdhc_writew_le, | 208 | .write_w = esdhc_writew_le, |
151 | .write_b = esdhc_writeb_le, | 209 | .write_b = esdhc_writeb_le, |
152 | .set_clock = esdhc_set_clock, | 210 | .set_clock = esdhc_set_clock, |
@@ -168,6 +226,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
168 | struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; | 226 | struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; |
169 | struct clk *clk; | 227 | struct clk *clk; |
170 | int err; | 228 | int err; |
229 | struct pltfm_imx_data *imx_data; | ||
171 | 230 | ||
172 | clk = clk_get(mmc_dev(host->mmc), NULL); | 231 | clk = clk_get(mmc_dev(host->mmc), NULL); |
173 | if (IS_ERR(clk)) { | 232 | if (IS_ERR(clk)) { |
@@ -177,7 +236,15 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
177 | clk_enable(clk); | 236 | clk_enable(clk); |
178 | pltfm_host->clk = clk; | 237 | pltfm_host->clk = clk; |
179 | 238 | ||
180 | if (cpu_is_mx35() || cpu_is_mx51()) | 239 | imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL); |
240 | if (!imx_data) { | ||
241 | clk_disable(pltfm_host->clk); | ||
242 | clk_put(pltfm_host->clk); | ||
243 | return -ENOMEM; | ||
244 | } | ||
245 | pltfm_host->priv = imx_data; | ||
246 | |||
247 | if (!cpu_is_mx25()) | ||
181 | host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; | 248 | host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; |
182 | 249 | ||
183 | if (cpu_is_mx25() || cpu_is_mx35()) { | 250 | if (cpu_is_mx25() || cpu_is_mx35()) { |
@@ -187,6 +254,9 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
187 | sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro; | 254 | sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro; |
188 | } | 255 | } |
189 | 256 | ||
257 | if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51())) | ||
258 | imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; | ||
259 | |||
190 | if (boarddata) { | 260 | if (boarddata) { |
191 | err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP"); | 261 | err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP"); |
192 | if (err) { | 262 | if (err) { |
@@ -214,8 +284,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
214 | goto no_card_detect_irq; | 284 | goto no_card_detect_irq; |
215 | } | 285 | } |
216 | 286 | ||
217 | sdhci_esdhc_ops.write_l = esdhc_writel_le; | 287 | imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD_WP; |
218 | sdhci_esdhc_ops.read_l = esdhc_readl_le; | ||
219 | /* Now we have a working card_detect again */ | 288 | /* Now we have a working card_detect again */ |
220 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; | 289 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; |
221 | } | 290 | } |
@@ -227,6 +296,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
227 | no_card_detect_pin: | 296 | no_card_detect_pin: |
228 | boarddata->cd_gpio = err; | 297 | boarddata->cd_gpio = err; |
229 | not_supported: | 298 | not_supported: |
299 | kfree(imx_data); | ||
230 | return 0; | 300 | return 0; |
231 | } | 301 | } |
232 | 302 | ||
@@ -234,6 +304,7 @@ static void esdhc_pltfm_exit(struct sdhci_host *host)
234 | { | 304 | { |
235 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 305 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
236 | struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; | 306 | struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; |
307 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | ||
237 | 308 | ||
238 | if (boarddata && gpio_is_valid(boarddata->wp_gpio)) | 309 | if (boarddata && gpio_is_valid(boarddata->wp_gpio)) |
239 | gpio_free(boarddata->wp_gpio); | 310 | gpio_free(boarddata->wp_gpio); |
@@ -247,6 +318,7 @@ static void esdhc_pltfm_exit(struct sdhci_host *host)
247 | 318 | ||
248 | clk_disable(pltfm_host->clk); | 319 | clk_disable(pltfm_host->clk); |
249 | clk_put(pltfm_host->clk); | 320 | clk_put(pltfm_host->clk); |
321 | kfree(imx_data); | ||
250 | } | 322 | } |
251 | 323 | ||
252 | struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { | 324 | struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { |
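As an illustration (not part of this patch): the sdhci-esdhc-imx.c hunks above attach a per-host pltfm_imx_data through the new pltfm_host->priv pointer and consult its two flags in the I/O accessors. The CMD12 half of the ESDHC_FLAG_MULTIBLK_NO_INT handling boils down to the check below, shown here pulled out of esdhc_writew_le() for readability; the helper itself is hypothetical, the identifiers mirror the patch:

static u16 esdhc_fixup_stop_command(struct pltfm_imx_data *imx_data,
				    struct mmc_command *cmd, u16 val)
{
	/*
	 * On the affected i.MX parts CMD12 must be issued with
	 * CMDTYPE = 11b (abort), otherwise the transfer-complete
	 * interrupt is never raised for open-ended multi-block I/O.
	 */
	if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		val |= SDHCI_CMD_ABORTCMD;	/* 0xC0, added to sdhci.h below */
	return val;
}

The SDIO multi-block read case is covered separately by setting and clearing bit 1 of the vendor-specific register (SDHCI_VENDOR_SPEC_SDIO_QUIRK) around the transfer, as the writel/writew hooks above do.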
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index c55aae828aac..c3b08f111942 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -23,8 +23,7 @@
23 | SDHCI_QUIRK_NONSTANDARD_CLOCK | \ | 23 | SDHCI_QUIRK_NONSTANDARD_CLOCK | \ |
24 | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ | 24 | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ |
25 | SDHCI_QUIRK_PIO_NEEDS_DELAY | \ | 25 | SDHCI_QUIRK_PIO_NEEDS_DELAY | \ |
26 | SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | \ | 26 | SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) |
27 | SDHCI_QUIRK_NO_CARD_NO_RESET) | ||
28 | 27 | ||
29 | #define ESDHC_SYSTEM_CONTROL 0x2c | 28 | #define ESDHC_SYSTEM_CONTROL 0x2c |
30 | #define ESDHC_CLOCK_MASK 0x0000fff0 | 29 | #define ESDHC_CLOCK_MASK 0x0000fff0 |
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 08161f690ae8..ba40d6d035c7 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -74,7 +74,8 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
74 | 74 | ||
75 | struct sdhci_of_data sdhci_esdhc = { | 75 | struct sdhci_of_data sdhci_esdhc = { |
76 | /* card detection could be handled via GPIO */ | 76 | /* card detection could be handled via GPIO */ |
77 | .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION, | 77 | .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION |
78 | | SDHCI_QUIRK_NO_CARD_NO_RESET, | ||
78 | .ops = { | 79 | .ops = { |
79 | .read_l = sdhci_be32bs_readl, | 80 | .read_l = sdhci_be32bs_readl, |
80 | .read_w = esdhc_readw, | 81 | .read_w = esdhc_readw, |
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 2f8d46854acd..a136be706347 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -1016,16 +1016,14 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
1016 | struct sdhci_pci_chip *chip; | 1016 | struct sdhci_pci_chip *chip; |
1017 | struct sdhci_pci_slot *slot; | 1017 | struct sdhci_pci_slot *slot; |
1018 | 1018 | ||
1019 | u8 slots, rev, first_bar; | 1019 | u8 slots, first_bar; |
1020 | int ret, i; | 1020 | int ret, i; |
1021 | 1021 | ||
1022 | BUG_ON(pdev == NULL); | 1022 | BUG_ON(pdev == NULL); |
1023 | BUG_ON(ent == NULL); | 1023 | BUG_ON(ent == NULL); |
1024 | 1024 | ||
1025 | pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev); | ||
1026 | |||
1027 | dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n", | 1025 | dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n", |
1028 | (int)pdev->vendor, (int)pdev->device, (int)rev); | 1026 | (int)pdev->vendor, (int)pdev->device, (int)pdev->revision); |
1029 | 1027 | ||
1030 | ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); | 1028 | ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); |
1031 | if (ret) | 1029 | if (ret) |
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index ea2e44d9be5e..2b37016ad0ac 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -17,7 +17,7 @@
17 | 17 | ||
18 | struct sdhci_pltfm_host { | 18 | struct sdhci_pltfm_host { |
19 | struct clk *clk; | 19 | struct clk *clk; |
20 | u32 scratchpad; /* to handle quirks across io-accessor calls */ | 20 | void *priv; /* to handle quirks across io-accessor calls */ |
21 | }; | 21 | }; |
22 | 22 | ||
23 | extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata; | 23 | extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata; |
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 6e0969e40650..25e8bde600d1 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -45,6 +45,7 @@
45 | #define SDHCI_CMD_CRC 0x08 | 45 | #define SDHCI_CMD_CRC 0x08 |
46 | #define SDHCI_CMD_INDEX 0x10 | 46 | #define SDHCI_CMD_INDEX 0x10 |
47 | #define SDHCI_CMD_DATA 0x20 | 47 | #define SDHCI_CMD_DATA 0x20 |
48 | #define SDHCI_CMD_ABORTCMD 0xC0 | ||
48 | 49 | ||
49 | #define SDHCI_CMD_RESP_NONE 0x00 | 50 | #define SDHCI_CMD_RESP_NONE 0x00 |
50 | #define SDHCI_CMD_RESP_LONG 0x01 | 51 | #define SDHCI_CMD_RESP_LONG 0x01 |
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
new file mode 100644
index 000000000000..cc701236d16f
--- /dev/null
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -0,0 +1,171 @@
1 | /* | ||
2 | * SuperH Mobile SDHI | ||
3 | * | ||
4 | * Copyright (C) 2009 Magnus Damm | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * Based on "Compaq ASIC3 support": | ||
11 | * | ||
12 | * Copyright 2001 Compaq Computer Corporation. | ||
13 | * Copyright 2004-2005 Phil Blundell | ||
14 | * Copyright 2007-2008 OpenedHand Ltd. | ||
15 | * | ||
16 | * Authors: Phil Blundell <pb@handhelds.org>, | ||
17 | * Samuel Ortiz <sameo@openedhand.com> | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/clk.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | #include <linux/mmc/host.h> | ||
26 | #include <linux/mmc/sh_mobile_sdhi.h> | ||
27 | #include <linux/mfd/tmio.h> | ||
28 | #include <linux/sh_dma.h> | ||
29 | |||
30 | #include "tmio_mmc.h" | ||
31 | |||
32 | struct sh_mobile_sdhi { | ||
33 | struct clk *clk; | ||
34 | struct tmio_mmc_data mmc_data; | ||
35 | struct sh_dmae_slave param_tx; | ||
36 | struct sh_dmae_slave param_rx; | ||
37 | struct tmio_mmc_dma dma_priv; | ||
38 | }; | ||
39 | |||
40 | static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state) | ||
41 | { | ||
42 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; | ||
43 | |||
44 | if (p && p->set_pwr) | ||
45 | p->set_pwr(pdev, state); | ||
46 | } | ||
47 | |||
48 | static int sh_mobile_sdhi_get_cd(struct platform_device *pdev) | ||
49 | { | ||
50 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; | ||
51 | |||
52 | if (p && p->get_cd) | ||
53 | return p->get_cd(pdev); | ||
54 | else | ||
55 | return -ENOSYS; | ||
56 | } | ||
57 | |||
58 | static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) | ||
59 | { | ||
60 | struct sh_mobile_sdhi *priv; | ||
61 | struct tmio_mmc_data *mmc_data; | ||
62 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; | ||
63 | struct tmio_mmc_host *host; | ||
64 | char clk_name[8]; | ||
65 | int ret; | ||
66 | |||
67 | priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL); | ||
68 | if (priv == NULL) { | ||
69 | dev_err(&pdev->dev, "kzalloc failed\n"); | ||
70 | return -ENOMEM; | ||
71 | } | ||
72 | |||
73 | mmc_data = &priv->mmc_data; | ||
74 | |||
75 | snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id); | ||
76 | priv->clk = clk_get(&pdev->dev, clk_name); | ||
77 | if (IS_ERR(priv->clk)) { | ||
78 | dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); | ||
79 | ret = PTR_ERR(priv->clk); | ||
80 | goto eclkget; | ||
81 | } | ||
82 | |||
83 | clk_enable(priv->clk); | ||
84 | |||
85 | mmc_data->hclk = clk_get_rate(priv->clk); | ||
86 | mmc_data->set_pwr = sh_mobile_sdhi_set_pwr; | ||
87 | mmc_data->get_cd = sh_mobile_sdhi_get_cd; | ||
88 | mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; | ||
89 | if (p) { | ||
90 | mmc_data->flags = p->tmio_flags; | ||
91 | mmc_data->ocr_mask = p->tmio_ocr_mask; | ||
92 | mmc_data->capabilities |= p->tmio_caps; | ||
93 | |||
94 | if (p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) { | ||
95 | priv->param_tx.slave_id = p->dma_slave_tx; | ||
96 | priv->param_rx.slave_id = p->dma_slave_rx; | ||
97 | priv->dma_priv.chan_priv_tx = &priv->param_tx; | ||
98 | priv->dma_priv.chan_priv_rx = &priv->param_rx; | ||
99 | priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */ | ||
100 | mmc_data->dma = &priv->dma_priv; | ||
101 | } | ||
102 | } | ||
103 | |||
104 | /* | ||
105 | * All SDHI blocks support 2-byte and larger block sizes in 4-bit | ||
106 | * bus width mode. | ||
107 | */ | ||
108 | mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES; | ||
109 | |||
110 | /* | ||
111 | * All SDHI blocks support SDIO IRQ signalling. | ||
112 | */ | ||
113 | mmc_data->flags |= TMIO_MMC_SDIO_IRQ; | ||
114 | |||
115 | ret = tmio_mmc_host_probe(&host, pdev, mmc_data); | ||
116 | if (ret < 0) | ||
117 | goto eprobe; | ||
118 | |||
119 | pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), | ||
120 | (unsigned long)host->ctl, host->irq); | ||
121 | |||
122 | return ret; | ||
123 | |||
124 | eprobe: | ||
125 | clk_disable(priv->clk); | ||
126 | clk_put(priv->clk); | ||
127 | eclkget: | ||
128 | kfree(priv); | ||
129 | return ret; | ||
130 | } | ||
131 | |||
132 | static int sh_mobile_sdhi_remove(struct platform_device *pdev) | ||
133 | { | ||
134 | struct mmc_host *mmc = platform_get_drvdata(pdev); | ||
135 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
136 | struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); | ||
137 | |||
138 | tmio_mmc_host_remove(host); | ||
139 | clk_disable(priv->clk); | ||
140 | clk_put(priv->clk); | ||
141 | kfree(priv); | ||
142 | |||
143 | return 0; | ||
144 | } | ||
145 | |||
146 | static struct platform_driver sh_mobile_sdhi_driver = { | ||
147 | .driver = { | ||
148 | .name = "sh_mobile_sdhi", | ||
149 | .owner = THIS_MODULE, | ||
150 | }, | ||
151 | .probe = sh_mobile_sdhi_probe, | ||
152 | .remove = __devexit_p(sh_mobile_sdhi_remove), | ||
153 | }; | ||
154 | |||
155 | static int __init sh_mobile_sdhi_init(void) | ||
156 | { | ||
157 | return platform_driver_register(&sh_mobile_sdhi_driver); | ||
158 | } | ||
159 | |||
160 | static void __exit sh_mobile_sdhi_exit(void) | ||
161 | { | ||
162 | platform_driver_unregister(&sh_mobile_sdhi_driver); | ||
163 | } | ||
164 | |||
165 | module_init(sh_mobile_sdhi_init); | ||
166 | module_exit(sh_mobile_sdhi_exit); | ||
167 | |||
168 | MODULE_DESCRIPTION("SuperH Mobile SDHI driver"); | ||
169 | MODULE_AUTHOR("Magnus Damm"); | ||
170 | MODULE_LICENSE("GPL v2"); | ||
171 | MODULE_ALIAS("platform:sh_mobile_sdhi"); | ||
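As an illustration (not part of this patch): the new glue driver takes everything board-specific from struct sh_mobile_sdhi_info. A sketch of the platform data a board file might register for it, limited to the fields the probe routine above actually reads; the DMA slave IDs, flags and capability bits are placeholders rather than values from any real board:

static struct sh_mobile_sdhi_info sdhi0_info = {
	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,	/* placeholder slave IDs */
	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
	.tmio_flags	= TMIO_MMC_WRPROTECT_DISABLE,
	.tmio_caps	= MMC_CAP_SD_HIGHSPEED,
};

static struct platform_device sdhi0_device = {
	.name	= "sh_mobile_sdhi",	/* must match the driver name above */
	.id	= 0,			/* the probe derives the clock name "sdhi0" from this */
	.dev	= {
		.platform_data	= &sdhi0_info,
	},
	/* MMIO and IRQ resources omitted from this sketch */
};

Passing negative dma_slave_tx/dma_slave_rx values keeps the host in PIO mode, since the probe only sets up the DMA channel parameters when both IDs are non-negative.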
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index ab1adeabdd22..79c568461d59 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -1,8 +1,8 @@
1 | /* | 1 | /* |
2 | * linux/drivers/mmc/tmio_mmc.c | 2 | * linux/drivers/mmc/host/tmio_mmc.c |
3 | * | 3 | * |
4 | * Copyright (C) 2004 Ian Molton | 4 | * Copyright (C) 2007 Ian Molton |
5 | * Copyright (C) 2007 Ian Molton | 5 | * Copyright (C) 2004 Ian Molton |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
@@ -11,1182 +11,17 @@
11 | * Driver for the MMC / SD / SDIO cell found in: | 11 | * Driver for the MMC / SD / SDIO cell found in: |
12 | * | 12 | * |
13 | * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 | 13 | * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 |
14 | * | ||
15 | * This driver draws mainly on scattered spec sheets, Reverse engineering | ||
16 | * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit | ||
17 | * support). (Further 4 bit support from a later datasheet). | ||
18 | * | ||
19 | * TODO: | ||
20 | * Investigate using a workqueue for PIO transfers | ||
21 | * Eliminate FIXMEs | ||
22 | * SDIO support | ||
23 | * Better Power management | ||
24 | * Handle MMC errors better | ||
25 | * double buffer support | ||
26 | * | ||
27 | */ | 14 | */ |
28 | 15 | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/device.h> | 16 | #include <linux/device.h> |
31 | #include <linux/dmaengine.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/interrupt.h> | ||
34 | #include <linux/io.h> | ||
35 | #include <linux/irq.h> | ||
36 | #include <linux/mfd/core.h> | 17 | #include <linux/mfd/core.h> |
37 | #include <linux/mfd/tmio.h> | 18 | #include <linux/mfd/tmio.h> |
38 | #include <linux/mmc/host.h> | 19 | #include <linux/mmc/host.h> |
39 | #include <linux/module.h> | 20 | #include <linux/module.h> |
40 | #include <linux/pagemap.h> | 21 | #include <linux/pagemap.h> |
41 | #include <linux/scatterlist.h> | 22 | #include <linux/scatterlist.h> |
42 | #include <linux/workqueue.h> | ||
43 | #include <linux/spinlock.h> | ||
44 | |||
45 | #define CTL_SD_CMD 0x00 | ||
46 | #define CTL_ARG_REG 0x04 | ||
47 | #define CTL_STOP_INTERNAL_ACTION 0x08 | ||
48 | #define CTL_XFER_BLK_COUNT 0xa | ||
49 | #define CTL_RESPONSE 0x0c | ||
50 | #define CTL_STATUS 0x1c | ||
51 | #define CTL_IRQ_MASK 0x20 | ||
52 | #define CTL_SD_CARD_CLK_CTL 0x24 | ||
53 | #define CTL_SD_XFER_LEN 0x26 | ||
54 | #define CTL_SD_MEM_CARD_OPT 0x28 | ||
55 | #define CTL_SD_ERROR_DETAIL_STATUS 0x2c | ||
56 | #define CTL_SD_DATA_PORT 0x30 | ||
57 | #define CTL_TRANSACTION_CTL 0x34 | ||
58 | #define CTL_SDIO_STATUS 0x36 | ||
59 | #define CTL_SDIO_IRQ_MASK 0x38 | ||
60 | #define CTL_RESET_SD 0xe0 | ||
61 | #define CTL_SDIO_REGS 0x100 | ||
62 | #define CTL_CLK_AND_WAIT_CTL 0x138 | ||
63 | #define CTL_RESET_SDIO 0x1e0 | ||
64 | |||
65 | /* Definitions for values the CTRL_STATUS register can take. */ | ||
66 | #define TMIO_STAT_CMDRESPEND 0x00000001 | ||
67 | #define TMIO_STAT_DATAEND 0x00000004 | ||
68 | #define TMIO_STAT_CARD_REMOVE 0x00000008 | ||
69 | #define TMIO_STAT_CARD_INSERT 0x00000010 | ||
70 | #define TMIO_STAT_SIGSTATE 0x00000020 | ||
71 | #define TMIO_STAT_WRPROTECT 0x00000080 | ||
72 | #define TMIO_STAT_CARD_REMOVE_A 0x00000100 | ||
73 | #define TMIO_STAT_CARD_INSERT_A 0x00000200 | ||
74 | #define TMIO_STAT_SIGSTATE_A 0x00000400 | ||
75 | #define TMIO_STAT_CMD_IDX_ERR 0x00010000 | ||
76 | #define TMIO_STAT_CRCFAIL 0x00020000 | ||
77 | #define TMIO_STAT_STOPBIT_ERR 0x00040000 | ||
78 | #define TMIO_STAT_DATATIMEOUT 0x00080000 | ||
79 | #define TMIO_STAT_RXOVERFLOW 0x00100000 | ||
80 | #define TMIO_STAT_TXUNDERRUN 0x00200000 | ||
81 | #define TMIO_STAT_CMDTIMEOUT 0x00400000 | ||
82 | #define TMIO_STAT_RXRDY 0x01000000 | ||
83 | #define TMIO_STAT_TXRQ 0x02000000 | ||
84 | #define TMIO_STAT_ILL_FUNC 0x20000000 | ||
85 | #define TMIO_STAT_CMD_BUSY 0x40000000 | ||
86 | #define TMIO_STAT_ILL_ACCESS 0x80000000 | ||
87 | |||
88 | /* Definitions for values the CTRL_SDIO_STATUS register can take. */ | ||
89 | #define TMIO_SDIO_STAT_IOIRQ 0x0001 | ||
90 | #define TMIO_SDIO_STAT_EXPUB52 0x4000 | ||
91 | #define TMIO_SDIO_STAT_EXWT 0x8000 | ||
92 | #define TMIO_SDIO_MASK_ALL 0xc007 | ||
93 | |||
94 | /* Define some IRQ masks */ | ||
95 | /* This is the mask used at reset by the chip */ | ||
96 | #define TMIO_MASK_ALL 0x837f031d | ||
97 | #define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND) | ||
98 | #define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND) | ||
99 | #define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \ | ||
100 | TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT) | ||
101 | #define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD) | ||
102 | |||
103 | #define enable_mmc_irqs(host, i) \ | ||
104 | do { \ | ||
105 | u32 mask;\ | ||
106 | mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \ | ||
107 | mask &= ~((i) & TMIO_MASK_IRQ); \ | ||
108 | sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \ | ||
109 | } while (0) | ||
110 | |||
111 | #define disable_mmc_irqs(host, i) \ | ||
112 | do { \ | ||
113 | u32 mask;\ | ||
114 | mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \ | ||
115 | mask |= ((i) & TMIO_MASK_IRQ); \ | ||
116 | sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \ | ||
117 | } while (0) | ||
118 | |||
119 | #define ack_mmc_irqs(host, i) \ | ||
120 | do { \ | ||
121 | sd_ctrl_write32((host), CTL_STATUS, ~(i)); \ | ||
122 | } while (0) | ||
123 | |||
124 | /* This is arbitrary, just noone needed any higher alignment yet */ | ||
125 | #define MAX_ALIGN 4 | ||
126 | |||
127 | struct tmio_mmc_host { | ||
128 | void __iomem *ctl; | ||
129 | unsigned long bus_shift; | ||
130 | struct mmc_command *cmd; | ||
131 | struct mmc_request *mrq; | ||
132 | struct mmc_data *data; | ||
133 | struct mmc_host *mmc; | ||
134 | int irq; | ||
135 | unsigned int sdio_irq_enabled; | ||
136 | |||
137 | /* Callbacks for clock / power control */ | ||
138 | void (*set_pwr)(struct platform_device *host, int state); | ||
139 | void (*set_clk_div)(struct platform_device *host, int state); | ||
140 | |||
141 | /* pio related stuff */ | ||
142 | struct scatterlist *sg_ptr; | ||
143 | struct scatterlist *sg_orig; | ||
144 | unsigned int sg_len; | ||
145 | unsigned int sg_off; | ||
146 | |||
147 | struct platform_device *pdev; | ||
148 | |||
149 | /* DMA support */ | ||
150 | struct dma_chan *chan_rx; | ||
151 | struct dma_chan *chan_tx; | ||
152 | struct tasklet_struct dma_complete; | ||
153 | struct tasklet_struct dma_issue; | ||
154 | #ifdef CONFIG_TMIO_MMC_DMA | ||
155 | u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN))); | ||
156 | struct scatterlist bounce_sg; | ||
157 | #endif | ||
158 | |||
159 | /* Track lost interrupts */ | ||
160 | struct delayed_work delayed_reset_work; | ||
161 | spinlock_t lock; | ||
162 | unsigned long last_req_ts; | ||
163 | }; | ||
164 | |||
165 | static void tmio_check_bounce_buffer(struct tmio_mmc_host *host); | ||
166 | |||
167 | static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) | ||
168 | { | ||
169 | return readw(host->ctl + (addr << host->bus_shift)); | ||
170 | } | ||
171 | |||
172 | static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, | ||
173 | u16 *buf, int count) | ||
174 | { | ||
175 | readsw(host->ctl + (addr << host->bus_shift), buf, count); | ||
176 | } | ||
177 | |||
178 | static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr) | ||
179 | { | ||
180 | return readw(host->ctl + (addr << host->bus_shift)) | | ||
181 | readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16; | ||
182 | } | ||
183 | |||
184 | static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val) | ||
185 | { | ||
186 | writew(val, host->ctl + (addr << host->bus_shift)); | ||
187 | } | ||
188 | |||
189 | static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, | ||
190 | u16 *buf, int count) | ||
191 | { | ||
192 | writesw(host->ctl + (addr << host->bus_shift), buf, count); | ||
193 | } | ||
194 | |||
195 | static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) | ||
196 | { | ||
197 | writew(val, host->ctl + (addr << host->bus_shift)); | ||
198 | writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); | ||
199 | } | ||
200 | |||
201 | static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data) | ||
202 | { | ||
203 | host->sg_len = data->sg_len; | ||
204 | host->sg_ptr = data->sg; | ||
205 | host->sg_orig = data->sg; | ||
206 | host->sg_off = 0; | ||
207 | } | ||
208 | |||
209 | static int tmio_mmc_next_sg(struct tmio_mmc_host *host) | ||
210 | { | ||
211 | host->sg_ptr = sg_next(host->sg_ptr); | ||
212 | host->sg_off = 0; | ||
213 | return --host->sg_len; | ||
214 | } | ||
215 | |||
216 | static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags) | ||
217 | { | ||
218 | local_irq_save(*flags); | ||
219 | return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; | ||
220 | } | ||
221 | |||
222 | static void tmio_mmc_kunmap_atomic(struct scatterlist *sg, unsigned long *flags, void *virt) | ||
223 | { | ||
224 | kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ); | ||
225 | local_irq_restore(*flags); | ||
226 | } | ||
227 | |||
228 | #ifdef CONFIG_MMC_DEBUG | ||
229 | |||
230 | #define STATUS_TO_TEXT(a, status, i) \ | ||
231 | do { \ | ||
232 | if (status & TMIO_STAT_##a) { \ | ||
233 | if (i++) \ | ||
234 | printk(" | "); \ | ||
235 | printk(#a); \ | ||
236 | } \ | ||
237 | } while (0) | ||
238 | |||
239 | void pr_debug_status(u32 status) | ||
240 | { | ||
241 | int i = 0; | ||
242 | printk(KERN_DEBUG "status: %08x = ", status); | ||
243 | STATUS_TO_TEXT(CARD_REMOVE, status, i); | ||
244 | STATUS_TO_TEXT(CARD_INSERT, status, i); | ||
245 | STATUS_TO_TEXT(SIGSTATE, status, i); | ||
246 | STATUS_TO_TEXT(WRPROTECT, status, i); | ||
247 | STATUS_TO_TEXT(CARD_REMOVE_A, status, i); | ||
248 | STATUS_TO_TEXT(CARD_INSERT_A, status, i); | ||
249 | STATUS_TO_TEXT(SIGSTATE_A, status, i); | ||
250 | STATUS_TO_TEXT(CMD_IDX_ERR, status, i); | ||
251 | STATUS_TO_TEXT(STOPBIT_ERR, status, i); | ||
252 | STATUS_TO_TEXT(ILL_FUNC, status, i); | ||
253 | STATUS_TO_TEXT(CMD_BUSY, status, i); | ||
254 | STATUS_TO_TEXT(CMDRESPEND, status, i); | ||
255 | STATUS_TO_TEXT(DATAEND, status, i); | ||
256 | STATUS_TO_TEXT(CRCFAIL, status, i); | ||
257 | STATUS_TO_TEXT(DATATIMEOUT, status, i); | ||
258 | STATUS_TO_TEXT(CMDTIMEOUT, status, i); | ||
259 | STATUS_TO_TEXT(RXOVERFLOW, status, i); | ||
260 | STATUS_TO_TEXT(TXUNDERRUN, status, i); | ||
261 | STATUS_TO_TEXT(RXRDY, status, i); | ||
262 | STATUS_TO_TEXT(TXRQ, status, i); | ||
263 | STATUS_TO_TEXT(ILL_ACCESS, status, i); | ||
264 | printk("\n"); | ||
265 | } | ||
266 | |||
267 | #else | ||
268 | #define pr_debug_status(s) do { } while (0) | ||
269 | #endif | ||
270 | |||
271 | static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) | ||
272 | { | ||
273 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
274 | |||
275 | if (enable) { | ||
276 | host->sdio_irq_enabled = 1; | ||
277 | sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); | ||
278 | sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, | ||
279 | (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ)); | ||
280 | } else { | ||
281 | sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL); | ||
282 | sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); | ||
283 | host->sdio_irq_enabled = 0; | ||
284 | } | ||
285 | } | ||
286 | |||
287 | static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) | ||
288 | { | ||
289 | u32 clk = 0, clock; | ||
290 | |||
291 | if (new_clock) { | ||
292 | for (clock = host->mmc->f_min, clk = 0x80000080; | ||
293 | new_clock >= (clock<<1); clk >>= 1) | ||
294 | clock <<= 1; | ||
295 | clk |= 0x100; | ||
296 | } | ||
297 | |||
298 | if (host->set_clk_div) | ||
299 | host->set_clk_div(host->pdev, (clk>>22) & 1); | ||
300 | |||
301 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); | ||
302 | } | ||
303 | |||
304 | static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) | ||
305 | { | ||
306 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
307 | |||
308 | /* | ||
309 | * Testing on sh-mobile showed that SDIO IRQs are unmasked when | ||
310 | * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the | ||
311 | * device IRQ here and restore the SDIO IRQ mask before | ||
312 | * re-enabling the device IRQ. | ||
313 | */ | ||
314 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) | ||
315 | disable_irq(host->irq); | ||
316 | sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); | ||
317 | msleep(10); | ||
318 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) { | ||
319 | tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled); | ||
320 | enable_irq(host->irq); | ||
321 | } | ||
322 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 & | ||
323 | sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); | ||
324 | msleep(10); | ||
325 | } | ||
326 | |||
327 | static void tmio_mmc_clk_start(struct tmio_mmc_host *host) | ||
328 | { | ||
329 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
330 | |||
331 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | | ||
332 | sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); | ||
333 | msleep(10); | ||
334 | /* see comment in tmio_mmc_clk_stop above */ | ||
335 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) | ||
336 | disable_irq(host->irq); | ||
337 | sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); | ||
338 | msleep(10); | ||
339 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) { | ||
340 | tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled); | ||
341 | enable_irq(host->irq); | ||
342 | } | ||
343 | } | ||
344 | |||
345 | static void reset(struct tmio_mmc_host *host) | ||
346 | { | ||
347 | /* FIXME - should we set stop clock reg here */ | ||
348 | sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); | ||
349 | sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); | ||
350 | msleep(10); | ||
351 | sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); | ||
352 | sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); | ||
353 | msleep(10); | ||
354 | } | ||
355 | |||
356 | static void tmio_mmc_reset_work(struct work_struct *work) | ||
357 | { | ||
358 | struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, | ||
359 | delayed_reset_work.work); | ||
360 | struct mmc_request *mrq; | ||
361 | unsigned long flags; | ||
362 | |||
363 | spin_lock_irqsave(&host->lock, flags); | ||
364 | mrq = host->mrq; | ||
365 | |||
366 | /* request already finished */ | ||
367 | if (!mrq | ||
368 | || time_is_after_jiffies(host->last_req_ts + | ||
369 | msecs_to_jiffies(2000))) { | ||
370 | spin_unlock_irqrestore(&host->lock, flags); | ||
371 | return; | ||
372 | } | ||
373 | |||
374 | dev_warn(&host->pdev->dev, | ||
375 | "timeout waiting for hardware interrupt (CMD%u)\n", | ||
376 | mrq->cmd->opcode); | ||
377 | |||
378 | if (host->data) | ||
379 | host->data->error = -ETIMEDOUT; | ||
380 | else if (host->cmd) | ||
381 | host->cmd->error = -ETIMEDOUT; | ||
382 | else | ||
383 | mrq->cmd->error = -ETIMEDOUT; | ||
384 | |||
385 | host->cmd = NULL; | ||
386 | host->data = NULL; | ||
387 | host->mrq = NULL; | ||
388 | |||
389 | spin_unlock_irqrestore(&host->lock, flags); | ||
390 | |||
391 | reset(host); | ||
392 | |||
393 | mmc_request_done(host->mmc, mrq); | ||
394 | } | ||
395 | |||
396 | static void | ||
397 | tmio_mmc_finish_request(struct tmio_mmc_host *host) | ||
398 | { | ||
399 | struct mmc_request *mrq = host->mrq; | ||
400 | |||
401 | if (!mrq) | ||
402 | return; | ||
403 | |||
404 | host->mrq = NULL; | ||
405 | host->cmd = NULL; | ||
406 | host->data = NULL; | ||
407 | |||
408 | cancel_delayed_work(&host->delayed_reset_work); | ||
409 | |||
410 | mmc_request_done(host->mmc, mrq); | ||
411 | } | ||
412 | |||
413 | /* These are the bitmasks the tmio chip requires to implement the MMC response | ||
414 | * types. Note that R1 and R6 are the same in this scheme. */ | ||
415 | #define APP_CMD 0x0040 | ||
416 | #define RESP_NONE 0x0300 | ||
417 | #define RESP_R1 0x0400 | ||
418 | #define RESP_R1B 0x0500 | ||
419 | #define RESP_R2 0x0600 | ||
420 | #define RESP_R3 0x0700 | ||
421 | #define DATA_PRESENT 0x0800 | ||
422 | #define TRANSFER_READ 0x1000 | ||
423 | #define TRANSFER_MULTI 0x2000 | ||
424 | #define SECURITY_CMD 0x4000 | ||
425 | |||
426 | static int | ||
427 | tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) | ||
428 | { | ||
429 | struct mmc_data *data = host->data; | ||
430 | int c = cmd->opcode; | ||
431 | |||
432 | /* Command 12 is handled by hardware */ | ||
433 | if (cmd->opcode == 12 && !cmd->arg) { | ||
434 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001); | ||
435 | return 0; | ||
436 | } | ||
437 | |||
438 | switch (mmc_resp_type(cmd)) { | ||
439 | case MMC_RSP_NONE: c |= RESP_NONE; break; | ||
440 | case MMC_RSP_R1: c |= RESP_R1; break; | ||
441 | case MMC_RSP_R1B: c |= RESP_R1B; break; | ||
442 | case MMC_RSP_R2: c |= RESP_R2; break; | ||
443 | case MMC_RSP_R3: c |= RESP_R3; break; | ||
444 | default: | ||
445 | pr_debug("Unknown response type %d\n", mmc_resp_type(cmd)); | ||
446 | return -EINVAL; | ||
447 | } | ||
448 | |||
449 | host->cmd = cmd; | ||
450 | |||
451 | /* FIXME - this seems to be ok commented out but the spec suggest this bit | ||
452 | * should be set when issuing app commands. | ||
453 | * if(cmd->flags & MMC_FLAG_ACMD) | ||
454 | * c |= APP_CMD; | ||
455 | */ | ||
456 | if (data) { | ||
457 | c |= DATA_PRESENT; | ||
458 | if (data->blocks > 1) { | ||
459 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100); | ||
460 | c |= TRANSFER_MULTI; | ||
461 | } | ||
462 | if (data->flags & MMC_DATA_READ) | ||
463 | c |= TRANSFER_READ; | ||
464 | } | ||
465 | |||
466 | enable_mmc_irqs(host, TMIO_MASK_CMD); | ||
467 | |||
468 | /* Fire off the command */ | ||
469 | sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg); | ||
470 | sd_ctrl_write16(host, CTL_SD_CMD, c); | ||
471 | |||
472 | return 0; | ||
473 | } | ||
474 | |||
475 | /* | ||
476 | * This chip always returns (at least?) as much data as you ask for. | ||
477 | * I'm unsure what happens if you ask for less than a block. This should be | ||
478 | * looked into to ensure that a funny length read doesnt hose the controller. | ||
479 | */ | ||
480 | static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) | ||
481 | { | ||
482 | struct mmc_data *data = host->data; | ||
483 | void *sg_virt; | ||
484 | unsigned short *buf; | ||
485 | unsigned int count; | ||
486 | unsigned long flags; | ||
487 | |||
488 | if (!data) { | ||
489 | pr_debug("Spurious PIO IRQ\n"); | ||
490 | return; | ||
491 | } | ||
492 | |||
493 | sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); | ||
494 | buf = (unsigned short *)(sg_virt + host->sg_off); | ||
495 | |||
496 | count = host->sg_ptr->length - host->sg_off; | ||
497 | if (count > data->blksz) | ||
498 | count = data->blksz; | ||
499 | |||
500 | pr_debug("count: %08x offset: %08x flags %08x\n", | ||
501 | count, host->sg_off, data->flags); | ||
502 | |||
503 | /* Transfer the data */ | ||
504 | if (data->flags & MMC_DATA_READ) | ||
505 | sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); | ||
506 | else | ||
507 | sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); | ||
508 | |||
509 | host->sg_off += count; | ||
510 | |||
511 | tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt); | ||
512 | |||
513 | if (host->sg_off == host->sg_ptr->length) | ||
514 | tmio_mmc_next_sg(host); | ||
515 | |||
516 | return; | ||
517 | } | ||
518 | |||
519 | /* needs to be called with host->lock held */ | ||
520 | static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) | ||
521 | { | ||
522 | struct mmc_data *data = host->data; | ||
523 | struct mmc_command *stop; | ||
524 | |||
525 | host->data = NULL; | ||
526 | |||
527 | if (!data) { | ||
528 | dev_warn(&host->pdev->dev, "Spurious data end IRQ\n"); | ||
529 | return; | ||
530 | } | ||
531 | stop = data->stop; | ||
532 | |||
533 | /* FIXME - return correct transfer count on errors */ | ||
534 | if (!data->error) | ||
535 | data->bytes_xfered = data->blocks * data->blksz; | ||
536 | else | ||
537 | data->bytes_xfered = 0; | ||
538 | |||
539 | pr_debug("Completed data request\n"); | ||
540 | |||
541 | /* | ||
542 | * FIXME: other drivers allow an optional stop command of any given type | ||
543 | * which we dont do, as the chip can auto generate them. | ||
544 | * Perhaps we can be smarter about when to use auto CMD12 and | ||
545 | * only issue the auto request when we know this is the desired | ||
546 | * stop command, allowing fallback to the stop command the | ||
547 | * upper layers expect. For now, we do what works. | ||
548 | */ | ||
549 | |||
550 | if (data->flags & MMC_DATA_READ) { | ||
551 | if (!host->chan_rx) | ||
552 | disable_mmc_irqs(host, TMIO_MASK_READOP); | ||
553 | else | ||
554 | tmio_check_bounce_buffer(host); | ||
555 | dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", | ||
556 | host->mrq); | ||
557 | } else { | ||
558 | if (!host->chan_tx) | ||
559 | disable_mmc_irqs(host, TMIO_MASK_WRITEOP); | ||
560 | dev_dbg(&host->pdev->dev, "Complete Tx request %p\n", | ||
561 | host->mrq); | ||
562 | } | ||
563 | |||
564 | if (stop) { | ||
565 | if (stop->opcode == 12 && !stop->arg) | ||
566 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000); | ||
567 | else | ||
568 | BUG(); | ||
569 | } | ||
570 | |||
571 | tmio_mmc_finish_request(host); | ||
572 | } | ||
573 | |||
574 | static void tmio_mmc_data_irq(struct tmio_mmc_host *host) | ||
575 | { | ||
576 | struct mmc_data *data; | ||
577 | spin_lock(&host->lock); | ||
578 | data = host->data; | ||
579 | |||
580 | if (!data) | ||
581 | goto out; | ||
582 | |||
583 | if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) { | ||
584 | /* | ||
585 | * Has all data been written out yet? Testing on SuperH showed, | ||
586 | * that in most cases the first interrupt comes already with the | ||
587 | * BUSY status bit clear, but on some operations, like mount or | ||
588 | * in the beginning of a write / sync / umount, there is one | ||
589 | * DATAEND interrupt with the BUSY bit set, in this cases | ||
590 | * waiting for one more interrupt fixes the problem. | ||
591 | */ | ||
592 | if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) { | ||
593 | disable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
594 | tasklet_schedule(&host->dma_complete); | ||
595 | } | ||
596 | } else if (host->chan_rx && (data->flags & MMC_DATA_READ)) { | ||
597 | disable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
598 | tasklet_schedule(&host->dma_complete); | ||
599 | } else { | ||
600 | tmio_mmc_do_data_irq(host); | ||
601 | } | ||
602 | out: | ||
603 | spin_unlock(&host->lock); | ||
604 | } | ||
605 | |||
606 | static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, | ||
607 | unsigned int stat) | ||
608 | { | ||
609 | struct mmc_command *cmd = host->cmd; | ||
610 | int i, addr; | ||
611 | |||
612 | spin_lock(&host->lock); | ||
613 | |||
614 | if (!host->cmd) { | ||
615 | pr_debug("Spurious CMD irq\n"); | ||
616 | goto out; | ||
617 | } | ||
618 | |||
619 | host->cmd = NULL; | ||
620 | |||
621 | /* This controller is sicker than the PXA one. Not only do we need to | ||
622 | * drop the top 8 bits of the first response word, we also need to | ||
623 | * modify the order of the response for short response command types. | ||
624 | */ | ||
625 | |||
626 | for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4) | ||
627 | cmd->resp[i] = sd_ctrl_read32(host, addr); | ||
628 | |||
629 | if (cmd->flags & MMC_RSP_136) { | ||
630 | cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24); | ||
631 | cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24); | ||
632 | cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24); | ||
633 | cmd->resp[3] <<= 8; | ||
634 | } else if (cmd->flags & MMC_RSP_R3) { | ||
635 | cmd->resp[0] = cmd->resp[3]; | ||
636 | } | ||
637 | |||
638 | if (stat & TMIO_STAT_CMDTIMEOUT) | ||
639 | cmd->error = -ETIMEDOUT; | ||
640 | else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) | ||
641 | cmd->error = -EILSEQ; | ||
642 | |||
643 | /* If there is data to handle we enable data IRQs here, and | ||
644 | * we will ultimatley finish the request in the data_end handler. | ||
645 | * If theres no data or we encountered an error, finish now. | ||
646 | */ | ||
647 | if (host->data && !cmd->error) { | ||
648 | if (host->data->flags & MMC_DATA_READ) { | ||
649 | if (!host->chan_rx) | ||
650 | enable_mmc_irqs(host, TMIO_MASK_READOP); | ||
651 | } else { | ||
652 | if (!host->chan_tx) | ||
653 | enable_mmc_irqs(host, TMIO_MASK_WRITEOP); | ||
654 | else | ||
655 | tasklet_schedule(&host->dma_issue); | ||
656 | } | ||
657 | } else { | ||
658 | tmio_mmc_finish_request(host); | ||
659 | } | ||
660 | |||
661 | out: | ||
662 | spin_unlock(&host->lock); | ||
663 | |||
664 | return; | ||
665 | } | ||
666 | |||
667 | static irqreturn_t tmio_mmc_irq(int irq, void *devid) | ||
668 | { | ||
669 | struct tmio_mmc_host *host = devid; | ||
670 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
671 | unsigned int ireg, irq_mask, status; | ||
672 | unsigned int sdio_ireg, sdio_irq_mask, sdio_status; | ||
673 | |||
674 | pr_debug("MMC IRQ begin\n"); | ||
675 | |||
676 | status = sd_ctrl_read32(host, CTL_STATUS); | ||
677 | irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); | ||
678 | ireg = status & TMIO_MASK_IRQ & ~irq_mask; | ||
679 | |||
680 | sdio_ireg = 0; | ||
681 | if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) { | ||
682 | sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS); | ||
683 | sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK); | ||
684 | sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask; | ||
685 | |||
686 | sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL); | ||
687 | |||
688 | if (sdio_ireg && !host->sdio_irq_enabled) { | ||
689 | pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n", | ||
690 | sdio_status, sdio_irq_mask, sdio_ireg); | ||
691 | tmio_mmc_enable_sdio_irq(host->mmc, 0); | ||
692 | goto out; | ||
693 | } | ||
694 | 23 | ||
695 | if (host->mmc->caps & MMC_CAP_SDIO_IRQ && | 24 | #include "tmio_mmc.h" |
696 | sdio_ireg & TMIO_SDIO_STAT_IOIRQ) | ||
697 | mmc_signal_sdio_irq(host->mmc); | ||
698 | |||
699 | if (sdio_ireg) | ||
700 | goto out; | ||
701 | } | ||
702 | |||
703 | pr_debug_status(status); | ||
704 | pr_debug_status(ireg); | ||
705 | |||
706 | if (!ireg) { | ||
707 | disable_mmc_irqs(host, status & ~irq_mask); | ||
708 | |||
709 | pr_warning("tmio_mmc: Spurious irq, disabling! " | ||
710 | "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); | ||
711 | pr_debug_status(status); | ||
712 | |||
713 | goto out; | ||
714 | } | ||
715 | |||
716 | while (ireg) { | ||
717 | /* Card insert / remove attempts */ | ||
718 | if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { | ||
719 | ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT | | ||
720 | TMIO_STAT_CARD_REMOVE); | ||
721 | mmc_detect_change(host->mmc, msecs_to_jiffies(100)); | ||
722 | } | ||
723 | |||
724 | /* CRC and other errors */ | ||
725 | /* if (ireg & TMIO_STAT_ERR_IRQ) | ||
726 | * handled |= tmio_error_irq(host, irq, stat); | ||
727 | */ | ||
728 | |||
729 | /* Command completion */ | ||
730 | if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { | ||
731 | ack_mmc_irqs(host, | ||
732 | TMIO_STAT_CMDRESPEND | | ||
733 | TMIO_STAT_CMDTIMEOUT); | ||
734 | tmio_mmc_cmd_irq(host, status); | ||
735 | } | ||
736 | |||
737 | /* Data transfer */ | ||
738 | if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { | ||
739 | ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); | ||
740 | tmio_mmc_pio_irq(host); | ||
741 | } | ||
742 | |||
743 | /* Data transfer completion */ | ||
744 | if (ireg & TMIO_STAT_DATAEND) { | ||
745 | ack_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
746 | tmio_mmc_data_irq(host); | ||
747 | } | ||
748 | |||
749 | /* Check status - keep going until we've handled it all */ | ||
750 | status = sd_ctrl_read32(host, CTL_STATUS); | ||
751 | irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); | ||
752 | ireg = status & TMIO_MASK_IRQ & ~irq_mask; | ||
753 | |||
754 | pr_debug("Status at end of loop: %08x\n", status); | ||
755 | pr_debug_status(status); | ||
756 | } | ||
757 | pr_debug("MMC IRQ end\n"); | ||
758 | |||
759 | out: | ||
760 | return IRQ_HANDLED; | ||
761 | } | ||
762 | |||
763 | #ifdef CONFIG_TMIO_MMC_DMA | ||
764 | static void tmio_check_bounce_buffer(struct tmio_mmc_host *host) | ||
765 | { | ||
766 | if (host->sg_ptr == &host->bounce_sg) { | ||
767 | unsigned long flags; | ||
768 | void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags); | ||
769 | memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length); | ||
770 | tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr); | ||
771 | } | ||
772 | } | ||
773 | |||
774 | static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) | ||
775 | { | ||
776 | #if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) | ||
777 | /* Switch DMA mode on or off - SuperH specific? */ | ||
778 | sd_ctrl_write16(host, 0xd8, enable ? 2 : 0); | ||
779 | #endif | ||
780 | } | ||
781 | |||
782 | static void tmio_dma_complete(void *arg) | ||
783 | { | ||
784 | struct tmio_mmc_host *host = arg; | ||
785 | |||
786 | dev_dbg(&host->pdev->dev, "Command completed\n"); | ||
787 | |||
788 | if (!host->data) | ||
789 | dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n"); | ||
790 | else | ||
791 | enable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
792 | } | ||
793 | |||
794 | static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) | ||
795 | { | ||
796 | struct scatterlist *sg = host->sg_ptr, *sg_tmp; | ||
797 | struct dma_async_tx_descriptor *desc = NULL; | ||
798 | struct dma_chan *chan = host->chan_rx; | ||
799 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
800 | dma_cookie_t cookie; | ||
801 | int ret, i; | ||
802 | bool aligned = true, multiple = true; | ||
803 | unsigned int align = (1 << pdata->dma->alignment_shift) - 1; | ||
804 | |||
805 | for_each_sg(sg, sg_tmp, host->sg_len, i) { | ||
806 | if (sg_tmp->offset & align) | ||
807 | aligned = false; | ||
808 | if (sg_tmp->length & align) { | ||
809 | multiple = false; | ||
810 | break; | ||
811 | } | ||
812 | } | ||
813 | |||
814 | if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || | ||
815 | align >= MAX_ALIGN)) || !multiple) { | ||
816 | ret = -EINVAL; | ||
817 | goto pio; | ||
818 | } | ||
819 | |||
820 | /* The only sg element can be unaligned, use our bounce buffer then */ | ||
821 | if (!aligned) { | ||
822 | sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); | ||
823 | host->sg_ptr = &host->bounce_sg; | ||
824 | sg = host->sg_ptr; | ||
825 | } | ||
826 | |||
827 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); | ||
828 | if (ret > 0) | ||
829 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | ||
830 | DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
831 | |||
832 | if (desc) { | ||
833 | desc->callback = tmio_dma_complete; | ||
834 | desc->callback_param = host; | ||
835 | cookie = dmaengine_submit(desc); | ||
836 | dma_async_issue_pending(chan); | ||
837 | } | ||
838 | dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", | ||
839 | __func__, host->sg_len, ret, cookie, host->mrq); | ||
840 | |||
841 | pio: | ||
842 | if (!desc) { | ||
843 | /* DMA failed, fall back to PIO */ | ||
844 | if (ret >= 0) | ||
845 | ret = -EIO; | ||
846 | host->chan_rx = NULL; | ||
847 | dma_release_channel(chan); | ||
848 | /* Free the Tx channel too */ | ||
849 | chan = host->chan_tx; | ||
850 | if (chan) { | ||
851 | host->chan_tx = NULL; | ||
852 | dma_release_channel(chan); | ||
853 | } | ||
854 | dev_warn(&host->pdev->dev, | ||
855 | "DMA failed: %d, falling back to PIO\n", ret); | ||
856 | tmio_mmc_enable_dma(host, false); | ||
857 | } | ||
858 | |||
859 | dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, | ||
860 | desc, cookie, host->sg_len); | ||
861 | } | ||
862 | |||
863 | static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) | ||
864 | { | ||
865 | struct scatterlist *sg = host->sg_ptr, *sg_tmp; | ||
866 | struct dma_async_tx_descriptor *desc = NULL; | ||
867 | struct dma_chan *chan = host->chan_tx; | ||
868 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
869 | dma_cookie_t cookie; | ||
870 | int ret, i; | ||
871 | bool aligned = true, multiple = true; | ||
872 | unsigned int align = (1 << pdata->dma->alignment_shift) - 1; | ||
873 | |||
874 | for_each_sg(sg, sg_tmp, host->sg_len, i) { | ||
875 | if (sg_tmp->offset & align) | ||
876 | aligned = false; | ||
877 | if (sg_tmp->length & align) { | ||
878 | multiple = false; | ||
879 | break; | ||
880 | } | ||
881 | } | ||
882 | |||
883 | if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || | ||
884 | align >= MAX_ALIGN)) || !multiple) { | ||
885 | ret = -EINVAL; | ||
886 | goto pio; | ||
887 | } | ||
888 | |||
889 | /* The only sg element can be unaligned, use our bounce buffer then */ | ||
890 | if (!aligned) { | ||
891 | unsigned long flags; | ||
892 | void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags); | ||
893 | sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); | ||
894 | memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length); | ||
895 | tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr); | ||
896 | host->sg_ptr = &host->bounce_sg; | ||
897 | sg = host->sg_ptr; | ||
898 | } | ||
899 | |||
900 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); | ||
901 | if (ret > 0) | ||
902 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | ||
903 | DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
904 | |||
905 | if (desc) { | ||
906 | desc->callback = tmio_dma_complete; | ||
907 | desc->callback_param = host; | ||
908 | cookie = dmaengine_submit(desc); | ||
909 | } | ||
910 | dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", | ||
911 | __func__, host->sg_len, ret, cookie, host->mrq); | ||
912 | |||
913 | pio: | ||
914 | if (!desc) { | ||
915 | /* DMA failed, fall back to PIO */ | ||
916 | if (ret >= 0) | ||
917 | ret = -EIO; | ||
918 | host->chan_tx = NULL; | ||
919 | dma_release_channel(chan); | ||
920 | /* Free the Rx channel too */ | ||
921 | chan = host->chan_rx; | ||
922 | if (chan) { | ||
923 | host->chan_rx = NULL; | ||
924 | dma_release_channel(chan); | ||
925 | } | ||
926 | dev_warn(&host->pdev->dev, | ||
927 | "DMA failed: %d, falling back to PIO\n", ret); | ||
928 | tmio_mmc_enable_dma(host, false); | ||
929 | } | ||
930 | |||
931 | dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, | ||
932 | desc, cookie); | ||
933 | } | ||
934 | |||
935 | static void tmio_mmc_start_dma(struct tmio_mmc_host *host, | ||
936 | struct mmc_data *data) | ||
937 | { | ||
938 | if (data->flags & MMC_DATA_READ) { | ||
939 | if (host->chan_rx) | ||
940 | tmio_mmc_start_dma_rx(host); | ||
941 | } else { | ||
942 | if (host->chan_tx) | ||
943 | tmio_mmc_start_dma_tx(host); | ||
944 | } | ||
945 | } | ||
946 | |||
947 | static void tmio_issue_tasklet_fn(unsigned long priv) | ||
948 | { | ||
949 | struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; | ||
950 | struct dma_chan *chan = host->chan_tx; | ||
951 | |||
952 | dma_async_issue_pending(chan); | ||
953 | } | ||
954 | |||
955 | static void tmio_tasklet_fn(unsigned long arg) | ||
956 | { | ||
957 | struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; | ||
958 | unsigned long flags; | ||
959 | |||
960 | spin_lock_irqsave(&host->lock, flags); | ||
961 | |||
962 | if (!host->data) | ||
963 | goto out; | ||
964 | |||
965 | if (host->data->flags & MMC_DATA_READ) | ||
966 | dma_unmap_sg(host->chan_rx->device->dev, | ||
967 | host->sg_ptr, host->sg_len, | ||
968 | DMA_FROM_DEVICE); | ||
969 | else | ||
970 | dma_unmap_sg(host->chan_tx->device->dev, | ||
971 | host->sg_ptr, host->sg_len, | ||
972 | DMA_TO_DEVICE); | ||
973 | |||
974 | tmio_mmc_do_data_irq(host); | ||
975 | out: | ||
976 | spin_unlock_irqrestore(&host->lock, flags); | ||
977 | } | ||
978 | |||
979 | /* It might be necessary to make filter MFD specific */ | ||
980 | static bool tmio_mmc_filter(struct dma_chan *chan, void *arg) | ||
981 | { | ||
982 | dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg); | ||
983 | chan->private = arg; | ||
984 | return true; | ||
985 | } | ||
986 | |||
987 | static void tmio_mmc_request_dma(struct tmio_mmc_host *host, | ||
988 | struct tmio_mmc_data *pdata) | ||
989 | { | ||
990 | /* We can only either use DMA for both Tx and Rx or not use it at all */ | ||
991 | if (pdata->dma) { | ||
992 | dma_cap_mask_t mask; | ||
993 | |||
994 | dma_cap_zero(mask); | ||
995 | dma_cap_set(DMA_SLAVE, mask); | ||
996 | |||
997 | host->chan_tx = dma_request_channel(mask, tmio_mmc_filter, | ||
998 | pdata->dma->chan_priv_tx); | ||
999 | dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, | ||
1000 | host->chan_tx); | ||
1001 | |||
1002 | if (!host->chan_tx) | ||
1003 | return; | ||
1004 | |||
1005 | host->chan_rx = dma_request_channel(mask, tmio_mmc_filter, | ||
1006 | pdata->dma->chan_priv_rx); | ||
1007 | dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, | ||
1008 | host->chan_rx); | ||
1009 | |||
1010 | if (!host->chan_rx) { | ||
1011 | dma_release_channel(host->chan_tx); | ||
1012 | host->chan_tx = NULL; | ||
1013 | return; | ||
1014 | } | ||
1015 | |||
1016 | tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host); | ||
1017 | tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host); | ||
1018 | |||
1019 | tmio_mmc_enable_dma(host, true); | ||
1020 | } | ||
1021 | } | ||
1022 | |||
1023 | static void tmio_mmc_release_dma(struct tmio_mmc_host *host) | ||
1024 | { | ||
1025 | if (host->chan_tx) { | ||
1026 | struct dma_chan *chan = host->chan_tx; | ||
1027 | host->chan_tx = NULL; | ||
1028 | dma_release_channel(chan); | ||
1029 | } | ||
1030 | if (host->chan_rx) { | ||
1031 | struct dma_chan *chan = host->chan_rx; | ||
1032 | host->chan_rx = NULL; | ||
1033 | dma_release_channel(chan); | ||
1034 | } | ||
1035 | } | ||
1036 | #else | ||
1037 | static void tmio_check_bounce_buffer(struct tmio_mmc_host *host) | ||
1038 | { | ||
1039 | } | ||
1040 | |||
1041 | static void tmio_mmc_start_dma(struct tmio_mmc_host *host, | ||
1042 | struct mmc_data *data) | ||
1043 | { | ||
1044 | } | ||
1045 | |||
1046 | static void tmio_mmc_request_dma(struct tmio_mmc_host *host, | ||
1047 | struct tmio_mmc_data *pdata) | ||
1048 | { | ||
1049 | host->chan_tx = NULL; | ||
1050 | host->chan_rx = NULL; | ||
1051 | } | ||
1052 | |||
1053 | static void tmio_mmc_release_dma(struct tmio_mmc_host *host) | ||
1054 | { | ||
1055 | } | ||
1056 | #endif | ||
1057 | |||
1058 | static int tmio_mmc_start_data(struct tmio_mmc_host *host, | ||
1059 | struct mmc_data *data) | ||
1060 | { | ||
1061 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
1062 | |||
1063 | pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n", | ||
1064 | data->blksz, data->blocks); | ||
1065 | |||
1066 | /* Some hardware cannot perform 2 byte requests in 4 bit mode */ | ||
1067 | if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) { | ||
1068 | int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES; | ||
1069 | |||
1070 | if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) { | ||
1071 | pr_err("%s: %d byte block unsupported in 4 bit mode\n", | ||
1072 | mmc_hostname(host->mmc), data->blksz); | ||
1073 | return -EINVAL; | ||
1074 | } | ||
1075 | } | ||
1076 | |||
1077 | tmio_mmc_init_sg(host, data); | ||
1078 | host->data = data; | ||
1079 | |||
1080 | /* Set transfer length / blocksize */ | ||
1081 | sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); | ||
1082 | sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); | ||
1083 | |||
1084 | tmio_mmc_start_dma(host, data); | ||
1085 | |||
1086 | return 0; | ||
1087 | } | ||
1088 | |||
1089 | /* Process requests from the MMC layer */ | ||
1090 | static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) | ||
1091 | { | ||
1092 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
1093 | int ret; | ||
1094 | |||
1095 | if (host->mrq) | ||
1096 | pr_debug("request not null\n"); | ||
1097 | |||
1098 | host->last_req_ts = jiffies; | ||
1099 | wmb(); | ||
1100 | host->mrq = mrq; | ||
1101 | |||
1102 | if (mrq->data) { | ||
1103 | ret = tmio_mmc_start_data(host, mrq->data); | ||
1104 | if (ret) | ||
1105 | goto fail; | ||
1106 | } | ||
1107 | |||
1108 | ret = tmio_mmc_start_command(host, mrq->cmd); | ||
1109 | if (!ret) { | ||
1110 | schedule_delayed_work(&host->delayed_reset_work, | ||
1111 | msecs_to_jiffies(2000)); | ||
1112 | return; | ||
1113 | } | ||
1114 | |||
1115 | fail: | ||
1116 | host->mrq = NULL; | ||
1117 | mrq->cmd->error = ret; | ||
1118 | mmc_request_done(mmc, mrq); | ||
1119 | } | ||
1120 | |||
1121 | /* Set MMC clock / power. | ||
1122 | * Note: This controller uses a simple divider scheme, therefore it cannot | ||
1123 | * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as | ||
1124 | * MMC won't run that fast, it has to be clocked at 12MHz, which is the next | ||
1125 | * slowest setting. | ||
1126 | */ | ||
1127 | static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | ||
1128 | { | ||
1129 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
1130 | |||
1131 | if (ios->clock) | ||
1132 | tmio_mmc_set_clock(host, ios->clock); | ||
1133 | |||
1134 | /* Power sequence - OFF -> ON -> UP */ | ||
1135 | switch (ios->power_mode) { | ||
1136 | case MMC_POWER_OFF: /* power down SD bus */ | ||
1137 | if (host->set_pwr) | ||
1138 | host->set_pwr(host->pdev, 0); | ||
1139 | tmio_mmc_clk_stop(host); | ||
1140 | break; | ||
1141 | case MMC_POWER_ON: /* power up SD bus */ | ||
1142 | if (host->set_pwr) | ||
1143 | host->set_pwr(host->pdev, 1); | ||
1144 | break; | ||
1145 | case MMC_POWER_UP: /* start bus clock */ | ||
1146 | tmio_mmc_clk_start(host); | ||
1147 | break; | ||
1148 | } | ||
1149 | |||
1150 | switch (ios->bus_width) { | ||
1151 | case MMC_BUS_WIDTH_1: | ||
1152 | sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); | ||
1153 | break; | ||
1154 | case MMC_BUS_WIDTH_4: | ||
1155 | sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); | ||
1156 | break; | ||
1157 | } | ||
1158 | |||
1159 | /* Let things settle. Delay taken from the WinCE driver */ | ||
1160 | udelay(140); | ||
1161 | } | ||
1162 | |||
1163 | static int tmio_mmc_get_ro(struct mmc_host *mmc) | ||
1164 | { | ||
1165 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
1166 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
1167 | |||
1168 | return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || | ||
1169 | (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1; | ||
1170 | } | ||
1171 | |||
1172 | static int tmio_mmc_get_cd(struct mmc_host *mmc) | ||
1173 | { | ||
1174 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
1175 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
1176 | |||
1177 | if (!pdata->get_cd) | ||
1178 | return -ENOSYS; | ||
1179 | else | ||
1180 | return pdata->get_cd(host->pdev); | ||
1181 | } | ||
1182 | |||
1183 | static const struct mmc_host_ops tmio_mmc_ops = { | ||
1184 | .request = tmio_mmc_request, | ||
1185 | .set_ios = tmio_mmc_set_ios, | ||
1186 | .get_ro = tmio_mmc_get_ro, | ||
1187 | .get_cd = tmio_mmc_get_cd, | ||
1188 | .enable_sdio_irq = tmio_mmc_enable_sdio_irq, | ||
1189 | }; | ||
1190 | 25 | ||
1191 | #ifdef CONFIG_PM | 26 | #ifdef CONFIG_PM |
1192 | static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) | 27 | static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) |
@@ -1227,138 +62,54 @@ out: | |||
1227 | #define tmio_mmc_resume NULL | 62 | #define tmio_mmc_resume NULL |
1228 | #endif | 63 | #endif |
1229 | 64 | ||
1230 | static int __devinit tmio_mmc_probe(struct platform_device *dev) | 65 | static int __devinit tmio_mmc_probe(struct platform_device *pdev) |
1231 | { | 66 | { |
1232 | const struct mfd_cell *cell = mfd_get_cell(dev); | 67 | const struct mfd_cell *cell = mfd_get_cell(pdev); |
1233 | struct tmio_mmc_data *pdata; | 68 | struct tmio_mmc_data *pdata; |
1234 | struct resource *res_ctl; | ||
1235 | struct tmio_mmc_host *host; | 69 | struct tmio_mmc_host *host; |
1236 | struct mmc_host *mmc; | ||
1237 | int ret = -EINVAL; | 70 | int ret = -EINVAL; |
1238 | u32 irq_mask = TMIO_MASK_CMD; | ||
1239 | 71 | ||
1240 | if (dev->num_resources != 2) | 72 | if (pdev->num_resources != 2) |
1241 | goto out; | 73 | goto out; |
1242 | 74 | ||
1243 | res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0); | 75 | pdata = mfd_get_data(pdev); |
1244 | if (!res_ctl) | ||
1245 | goto out; | ||
1246 | |||
1247 | pdata = mfd_get_data(dev); | ||
1248 | if (!pdata || !pdata->hclk) | 76 | if (!pdata || !pdata->hclk) |
1249 | goto out; | 77 | goto out; |
1250 | 78 | ||
1251 | ret = -ENOMEM; | ||
1252 | |||
1253 | mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev); | ||
1254 | if (!mmc) | ||
1255 | goto out; | ||
1256 | |||
1257 | host = mmc_priv(mmc); | ||
1258 | host->mmc = mmc; | ||
1259 | host->pdev = dev; | ||
1260 | platform_set_drvdata(dev, mmc); | ||
1261 | |||
1262 | host->set_pwr = pdata->set_pwr; | ||
1263 | host->set_clk_div = pdata->set_clk_div; | ||
1264 | |||
1265 | /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ | ||
1266 | host->bus_shift = resource_size(res_ctl) >> 10; | ||
1267 | |||
1268 | host->ctl = ioremap(res_ctl->start, resource_size(res_ctl)); | ||
1269 | if (!host->ctl) | ||
1270 | goto host_free; | ||
1271 | |||
1272 | mmc->ops = &tmio_mmc_ops; | ||
1273 | mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities; | ||
1274 | mmc->f_max = pdata->hclk; | ||
1275 | mmc->f_min = mmc->f_max / 512; | ||
1276 | mmc->max_segs = 32; | ||
1277 | mmc->max_blk_size = 512; | ||
1278 | mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * | ||
1279 | mmc->max_segs; | ||
1280 | mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; | ||
1281 | mmc->max_seg_size = mmc->max_req_size; | ||
1282 | if (pdata->ocr_mask) | ||
1283 | mmc->ocr_avail = pdata->ocr_mask; | ||
1284 | else | ||
1285 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | ||
1286 | |||
1287 | /* Tell the MFD core we are ready to be enabled */ | 79 | /* Tell the MFD core we are ready to be enabled */ |
1288 | if (cell->enable) { | 80 | if (cell->enable) { |
1289 | ret = cell->enable(dev); | 81 | ret = cell->enable(pdev); |
1290 | if (ret) | 82 | if (ret) |
1291 | goto unmap_ctl; | 83 | goto out; |
1292 | } | 84 | } |
1293 | 85 | ||
1294 | tmio_mmc_clk_stop(host); | 86 | ret = tmio_mmc_host_probe(&host, pdev, pdata); |
1295 | reset(host); | ||
1296 | |||
1297 | ret = platform_get_irq(dev, 0); | ||
1298 | if (ret >= 0) | ||
1299 | host->irq = ret; | ||
1300 | else | ||
1301 | goto cell_disable; | ||
1302 | |||
1303 | disable_mmc_irqs(host, TMIO_MASK_ALL); | ||
1304 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) | ||
1305 | tmio_mmc_enable_sdio_irq(mmc, 0); | ||
1306 | |||
1307 | ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED | | ||
1308 | IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host); | ||
1309 | if (ret) | 87 | if (ret) |
1310 | goto cell_disable; | 88 | goto cell_disable; |
1311 | 89 | ||
1312 | spin_lock_init(&host->lock); | ||
1313 | |||
1314 | /* Init delayed work for request timeouts */ | ||
1315 | INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work); | ||
1316 | |||
1317 | /* See if we also get DMA */ | ||
1318 | tmio_mmc_request_dma(host, pdata); | ||
1319 | |||
1320 | mmc_add_host(mmc); | ||
1321 | |||
1322 | pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), | 90 | pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), |
1323 | (unsigned long)host->ctl, host->irq); | 91 | (unsigned long)host->ctl, host->irq); |
1324 | 92 | ||
1325 | /* Unmask the IRQs we want to know about */ | ||
1326 | if (!host->chan_rx) | ||
1327 | irq_mask |= TMIO_MASK_READOP; | ||
1328 | if (!host->chan_tx) | ||
1329 | irq_mask |= TMIO_MASK_WRITEOP; | ||
1330 | enable_mmc_irqs(host, irq_mask); | ||
1331 | |||
1332 | return 0; | 93 | return 0; |
1333 | 94 | ||
1334 | cell_disable: | 95 | cell_disable: |
1335 | if (cell->disable) | 96 | if (cell->disable) |
1336 | cell->disable(dev); | 97 | cell->disable(pdev); |
1337 | unmap_ctl: | ||
1338 | iounmap(host->ctl); | ||
1339 | host_free: | ||
1340 | mmc_free_host(mmc); | ||
1341 | out: | 98 | out: |
1342 | return ret; | 99 | return ret; |
1343 | } | 100 | } |
1344 | 101 | ||
1345 | static int __devexit tmio_mmc_remove(struct platform_device *dev) | 102 | static int __devexit tmio_mmc_remove(struct platform_device *pdev) |
1346 | { | 103 | { |
1347 | const struct mfd_cell *cell = mfd_get_cell(dev); | 104 | const struct mfd_cell *cell = mfd_get_cell(pdev); |
1348 | struct mmc_host *mmc = platform_get_drvdata(dev); | 105 | struct mmc_host *mmc = platform_get_drvdata(pdev); |
1349 | 106 | ||
1350 | platform_set_drvdata(dev, NULL); | 107 | platform_set_drvdata(pdev, NULL); |
1351 | 108 | ||
1352 | if (mmc) { | 109 | if (mmc) { |
1353 | struct tmio_mmc_host *host = mmc_priv(mmc); | 110 | tmio_mmc_host_remove(mmc_priv(mmc)); |
1354 | mmc_remove_host(mmc); | ||
1355 | cancel_delayed_work_sync(&host->delayed_reset_work); | ||
1356 | tmio_mmc_release_dma(host); | ||
1357 | free_irq(host->irq, host); | ||
1358 | if (cell->disable) | 111 | if (cell->disable) |
1359 | cell->disable(dev); | 112 | cell->disable(pdev); |
1360 | iounmap(host->ctl); | ||
1361 | mmc_free_host(mmc); | ||
1362 | } | 113 | } |
1363 | 114 | ||
1364 | return 0; | 115 | return 0; |
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h new file mode 100644 index 000000000000..099ed49a259b --- /dev/null +++ b/drivers/mmc/host/tmio_mmc.h | |||
@@ -0,0 +1,123 @@ | |||
1 | /* | ||
2 | * linux/drivers/mmc/host/tmio_mmc.h | ||
3 | * | ||
4 | * Copyright (C) 2007 Ian Molton | ||
5 | * Copyright (C) 2004 Ian Molton | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * Driver for the MMC / SD / SDIO cell found in: | ||
12 | * | ||
13 | * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3 | ||
14 | */ | ||
15 | |||
16 | #ifndef TMIO_MMC_H | ||
17 | #define TMIO_MMC_H | ||
18 | |||
19 | #include <linux/highmem.h> | ||
20 | #include <linux/mmc/tmio.h> | ||
21 | #include <linux/pagemap.h> | ||
22 | |||
23 | /* Definitions for values the CTL_SDIO_STATUS register can take. */ | ||
24 | #define TMIO_SDIO_STAT_IOIRQ 0x0001 | ||
25 | #define TMIO_SDIO_STAT_EXPUB52 0x4000 | ||
26 | #define TMIO_SDIO_STAT_EXWT 0x8000 | ||
27 | #define TMIO_SDIO_MASK_ALL 0xc007 | ||
28 | |||
29 | /* Define some IRQ masks */ | ||
30 | /* This is the mask used at reset by the chip */ | ||
31 | #define TMIO_MASK_ALL 0x837f031d | ||
32 | #define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND) | ||
33 | #define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND) | ||
34 | #define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \ | ||
35 | TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT) | ||
36 | #define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD) | ||
37 | |||
38 | struct tmio_mmc_data; | ||
39 | |||
40 | struct tmio_mmc_host { | ||
41 | void __iomem *ctl; | ||
42 | unsigned long bus_shift; | ||
43 | struct mmc_command *cmd; | ||
44 | struct mmc_request *mrq; | ||
45 | struct mmc_data *data; | ||
46 | struct mmc_host *mmc; | ||
47 | int irq; | ||
48 | unsigned int sdio_irq_enabled; | ||
49 | |||
50 | /* Callbacks for clock / power control */ | ||
51 | void (*set_pwr)(struct platform_device *host, int state); | ||
52 | void (*set_clk_div)(struct platform_device *host, int state); | ||
53 | |||
54 | /* pio related stuff */ | ||
55 | struct scatterlist *sg_ptr; | ||
56 | struct scatterlist *sg_orig; | ||
57 | unsigned int sg_len; | ||
58 | unsigned int sg_off; | ||
59 | |||
60 | struct platform_device *pdev; | ||
61 | struct tmio_mmc_data *pdata; | ||
62 | |||
63 | /* DMA support */ | ||
64 | bool force_pio; | ||
65 | struct dma_chan *chan_rx; | ||
66 | struct dma_chan *chan_tx; | ||
67 | struct tasklet_struct dma_complete; | ||
68 | struct tasklet_struct dma_issue; | ||
69 | struct scatterlist bounce_sg; | ||
70 | u8 *bounce_buf; | ||
71 | |||
72 | /* Track lost interrupts */ | ||
73 | struct delayed_work delayed_reset_work; | ||
74 | spinlock_t lock; | ||
75 | unsigned long last_req_ts; | ||
76 | }; | ||
77 | |||
78 | int tmio_mmc_host_probe(struct tmio_mmc_host **host, | ||
79 | struct platform_device *pdev, | ||
80 | struct tmio_mmc_data *pdata); | ||
81 | void tmio_mmc_host_remove(struct tmio_mmc_host *host); | ||
82 | void tmio_mmc_do_data_irq(struct tmio_mmc_host *host); | ||
83 | |||
84 | void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i); | ||
85 | void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i); | ||
86 | |||
87 | static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, | ||
88 | unsigned long *flags) | ||
89 | { | ||
90 | local_irq_save(*flags); | ||
91 | return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; | ||
92 | } | ||
93 | |||
94 | static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg, | ||
95 | unsigned long *flags, void *virt) | ||
96 | { | ||
97 | kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ); | ||
98 | local_irq_restore(*flags); | ||
99 | } | ||
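The two inline helpers above must always be used as a pair: interrupts stay disabled between the map and the unmap, and the pointer passed back to tmio_mmc_kunmap_atomic() must be the one returned by tmio_mmc_kmap_atomic(), since the sg offset is subtracted again before kunmap_atomic(). A minimal usage sketch follows; the helper name and the destination buffer are illustrative only, the real callers are the bounce-buffer copies in tmio_mmc_dma.c and the PIO loop in tmio_mmc_pio.c.

#include <linux/scatterlist.h>
#include <linux/string.h>

#include "tmio_mmc.h"

/* Sketch: copy one scatterlist element into a driver buffer "buf"
 * (assumed to be at least sg->length bytes). */
static void example_copy_sg(struct scatterlist *sg, u8 *buf)
{
	unsigned long flags;
	void *vaddr = tmio_mmc_kmap_atomic(sg, &flags);	/* IRQs now off */

	memcpy(buf, vaddr, sg->length);

	tmio_mmc_kunmap_atomic(sg, &flags, vaddr);	/* IRQs back on */
}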
100 | |||
101 | #if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) | ||
102 | void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data); | ||
103 | void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata); | ||
104 | void tmio_mmc_release_dma(struct tmio_mmc_host *host); | ||
105 | #else | ||
106 | static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host, | ||
107 | struct mmc_data *data) | ||
108 | { | ||
109 | } | ||
110 | |||
111 | static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host, | ||
112 | struct tmio_mmc_data *pdata) | ||
113 | { | ||
114 | host->chan_tx = NULL; | ||
115 | host->chan_rx = NULL; | ||
116 | } | ||
117 | |||
118 | static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host) | ||
119 | { | ||
120 | } | ||
121 | #endif | ||
122 | |||
123 | #endif | ||
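The header above is the whole contract between the tmio_mmc core and its users: a glue driver supplies a struct tmio_mmc_data, calls tmio_mmc_host_probe(), and tears down with tmio_mmc_host_remove(); the core registers the mmc_host and leaves it as the platform drvdata, as the updated tmio_mmc.c remove path above assumes. Below is a minimal, hypothetical glue driver sketch: the driver name and the use of platform_data to carry the tmio_mmc_data are assumptions for illustration, sh_mobile_sdhi.c being the real in-tree user of this API.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>

#include "tmio_mmc.h"

static int __devinit example_glue_probe(struct platform_device *pdev)
{
	struct tmio_mmc_data *pdata = pdev->dev.platform_data;	/* assumption */
	struct tmio_mmc_host *host;

	if (!pdata || !pdata->hclk)
		return -EINVAL;

	/* The core maps the registers, requests the IRQ, registers the
	 * mmc_host and stores it as drvdata of pdev. */
	return tmio_mmc_host_probe(&host, pdev, pdata);
}

static int __devexit example_glue_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	if (mmc)
		tmio_mmc_host_remove(mmc_priv(mmc));
	return 0;
}

static struct platform_driver example_glue_driver = {
	.driver = {
		.name	= "example-tmio-glue",
		.owner	= THIS_MODULE,
	},
	.probe	= example_glue_probe,
	.remove	= __devexit_p(example_glue_remove),
};

static int __init example_glue_init(void)
{
	return platform_driver_register(&example_glue_driver);
}
module_init(example_glue_init);

static void __exit example_glue_exit(void)
{
	platform_driver_unregister(&example_glue_driver);
}
module_exit(example_glue_exit);

MODULE_LICENSE("GPL v2");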
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c new file mode 100644 index 000000000000..d3de74ab633e --- /dev/null +++ b/drivers/mmc/host/tmio_mmc_dma.c | |||
@@ -0,0 +1,317 @@ | |||
1 | /* | ||
2 | * linux/drivers/mmc/tmio_mmc_dma.c | ||
3 | * | ||
4 | * Copyright (C) 2010-2011 Guennadi Liakhovetski | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * DMA function for TMIO MMC implementations | ||
11 | */ | ||
12 | |||
13 | #include <linux/device.h> | ||
14 | #include <linux/dmaengine.h> | ||
15 | #include <linux/mfd/tmio.h> | ||
16 | #include <linux/mmc/host.h> | ||
17 | #include <linux/mmc/tmio.h> | ||
18 | #include <linux/pagemap.h> | ||
19 | #include <linux/scatterlist.h> | ||
20 | |||
21 | #include "tmio_mmc.h" | ||
22 | |||
23 | #define TMIO_MMC_MIN_DMA_LEN 8 | ||
24 | |||
25 | static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) | ||
26 | { | ||
27 | #if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) | ||
28 | /* Switch DMA mode on or off - SuperH specific? */ | ||
29 | writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift)); | ||
30 | #endif | ||
31 | } | ||
32 | |||
33 | static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) | ||
34 | { | ||
35 | struct scatterlist *sg = host->sg_ptr, *sg_tmp; | ||
36 | struct dma_async_tx_descriptor *desc = NULL; | ||
37 | struct dma_chan *chan = host->chan_rx; | ||
38 | struct tmio_mmc_data *pdata = host->pdata; | ||
39 | dma_cookie_t cookie; | ||
40 | int ret, i; | ||
41 | bool aligned = true, multiple = true; | ||
42 | unsigned int align = (1 << pdata->dma->alignment_shift) - 1; | ||
43 | |||
44 | for_each_sg(sg, sg_tmp, host->sg_len, i) { | ||
45 | if (sg_tmp->offset & align) | ||
46 | aligned = false; | ||
47 | if (sg_tmp->length & align) { | ||
48 | multiple = false; | ||
49 | break; | ||
50 | } | ||
51 | } | ||
52 | |||
53 | if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || | ||
54 | (align & PAGE_MASK))) || !multiple) { | ||
55 | ret = -EINVAL; | ||
56 | goto pio; | ||
57 | } | ||
58 | |||
59 | if (sg->length < TMIO_MMC_MIN_DMA_LEN) { | ||
60 | host->force_pio = true; | ||
61 | return; | ||
62 | } | ||
63 | |||
64 | tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY); | ||
65 | |||
66 | /* If the only sg element is unaligned, use our bounce buffer */ | ||
67 | if (!aligned) { | ||
68 | sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); | ||
69 | host->sg_ptr = &host->bounce_sg; | ||
70 | sg = host->sg_ptr; | ||
71 | } | ||
72 | |||
73 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); | ||
74 | if (ret > 0) | ||
75 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | ||
76 | DMA_FROM_DEVICE, DMA_CTRL_ACK); | ||
77 | |||
78 | if (desc) { | ||
79 | cookie = dmaengine_submit(desc); | ||
80 | if (cookie < 0) { | ||
81 | desc = NULL; | ||
82 | ret = cookie; | ||
83 | } | ||
84 | } | ||
85 | dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", | ||
86 | __func__, host->sg_len, ret, cookie, host->mrq); | ||
87 | |||
88 | pio: | ||
89 | if (!desc) { | ||
90 | /* DMA failed, fall back to PIO */ | ||
91 | if (ret >= 0) | ||
92 | ret = -EIO; | ||
93 | host->chan_rx = NULL; | ||
94 | dma_release_channel(chan); | ||
95 | /* Free the Tx channel too */ | ||
96 | chan = host->chan_tx; | ||
97 | if (chan) { | ||
98 | host->chan_tx = NULL; | ||
99 | dma_release_channel(chan); | ||
100 | } | ||
101 | dev_warn(&host->pdev->dev, | ||
102 | "DMA failed: %d, falling back to PIO\n", ret); | ||
103 | tmio_mmc_enable_dma(host, false); | ||
104 | } | ||
105 | |||
106 | dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, | ||
107 | desc, cookie, host->sg_len); | ||
108 | } | ||
109 | |||
110 | static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) | ||
111 | { | ||
112 | struct scatterlist *sg = host->sg_ptr, *sg_tmp; | ||
113 | struct dma_async_tx_descriptor *desc = NULL; | ||
114 | struct dma_chan *chan = host->chan_tx; | ||
115 | struct tmio_mmc_data *pdata = host->pdata; | ||
116 | dma_cookie_t cookie; | ||
117 | int ret, i; | ||
118 | bool aligned = true, multiple = true; | ||
119 | unsigned int align = (1 << pdata->dma->alignment_shift) - 1; | ||
120 | |||
121 | for_each_sg(sg, sg_tmp, host->sg_len, i) { | ||
122 | if (sg_tmp->offset & align) | ||
123 | aligned = false; | ||
124 | if (sg_tmp->length & align) { | ||
125 | multiple = false; | ||
126 | break; | ||
127 | } | ||
128 | } | ||
129 | |||
130 | if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || | ||
131 | (align & PAGE_MASK))) || !multiple) { | ||
132 | ret = -EINVAL; | ||
133 | goto pio; | ||
134 | } | ||
135 | |||
136 | if (sg->length < TMIO_MMC_MIN_DMA_LEN) { | ||
137 | host->force_pio = true; | ||
138 | return; | ||
139 | } | ||
140 | |||
141 | tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ); | ||
142 | |||
144 | /* If the only sg element is unaligned, use our bounce buffer */ | ||
144 | if (!aligned) { | ||
145 | unsigned long flags; | ||
146 | void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags); | ||
147 | sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); | ||
148 | memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length); | ||
149 | tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr); | ||
150 | host->sg_ptr = &host->bounce_sg; | ||
151 | sg = host->sg_ptr; | ||
152 | } | ||
153 | |||
154 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); | ||
155 | if (ret > 0) | ||
156 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | ||
157 | DMA_TO_DEVICE, DMA_CTRL_ACK); | ||
158 | |||
159 | if (desc) { | ||
160 | cookie = dmaengine_submit(desc); | ||
161 | if (cookie < 0) { | ||
162 | desc = NULL; | ||
163 | ret = cookie; | ||
164 | } | ||
165 | } | ||
166 | dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", | ||
167 | __func__, host->sg_len, ret, cookie, host->mrq); | ||
168 | |||
169 | pio: | ||
170 | if (!desc) { | ||
171 | /* DMA failed, fall back to PIO */ | ||
172 | if (ret >= 0) | ||
173 | ret = -EIO; | ||
174 | host->chan_tx = NULL; | ||
175 | dma_release_channel(chan); | ||
176 | /* Free the Rx channel too */ | ||
177 | chan = host->chan_rx; | ||
178 | if (chan) { | ||
179 | host->chan_rx = NULL; | ||
180 | dma_release_channel(chan); | ||
181 | } | ||
182 | dev_warn(&host->pdev->dev, | ||
183 | "DMA failed: %d, falling back to PIO\n", ret); | ||
184 | tmio_mmc_enable_dma(host, false); | ||
185 | } | ||
186 | |||
187 | dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, | ||
188 | desc, cookie); | ||
189 | } | ||
190 | |||
191 | void tmio_mmc_start_dma(struct tmio_mmc_host *host, | ||
192 | struct mmc_data *data) | ||
193 | { | ||
194 | if (data->flags & MMC_DATA_READ) { | ||
195 | if (host->chan_rx) | ||
196 | tmio_mmc_start_dma_rx(host); | ||
197 | } else { | ||
198 | if (host->chan_tx) | ||
199 | tmio_mmc_start_dma_tx(host); | ||
200 | } | ||
201 | } | ||
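Both DMA setup paths above apply the same admission test before touching the dmaengine: every element's length must be a multiple of the DMA alignment, an unaligned offset is only tolerated for a single-element request that fits the one-page bounce buffer, and anything shorter than TMIO_MMC_MIN_DMA_LEN is forced to PIO anyway. The sketch below restates that predicate as a standalone helper; the helper name and the enum are made up for illustration, the real code keeps the checks inline in tmio_mmc_start_dma_rx()/_tx(), and the short-transfer check is left out of the sketch.

#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

enum tmio_dma_verdict { TMIO_DMA_DIRECT, TMIO_DMA_BOUNCE, TMIO_DMA_PIO };

/* Sketch: would this scatterlist be used for DMA as-is, via the bounce
 * buffer, or rejected (falling back to PIO)? */
static enum tmio_dma_verdict tmio_dma_check_sg(struct scatterlist *sg,
					       unsigned int sg_len,
					       unsigned int alignment_shift)
{
	unsigned int align = (1 << alignment_shift) - 1;
	struct scatterlist *sg_tmp;
	bool aligned = true;
	int i;

	for_each_sg(sg, sg_tmp, sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align)
			return TMIO_DMA_PIO;	/* "multiple" test failed */
	}

	if (aligned)
		return TMIO_DMA_DIRECT;

	/* One unaligned element is OK if it fits the single bounce page */
	if (sg_len == 1 && sg->length <= PAGE_CACHE_SIZE &&
	    !(align & PAGE_MASK))
		return TMIO_DMA_BOUNCE;

	return TMIO_DMA_PIO;
}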
202 | |||
203 | static void tmio_mmc_issue_tasklet_fn(unsigned long priv) | ||
204 | { | ||
205 | struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; | ||
206 | struct dma_chan *chan = NULL; | ||
207 | |||
208 | spin_lock_irq(&host->lock); | ||
209 | |||
210 | if (host && host->data) { | ||
211 | if (host->data->flags & MMC_DATA_READ) | ||
212 | chan = host->chan_rx; | ||
213 | else | ||
214 | chan = host->chan_tx; | ||
215 | } | ||
216 | |||
217 | spin_unlock_irq(&host->lock); | ||
218 | |||
219 | tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
220 | |||
221 | if (chan) | ||
222 | dma_async_issue_pending(chan); | ||
223 | } | ||
224 | |||
225 | static void tmio_mmc_tasklet_fn(unsigned long arg) | ||
226 | { | ||
227 | struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; | ||
228 | |||
229 | spin_lock_irq(&host->lock); | ||
230 | |||
231 | if (!host->data) | ||
232 | goto out; | ||
233 | |||
234 | if (host->data->flags & MMC_DATA_READ) | ||
235 | dma_unmap_sg(host->chan_rx->device->dev, | ||
236 | host->sg_ptr, host->sg_len, | ||
237 | DMA_FROM_DEVICE); | ||
238 | else | ||
239 | dma_unmap_sg(host->chan_tx->device->dev, | ||
240 | host->sg_ptr, host->sg_len, | ||
241 | DMA_TO_DEVICE); | ||
242 | |||
243 | tmio_mmc_do_data_irq(host); | ||
244 | out: | ||
245 | spin_unlock_irq(&host->lock); | ||
246 | } | ||
247 | |||
249 | /* It might be necessary to make this filter MFD-specific */ | ||
249 | static bool tmio_mmc_filter(struct dma_chan *chan, void *arg) | ||
250 | { | ||
251 | dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg); | ||
252 | chan->private = arg; | ||
253 | return true; | ||
254 | } | ||
255 | |||
256 | void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata) | ||
257 | { | ||
259 | /* We can only use DMA for both Tx and Rx, or not use it at all */ | ||
259 | if (pdata->dma) { | ||
260 | dma_cap_mask_t mask; | ||
261 | |||
262 | dma_cap_zero(mask); | ||
263 | dma_cap_set(DMA_SLAVE, mask); | ||
264 | |||
265 | host->chan_tx = dma_request_channel(mask, tmio_mmc_filter, | ||
266 | pdata->dma->chan_priv_tx); | ||
267 | dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, | ||
268 | host->chan_tx); | ||
269 | |||
270 | if (!host->chan_tx) | ||
271 | return; | ||
272 | |||
273 | host->chan_rx = dma_request_channel(mask, tmio_mmc_filter, | ||
274 | pdata->dma->chan_priv_rx); | ||
275 | dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, | ||
276 | host->chan_rx); | ||
277 | |||
278 | if (!host->chan_rx) | ||
279 | goto ereqrx; | ||
280 | |||
281 | host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA); | ||
282 | if (!host->bounce_buf) | ||
283 | goto ebouncebuf; | ||
284 | |||
285 | tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host); | ||
286 | tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host); | ||
287 | |||
288 | tmio_mmc_enable_dma(host, true); | ||
289 | |||
290 | return; | ||
291 | ebouncebuf: | ||
292 | dma_release_channel(host->chan_rx); | ||
293 | host->chan_rx = NULL; | ||
294 | ereqrx: | ||
295 | dma_release_channel(host->chan_tx); | ||
296 | host->chan_tx = NULL; | ||
297 | return; | ||
298 | } | ||
299 | } | ||
300 | |||
301 | void tmio_mmc_release_dma(struct tmio_mmc_host *host) | ||
302 | { | ||
303 | if (host->chan_tx) { | ||
304 | struct dma_chan *chan = host->chan_tx; | ||
305 | host->chan_tx = NULL; | ||
306 | dma_release_channel(chan); | ||
307 | } | ||
308 | if (host->chan_rx) { | ||
309 | struct dma_chan *chan = host->chan_rx; | ||
310 | host->chan_rx = NULL; | ||
311 | dma_release_channel(chan); | ||
312 | } | ||
313 | if (host->bounce_buf) { | ||
314 | free_pages((unsigned long)host->bounce_buf, 0); | ||
315 | host->bounce_buf = NULL; | ||
316 | } | ||
317 | } | ||
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c new file mode 100644 index 000000000000..6ae8d2f00ec7 --- /dev/null +++ b/drivers/mmc/host/tmio_mmc_pio.c | |||
@@ -0,0 +1,897 @@ | |||
1 | /* | ||
2 | * linux/drivers/mmc/host/tmio_mmc_pio.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Guennadi Liakhovetski | ||
5 | * Copyright (C) 2007 Ian Molton | ||
6 | * Copyright (C) 2004 Ian Molton | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * Driver for the MMC / SD / SDIO IP found in: | ||
13 | * | ||
14 | * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs | ||
15 | * | ||
16 | * This driver draws mainly on scattered spec sheets, reverse engineering | ||
17 | * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4-bit | ||
18 | * support). (Further 4-bit support from a later datasheet.) | ||
19 | * | ||
20 | * TODO: | ||
21 | * Investigate using a workqueue for PIO transfers | ||
22 | * Eliminate FIXMEs | ||
23 | * SDIO support | ||
24 | * Better Power management | ||
25 | * Handle MMC errors better | ||
26 | * double buffer support | ||
27 | * | ||
28 | */ | ||
29 | |||
30 | #include <linux/delay.h> | ||
31 | #include <linux/device.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/interrupt.h> | ||
34 | #include <linux/io.h> | ||
35 | #include <linux/irq.h> | ||
36 | #include <linux/mfd/tmio.h> | ||
37 | #include <linux/mmc/host.h> | ||
38 | #include <linux/mmc/tmio.h> | ||
39 | #include <linux/module.h> | ||
40 | #include <linux/pagemap.h> | ||
41 | #include <linux/platform_device.h> | ||
42 | #include <linux/scatterlist.h> | ||
43 | #include <linux/workqueue.h> | ||
44 | #include <linux/spinlock.h> | ||
45 | |||
46 | #include "tmio_mmc.h" | ||
47 | |||
48 | static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) | ||
49 | { | ||
50 | return readw(host->ctl + (addr << host->bus_shift)); | ||
51 | } | ||
52 | |||
53 | static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, | ||
54 | u16 *buf, int count) | ||
55 | { | ||
56 | readsw(host->ctl + (addr << host->bus_shift), buf, count); | ||
57 | } | ||
58 | |||
59 | static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr) | ||
60 | { | ||
61 | return readw(host->ctl + (addr << host->bus_shift)) | | ||
62 | readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16; | ||
63 | } | ||
64 | |||
65 | static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val) | ||
66 | { | ||
67 | writew(val, host->ctl + (addr << host->bus_shift)); | ||
68 | } | ||
69 | |||
70 | static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, | ||
71 | u16 *buf, int count) | ||
72 | { | ||
73 | writesw(host->ctl + (addr << host->bus_shift), buf, count); | ||
74 | } | ||
75 | |||
76 | static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) | ||
77 | { | ||
78 | writew(val, host->ctl + (addr << host->bus_shift)); | ||
79 | writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); | ||
80 | } | ||
81 | |||
82 | void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i) | ||
83 | { | ||
84 | u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ); | ||
85 | sd_ctrl_write32(host, CTL_IRQ_MASK, mask); | ||
86 | } | ||
87 | |||
88 | void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i) | ||
89 | { | ||
90 | u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ); | ||
91 | sd_ctrl_write32(host, CTL_IRQ_MASK, mask); | ||
92 | } | ||
93 | |||
94 | static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i) | ||
95 | { | ||
96 | sd_ctrl_write32(host, CTL_STATUS, ~i); | ||
97 | } | ||
98 | |||
99 | static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data) | ||
100 | { | ||
101 | host->sg_len = data->sg_len; | ||
102 | host->sg_ptr = data->sg; | ||
103 | host->sg_orig = data->sg; | ||
104 | host->sg_off = 0; | ||
105 | } | ||
106 | |||
107 | static int tmio_mmc_next_sg(struct tmio_mmc_host *host) | ||
108 | { | ||
109 | host->sg_ptr = sg_next(host->sg_ptr); | ||
110 | host->sg_off = 0; | ||
111 | return --host->sg_len; | ||
112 | } | ||
113 | |||
114 | #ifdef CONFIG_MMC_DEBUG | ||
115 | |||
116 | #define STATUS_TO_TEXT(a, status, i) \ | ||
117 | do { \ | ||
118 | if (status & TMIO_STAT_##a) { \ | ||
119 | if (i++) \ | ||
120 | printk(" | "); \ | ||
121 | printk(#a); \ | ||
122 | } \ | ||
123 | } while (0) | ||
124 | |||
125 | static void pr_debug_status(u32 status) | ||
126 | { | ||
127 | int i = 0; | ||
128 | printk(KERN_DEBUG "status: %08x = ", status); | ||
129 | STATUS_TO_TEXT(CARD_REMOVE, status, i); | ||
130 | STATUS_TO_TEXT(CARD_INSERT, status, i); | ||
131 | STATUS_TO_TEXT(SIGSTATE, status, i); | ||
132 | STATUS_TO_TEXT(WRPROTECT, status, i); | ||
133 | STATUS_TO_TEXT(CARD_REMOVE_A, status, i); | ||
134 | STATUS_TO_TEXT(CARD_INSERT_A, status, i); | ||
135 | STATUS_TO_TEXT(SIGSTATE_A, status, i); | ||
136 | STATUS_TO_TEXT(CMD_IDX_ERR, status, i); | ||
137 | STATUS_TO_TEXT(STOPBIT_ERR, status, i); | ||
138 | STATUS_TO_TEXT(ILL_FUNC, status, i); | ||
139 | STATUS_TO_TEXT(CMD_BUSY, status, i); | ||
140 | STATUS_TO_TEXT(CMDRESPEND, status, i); | ||
141 | STATUS_TO_TEXT(DATAEND, status, i); | ||
142 | STATUS_TO_TEXT(CRCFAIL, status, i); | ||
143 | STATUS_TO_TEXT(DATATIMEOUT, status, i); | ||
144 | STATUS_TO_TEXT(CMDTIMEOUT, status, i); | ||
145 | STATUS_TO_TEXT(RXOVERFLOW, status, i); | ||
146 | STATUS_TO_TEXT(TXUNDERRUN, status, i); | ||
147 | STATUS_TO_TEXT(RXRDY, status, i); | ||
148 | STATUS_TO_TEXT(TXRQ, status, i); | ||
149 | STATUS_TO_TEXT(ILL_ACCESS, status, i); | ||
150 | printk("\n"); | ||
151 | } | ||
152 | |||
153 | #else | ||
154 | #define pr_debug_status(s) do { } while (0) | ||
155 | #endif | ||
156 | |||
157 | static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) | ||
158 | { | ||
159 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
160 | |||
161 | if (enable) { | ||
162 | host->sdio_irq_enabled = 1; | ||
163 | sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); | ||
164 | sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, | ||
165 | (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ)); | ||
166 | } else { | ||
167 | sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL); | ||
168 | sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); | ||
169 | host->sdio_irq_enabled = 0; | ||
170 | } | ||
171 | } | ||
172 | |||
173 | static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) | ||
174 | { | ||
175 | u32 clk = 0, clock; | ||
176 | |||
177 | if (new_clock) { | ||
178 | for (clock = host->mmc->f_min, clk = 0x80000080; | ||
179 | new_clock >= (clock<<1); clk >>= 1) | ||
180 | clock <<= 1; | ||
181 | clk |= 0x100; | ||
182 | } | ||
183 | |||
184 | if (host->set_clk_div) | ||
185 | host->set_clk_div(host->pdev, (clk>>22) & 1); | ||
186 | |||
187 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); | ||
188 | } | ||
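As the comment on the old tmio_mmc_set_ios() notes, the controller only has a simple power-of-two divider. tmio_mmc_set_clock() starts at f_min (which the probe code sets to f_max / 512) with divider bits 0x80, halves the divider while the requested clock is still at least twice the current one, then ORs in the clock-enable bit 0x100. A standalone restatement plus one worked case follows; the function name is illustrative only, and the external set_clk_div() bit (clk >> 22) is ignored in the sketch.

/* Sketch: reproduce the divider walk from tmio_mmc_set_clock(). */
static u32 tmio_clk_setting(unsigned int f_min, unsigned int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = f_min, clk = 0x80000080;
		     new_clock >= (clock << 1); clk >>= 1)
			clock <<= 1;
		clk |= 0x100;		/* clock enable bit */
	}

	return clk & 0x1ff;		/* value written to CTL_SD_CARD_CLK_CTL */
}

/* With hclk = 24 MHz, f_min = 24000000 / 512 = 46875 Hz:
 *   tmio_clk_setting(46875, 400000) == 0x110
 * i.e. divider bits 0x10, giving 24 MHz / 64 = 375 kHz. */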
189 | |||
190 | static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) | ||
191 | { | ||
192 | struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); | ||
193 | |||
194 | /* implicit BUG_ON(!res) */ | ||
195 | if (resource_size(res) > 0x100) { | ||
196 | sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); | ||
197 | msleep(10); | ||
198 | } | ||
199 | |||
200 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 & | ||
201 | sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); | ||
202 | msleep(10); | ||
203 | } | ||
204 | |||
205 | static void tmio_mmc_clk_start(struct tmio_mmc_host *host) | ||
206 | { | ||
207 | struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); | ||
208 | |||
209 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | | ||
210 | sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); | ||
211 | msleep(10); | ||
212 | |||
213 | /* implicit BUG_ON(!res) */ | ||
214 | if (resource_size(res) > 0x100) { | ||
215 | sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); | ||
216 | msleep(10); | ||
217 | } | ||
218 | } | ||
219 | |||
220 | static void tmio_mmc_reset(struct tmio_mmc_host *host) | ||
221 | { | ||
222 | struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); | ||
223 | |||
224 | /* FIXME - should we set stop clock reg here */ | ||
225 | sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); | ||
226 | /* implicit BUG_ON(!res) */ | ||
227 | if (resource_size(res) > 0x100) | ||
228 | sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); | ||
229 | msleep(10); | ||
230 | sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); | ||
231 | if (resource_size(res) > 0x100) | ||
232 | sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); | ||
233 | msleep(10); | ||
234 | } | ||
235 | |||
236 | static void tmio_mmc_reset_work(struct work_struct *work) | ||
237 | { | ||
238 | struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, | ||
239 | delayed_reset_work.work); | ||
240 | struct mmc_request *mrq; | ||
241 | unsigned long flags; | ||
242 | |||
243 | spin_lock_irqsave(&host->lock, flags); | ||
244 | mrq = host->mrq; | ||
245 | |||
246 | /* request already finished */ | ||
247 | if (!mrq | ||
248 | || time_is_after_jiffies(host->last_req_ts + | ||
249 | msecs_to_jiffies(2000))) { | ||
250 | spin_unlock_irqrestore(&host->lock, flags); | ||
251 | return; | ||
252 | } | ||
253 | |||
254 | dev_warn(&host->pdev->dev, | ||
255 | "timeout waiting for hardware interrupt (CMD%u)\n", | ||
256 | mrq->cmd->opcode); | ||
257 | |||
258 | if (host->data) | ||
259 | host->data->error = -ETIMEDOUT; | ||
260 | else if (host->cmd) | ||
261 | host->cmd->error = -ETIMEDOUT; | ||
262 | else | ||
263 | mrq->cmd->error = -ETIMEDOUT; | ||
264 | |||
265 | host->cmd = NULL; | ||
266 | host->data = NULL; | ||
267 | host->mrq = NULL; | ||
268 | host->force_pio = false; | ||
269 | |||
270 | spin_unlock_irqrestore(&host->lock, flags); | ||
271 | |||
272 | tmio_mmc_reset(host); | ||
273 | |||
274 | mmc_request_done(host->mmc, mrq); | ||
275 | } | ||
276 | |||
277 | static void tmio_mmc_finish_request(struct tmio_mmc_host *host) | ||
278 | { | ||
279 | struct mmc_request *mrq = host->mrq; | ||
280 | |||
281 | if (!mrq) | ||
282 | return; | ||
283 | |||
284 | host->mrq = NULL; | ||
285 | host->cmd = NULL; | ||
286 | host->data = NULL; | ||
287 | host->force_pio = false; | ||
288 | |||
289 | cancel_delayed_work(&host->delayed_reset_work); | ||
290 | |||
291 | mmc_request_done(host->mmc, mrq); | ||
292 | } | ||
293 | |||
294 | /* These are the bitmasks the tmio chip requires to implement the MMC response | ||
295 | * types. Note that R1 and R6 are the same in this scheme. */ | ||
296 | #define APP_CMD 0x0040 | ||
297 | #define RESP_NONE 0x0300 | ||
298 | #define RESP_R1 0x0400 | ||
299 | #define RESP_R1B 0x0500 | ||
300 | #define RESP_R2 0x0600 | ||
301 | #define RESP_R3 0x0700 | ||
302 | #define DATA_PRESENT 0x0800 | ||
303 | #define TRANSFER_READ 0x1000 | ||
304 | #define TRANSFER_MULTI 0x2000 | ||
305 | #define SECURITY_CMD 0x4000 | ||
306 | |||
307 | static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) | ||
308 | { | ||
309 | struct mmc_data *data = host->data; | ||
310 | int c = cmd->opcode; | ||
311 | |||
312 | /* Command 12 is handled by hardware */ | ||
313 | if (cmd->opcode == 12 && !cmd->arg) { | ||
314 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001); | ||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | switch (mmc_resp_type(cmd)) { | ||
319 | case MMC_RSP_NONE: c |= RESP_NONE; break; | ||
320 | case MMC_RSP_R1: c |= RESP_R1; break; | ||
321 | case MMC_RSP_R1B: c |= RESP_R1B; break; | ||
322 | case MMC_RSP_R2: c |= RESP_R2; break; | ||
323 | case MMC_RSP_R3: c |= RESP_R3; break; | ||
324 | default: | ||
325 | pr_debug("Unknown response type %d\n", mmc_resp_type(cmd)); | ||
326 | return -EINVAL; | ||
327 | } | ||
328 | |||
329 | host->cmd = cmd; | ||
330 | |||
331 | /* FIXME - this seems to be OK commented out, but the spec suggests this bit | ||
332 | * should be set when issuing app commands. | ||
333 | * if(cmd->flags & MMC_FLAG_ACMD) | ||
334 | * c |= APP_CMD; | ||
335 | */ | ||
336 | if (data) { | ||
337 | c |= DATA_PRESENT; | ||
338 | if (data->blocks > 1) { | ||
339 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100); | ||
340 | c |= TRANSFER_MULTI; | ||
341 | } | ||
342 | if (data->flags & MMC_DATA_READ) | ||
343 | c |= TRANSFER_READ; | ||
344 | } | ||
345 | |||
346 | tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD); | ||
347 | |||
348 | /* Fire off the command */ | ||
349 | sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg); | ||
350 | sd_ctrl_write16(host, CTL_SD_CMD, c); | ||
351 | |||
352 | return 0; | ||
353 | } | ||
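The value written to CTL_SD_CMD is simply the opcode ORed with the response-type and transfer bits defined above. As a worked example using those #defines, a multi-block read (CMD18, R1 response, data present) becomes:

	/* sketch: 18 | 0x0400 | 0x0800 | 0x1000 | 0x2000 */
	u16 c = 18 | RESP_R1 | DATA_PRESENT | TRANSFER_READ | TRANSFER_MULTI;	/* = 0x3c12 */

with CTL_STOP_INTERNAL_ACTION additionally set to 0x100 beforehand, because data->blocks > 1.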
354 | |||
355 | /* | ||
356 | * This chip always returns (at least?) as much data as you ask for. | ||
357 | * I'm unsure what happens if you ask for less than a block. This should be | ||
358 | * looked into to ensure that a funny-length read doesn't hose the controller. | ||
359 | */ | ||
360 | static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) | ||
361 | { | ||
362 | struct mmc_data *data = host->data; | ||
363 | void *sg_virt; | ||
364 | unsigned short *buf; | ||
365 | unsigned int count; | ||
366 | unsigned long flags; | ||
367 | |||
368 | if ((host->chan_tx || host->chan_rx) && !host->force_pio) { | ||
369 | pr_err("PIO IRQ in DMA mode!\n"); | ||
370 | return; | ||
371 | } else if (!data) { | ||
372 | pr_debug("Spurious PIO IRQ\n"); | ||
373 | return; | ||
374 | } | ||
375 | |||
376 | sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); | ||
377 | buf = (unsigned short *)(sg_virt + host->sg_off); | ||
378 | |||
379 | count = host->sg_ptr->length - host->sg_off; | ||
380 | if (count > data->blksz) | ||
381 | count = data->blksz; | ||
382 | |||
383 | pr_debug("count: %08x offset: %08x flags %08x\n", | ||
384 | count, host->sg_off, data->flags); | ||
385 | |||
386 | /* Transfer the data */ | ||
387 | if (data->flags & MMC_DATA_READ) | ||
388 | sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); | ||
389 | else | ||
390 | sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); | ||
391 | |||
392 | host->sg_off += count; | ||
393 | |||
394 | tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt); | ||
395 | |||
396 | if (host->sg_off == host->sg_ptr->length) | ||
397 | tmio_mmc_next_sg(host); | ||
398 | |||
399 | return; | ||
400 | } | ||
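Each RXRDY or TXRQ interrupt therefore moves at most one block: count is clamped to data->blksz, the data port is accessed 16 bits at a time (count >> 1 words), and the driver only advances to the next scatterlist element once the current one is exhausted. For example, with a 512-byte block size and a 4096-byte sg element, eight PIO interrupts are needed to drain that element before tmio_mmc_next_sg() is called.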
401 | |||
402 | static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host) | ||
403 | { | ||
404 | if (host->sg_ptr == &host->bounce_sg) { | ||
405 | unsigned long flags; | ||
406 | void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags); | ||
407 | memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length); | ||
408 | tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr); | ||
409 | } | ||
410 | } | ||
411 | |||
412 | /* needs to be called with host->lock held */ | ||
413 | void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) | ||
414 | { | ||
415 | struct mmc_data *data = host->data; | ||
416 | struct mmc_command *stop; | ||
417 | |||
418 | host->data = NULL; | ||
419 | |||
420 | if (!data) { | ||
421 | dev_warn(&host->pdev->dev, "Spurious data end IRQ\n"); | ||
422 | return; | ||
423 | } | ||
424 | stop = data->stop; | ||
425 | |||
426 | /* FIXME - return correct transfer count on errors */ | ||
427 | if (!data->error) | ||
428 | data->bytes_xfered = data->blocks * data->blksz; | ||
429 | else | ||
430 | data->bytes_xfered = 0; | ||
431 | |||
432 | pr_debug("Completed data request\n"); | ||
433 | |||
434 | /* | ||
435 | * FIXME: other drivers allow an optional stop command of any given type | ||
436 | * which we don't do, as the chip can auto-generate them. | ||
437 | * Perhaps we can be smarter about when to use auto CMD12 and | ||
438 | * only issue the auto request when we know this is the desired | ||
439 | * stop command, allowing fallback to the stop command the | ||
440 | * upper layers expect. For now, we do what works. | ||
441 | */ | ||
442 | |||
443 | if (data->flags & MMC_DATA_READ) { | ||
444 | if (host->chan_rx && !host->force_pio) | ||
445 | tmio_mmc_check_bounce_buffer(host); | ||
446 | dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", | ||
447 | host->mrq); | ||
448 | } else { | ||
449 | dev_dbg(&host->pdev->dev, "Complete Tx request %p\n", | ||
450 | host->mrq); | ||
451 | } | ||
452 | |||
453 | if (stop) { | ||
454 | if (stop->opcode == 12 && !stop->arg) | ||
455 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000); | ||
456 | else | ||
457 | BUG(); | ||
458 | } | ||
459 | |||
460 | tmio_mmc_finish_request(host); | ||
461 | } | ||
462 | |||
463 | static void tmio_mmc_data_irq(struct tmio_mmc_host *host) | ||
464 | { | ||
465 | struct mmc_data *data; | ||
466 | spin_lock(&host->lock); | ||
467 | data = host->data; | ||
468 | |||
469 | if (!data) | ||
470 | goto out; | ||
471 | |||
472 | if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) { | ||
473 | /* | ||
474 | * Has all data been written out yet? Testing on SuperH showed | ||
475 | * that in most cases the first interrupt already comes with the | ||
476 | * BUSY status bit clear, but on some operations, like mount or | ||
477 | * at the beginning of a write / sync / umount, there is one | ||
478 | * DATAEND interrupt with the BUSY bit set; in these cases | ||
479 | * waiting for one more interrupt fixes the problem. | ||
480 | */ | ||
481 | if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) { | ||
482 | tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
483 | tasklet_schedule(&host->dma_complete); | ||
484 | } | ||
485 | } else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) { | ||
486 | tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
487 | tasklet_schedule(&host->dma_complete); | ||
488 | } else { | ||
489 | tmio_mmc_do_data_irq(host); | ||
490 | tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP); | ||
491 | } | ||
492 | out: | ||
493 | spin_unlock(&host->lock); | ||
494 | } | ||
495 | |||
496 | static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, | ||
497 | unsigned int stat) | ||
498 | { | ||
499 | struct mmc_command *cmd = host->cmd; | ||
500 | int i, addr; | ||
501 | |||
502 | spin_lock(&host->lock); | ||
503 | |||
504 | if (!host->cmd) { | ||
505 | pr_debug("Spurious CMD irq\n"); | ||
506 | goto out; | ||
507 | } | ||
508 | |||
509 | host->cmd = NULL; | ||
510 | |||
511 | /* This controller is sicker than the PXA one. Not only do we need to | ||
512 | * drop the top 8 bits of the first response word, we also need to | ||
513 | * modify the order of the response for short response command types. | ||
514 | */ | ||
515 | |||
516 | for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4) | ||
517 | cmd->resp[i] = sd_ctrl_read32(host, addr); | ||
518 | |||
519 | if (cmd->flags & MMC_RSP_136) { | ||
520 | cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24); | ||
521 | cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24); | ||
522 | cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24); | ||
523 | cmd->resp[3] <<= 8; | ||
524 | } else if (cmd->flags & MMC_RSP_R3) { | ||
525 | cmd->resp[0] = cmd->resp[3]; | ||
526 | } | ||
527 | |||
528 | if (stat & TMIO_STAT_CMDTIMEOUT) | ||
529 | cmd->error = -ETIMEDOUT; | ||
530 | else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) | ||
531 | cmd->error = -EILSEQ; | ||
532 | |||
533 | 	/* If there is data to handle, we enable data IRQs here, and | ||
534 | 	 * we will ultimately finish the request in the data_end handler. | ||
535 | 	 * If there's no data or we encountered an error, finish now. | ||
536 | */ | ||
537 | if (host->data && !cmd->error) { | ||
538 | if (host->data->flags & MMC_DATA_READ) { | ||
539 | if (host->force_pio || !host->chan_rx) | ||
540 | tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP); | ||
541 | else | ||
542 | tasklet_schedule(&host->dma_issue); | ||
543 | } else { | ||
544 | if (host->force_pio || !host->chan_tx) | ||
545 | tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP); | ||
546 | else | ||
547 | tasklet_schedule(&host->dma_issue); | ||
548 | } | ||
549 | } else { | ||
550 | tmio_mmc_finish_request(host); | ||
551 | } | ||
552 | |||
553 | out: | ||
554 | spin_unlock(&host->lock); | ||
555 | } | ||
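
The response fix-up in tmio_mmc_cmd_irq() above is easier to follow with concrete values. The sketch below is a standalone user-space illustration, not driver code; the raw register words are invented, and the mapping (resp[3] read from the lowest response register, resp[0] from the highest) follows the read loop above.

	#include <stdint.h>
	#include <stdio.h>

	/* Same reshuffle as the MMC_RSP_136 branch above: shift each word left
	 * by one byte and pull in the top byte of the following word. */
	static void tmio_fixup_r2(uint32_t resp[4])
	{
		resp[0] = (resp[0] << 8) | (resp[1] >> 24);
		resp[1] = (resp[1] << 8) | (resp[2] >> 24);
		resp[2] = (resp[2] << 8) | (resp[3] >> 24);
		resp[3] <<= 8;
	}

	int main(void)
	{
		/* Invented raw words, in resp[0]..resp[3] order */
		uint32_t resp[4] = { 0x00112233, 0x44556677, 0x8899aabb, 0xccddeeff };

		tmio_fixup_r2(resp);
		printf("%08x %08x %08x %08x\n", resp[0], resp[1], resp[2], resp[3]);
		/* prints: 11223344 55667788 99aabbcc ddeeff00 */
		return 0;
	}
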
556 | |||
557 | static irqreturn_t tmio_mmc_irq(int irq, void *devid) | ||
558 | { | ||
559 | struct tmio_mmc_host *host = devid; | ||
560 | struct tmio_mmc_data *pdata = host->pdata; | ||
561 | unsigned int ireg, irq_mask, status; | ||
562 | unsigned int sdio_ireg, sdio_irq_mask, sdio_status; | ||
563 | |||
564 | pr_debug("MMC IRQ begin\n"); | ||
565 | |||
566 | status = sd_ctrl_read32(host, CTL_STATUS); | ||
567 | irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); | ||
568 | ireg = status & TMIO_MASK_IRQ & ~irq_mask; | ||
569 | |||
570 | sdio_ireg = 0; | ||
571 | if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) { | ||
572 | sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS); | ||
573 | sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK); | ||
574 | sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask; | ||
575 | |||
576 | sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL); | ||
577 | |||
578 | if (sdio_ireg && !host->sdio_irq_enabled) { | ||
579 | pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n", | ||
580 | sdio_status, sdio_irq_mask, sdio_ireg); | ||
581 | tmio_mmc_enable_sdio_irq(host->mmc, 0); | ||
582 | goto out; | ||
583 | } | ||
584 | |||
585 | if (host->mmc->caps & MMC_CAP_SDIO_IRQ && | ||
586 | sdio_ireg & TMIO_SDIO_STAT_IOIRQ) | ||
587 | mmc_signal_sdio_irq(host->mmc); | ||
588 | |||
589 | if (sdio_ireg) | ||
590 | goto out; | ||
591 | } | ||
592 | |||
593 | pr_debug_status(status); | ||
594 | pr_debug_status(ireg); | ||
595 | |||
596 | if (!ireg) { | ||
597 | tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask); | ||
598 | |||
599 | pr_warning("tmio_mmc: Spurious irq, disabling! " | ||
600 | "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); | ||
601 | pr_debug_status(status); | ||
602 | |||
603 | goto out; | ||
604 | } | ||
605 | |||
606 | while (ireg) { | ||
607 | /* Card insert / remove attempts */ | ||
608 | if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { | ||
609 | tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT | | ||
610 | TMIO_STAT_CARD_REMOVE); | ||
611 | mmc_detect_change(host->mmc, msecs_to_jiffies(100)); | ||
612 | } | ||
613 | |||
614 | /* CRC and other errors */ | ||
615 | /* if (ireg & TMIO_STAT_ERR_IRQ) | ||
616 | * handled |= tmio_error_irq(host, irq, stat); | ||
617 | */ | ||
618 | |||
619 | /* Command completion */ | ||
620 | if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { | ||
621 | tmio_mmc_ack_mmc_irqs(host, | ||
622 | TMIO_STAT_CMDRESPEND | | ||
623 | TMIO_STAT_CMDTIMEOUT); | ||
624 | tmio_mmc_cmd_irq(host, status); | ||
625 | } | ||
626 | |||
627 | /* Data transfer */ | ||
628 | if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { | ||
629 | tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); | ||
630 | tmio_mmc_pio_irq(host); | ||
631 | } | ||
632 | |||
633 | /* Data transfer completion */ | ||
634 | if (ireg & TMIO_STAT_DATAEND) { | ||
635 | tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
636 | tmio_mmc_data_irq(host); | ||
637 | } | ||
638 | |||
639 | /* Check status - keep going until we've handled it all */ | ||
640 | status = sd_ctrl_read32(host, CTL_STATUS); | ||
641 | irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); | ||
642 | ireg = status & TMIO_MASK_IRQ & ~irq_mask; | ||
643 | |||
644 | pr_debug("Status at end of loop: %08x\n", status); | ||
645 | pr_debug_status(status); | ||
646 | } | ||
647 | pr_debug("MMC IRQ end\n"); | ||
648 | |||
649 | out: | ||
650 | return IRQ_HANDLED; | ||
651 | } | ||
652 | |||
653 | static int tmio_mmc_start_data(struct tmio_mmc_host *host, | ||
654 | struct mmc_data *data) | ||
655 | { | ||
656 | struct tmio_mmc_data *pdata = host->pdata; | ||
657 | |||
658 | pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n", | ||
659 | data->blksz, data->blocks); | ||
660 | |||
661 | 	/* Some hardware cannot perform 2-byte requests in 4-bit mode */ | ||
662 | if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) { | ||
663 | int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES; | ||
664 | |||
665 | if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) { | ||
666 | pr_err("%s: %d byte block unsupported in 4 bit mode\n", | ||
667 | mmc_hostname(host->mmc), data->blksz); | ||
668 | return -EINVAL; | ||
669 | } | ||
670 | } | ||
671 | |||
672 | tmio_mmc_init_sg(host, data); | ||
673 | host->data = data; | ||
674 | |||
675 | /* Set transfer length / blocksize */ | ||
676 | sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); | ||
677 | sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); | ||
678 | |||
679 | tmio_mmc_start_dma(host, data); | ||
680 | |||
681 | return 0; | ||
682 | } | ||
683 | |||
684 | /* Process requests from the MMC layer */ | ||
685 | static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) | ||
686 | { | ||
687 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
688 | int ret; | ||
689 | |||
690 | if (host->mrq) | ||
691 | pr_debug("request not null\n"); | ||
692 | |||
693 | host->last_req_ts = jiffies; | ||
694 | wmb(); | ||
695 | host->mrq = mrq; | ||
696 | |||
697 | if (mrq->data) { | ||
698 | ret = tmio_mmc_start_data(host, mrq->data); | ||
699 | if (ret) | ||
700 | goto fail; | ||
701 | } | ||
702 | |||
703 | ret = tmio_mmc_start_command(host, mrq->cmd); | ||
704 | if (!ret) { | ||
705 | schedule_delayed_work(&host->delayed_reset_work, | ||
706 | msecs_to_jiffies(2000)); | ||
707 | return; | ||
708 | } | ||
709 | |||
710 | fail: | ||
711 | host->mrq = NULL; | ||
712 | host->force_pio = false; | ||
713 | mrq->cmd->error = ret; | ||
714 | mmc_request_done(mmc, mrq); | ||
715 | } | ||
716 | |||
717 | /* Set MMC clock / power. | ||
718 |  * Note: This controller uses a simple divider scheme, so it cannot | ||
719 |  * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, | ||
720 |  * but as MMC won't run that fast, it has to be clocked at 12MHz, which | ||
721 |  * is the next slowest setting. | ||
722 | */ | ||
723 | static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | ||
724 | { | ||
725 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
726 | |||
727 | if (ios->clock) | ||
728 | tmio_mmc_set_clock(host, ios->clock); | ||
729 | |||
730 | /* Power sequence - OFF -> UP -> ON */ | ||
731 | if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { | ||
732 | /* power down SD bus */ | ||
733 | if (ios->power_mode == MMC_POWER_OFF && host->set_pwr) | ||
734 | host->set_pwr(host->pdev, 0); | ||
735 | tmio_mmc_clk_stop(host); | ||
736 | } else if (ios->power_mode == MMC_POWER_UP) { | ||
737 | /* power up SD bus */ | ||
738 | if (host->set_pwr) | ||
739 | host->set_pwr(host->pdev, 1); | ||
740 | } else { | ||
741 | /* start bus clock */ | ||
742 | tmio_mmc_clk_start(host); | ||
743 | } | ||
744 | |||
745 | switch (ios->bus_width) { | ||
746 | case MMC_BUS_WIDTH_1: | ||
747 | sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); | ||
748 | break; | ||
749 | case MMC_BUS_WIDTH_4: | ||
750 | sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); | ||
751 | break; | ||
752 | } | ||
753 | |||
754 | 	/* Let things settle. Delay taken from the WinCE driver. */ | ||
755 | udelay(140); | ||
756 | } | ||
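
The "simple divider scheme" mentioned in the comment before tmio_mmc_set_ios() means the bus clock can only be the base clock divided by a power of two, which is why a 24MHz base ends up at 12MHz for MMC. A minimal sketch of such a selection follows; the divider range and base rate are assumptions for illustration, not the driver's actual tmio_mmc_set_clock() logic.

	/* Illustrative only: halve the base clock until it no longer exceeds
	 * the requested rate.  Divider range and base rate are assumptions. */
	unsigned int pick_divided_clock(unsigned int hclk, unsigned int target)
	{
		unsigned int clk = hclk;	/* e.g. pdata->hclk, 24 MHz */
		unsigned int div;

		for (div = 1; clk > target && div < 512; div <<= 1)
			clk >>= 1;		/* only /1, /2, /4, ... exist */

		return clk;			/* 24 MHz base, 20 MHz MMC -> 12 MHz */
	}
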
757 | |||
758 | static int tmio_mmc_get_ro(struct mmc_host *mmc) | ||
759 | { | ||
760 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
761 | struct tmio_mmc_data *pdata = host->pdata; | ||
762 | |||
763 | return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || | ||
764 | !(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)); | ||
765 | } | ||
766 | |||
767 | static int tmio_mmc_get_cd(struct mmc_host *mmc) | ||
768 | { | ||
769 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
770 | struct tmio_mmc_data *pdata = host->pdata; | ||
771 | |||
772 | if (!pdata->get_cd) | ||
773 | return -ENOSYS; | ||
774 | else | ||
775 | return pdata->get_cd(host->pdev); | ||
776 | } | ||
777 | |||
778 | static const struct mmc_host_ops tmio_mmc_ops = { | ||
779 | .request = tmio_mmc_request, | ||
780 | .set_ios = tmio_mmc_set_ios, | ||
781 | .get_ro = tmio_mmc_get_ro, | ||
782 | .get_cd = tmio_mmc_get_cd, | ||
783 | .enable_sdio_irq = tmio_mmc_enable_sdio_irq, | ||
784 | }; | ||
785 | |||
786 | int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, | ||
787 | struct platform_device *pdev, | ||
788 | struct tmio_mmc_data *pdata) | ||
789 | { | ||
790 | struct tmio_mmc_host *_host; | ||
791 | struct mmc_host *mmc; | ||
792 | struct resource *res_ctl; | ||
793 | int ret; | ||
794 | u32 irq_mask = TMIO_MASK_CMD; | ||
795 | |||
796 | res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
797 | if (!res_ctl) | ||
798 | return -EINVAL; | ||
799 | |||
800 | mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev); | ||
801 | if (!mmc) | ||
802 | return -ENOMEM; | ||
803 | |||
804 | _host = mmc_priv(mmc); | ||
805 | _host->pdata = pdata; | ||
806 | _host->mmc = mmc; | ||
807 | _host->pdev = pdev; | ||
808 | platform_set_drvdata(pdev, mmc); | ||
809 | |||
810 | _host->set_pwr = pdata->set_pwr; | ||
811 | _host->set_clk_div = pdata->set_clk_div; | ||
812 | |||
813 | /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ | ||
814 | _host->bus_shift = resource_size(res_ctl) >> 10; | ||
815 | |||
816 | _host->ctl = ioremap(res_ctl->start, resource_size(res_ctl)); | ||
817 | if (!_host->ctl) { | ||
818 | ret = -ENOMEM; | ||
819 | goto host_free; | ||
820 | } | ||
821 | |||
822 | mmc->ops = &tmio_mmc_ops; | ||
823 | mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities; | ||
824 | mmc->f_max = pdata->hclk; | ||
825 | mmc->f_min = mmc->f_max / 512; | ||
826 | mmc->max_segs = 32; | ||
827 | mmc->max_blk_size = 512; | ||
828 | mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * | ||
829 | mmc->max_segs; | ||
830 | mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; | ||
831 | mmc->max_seg_size = mmc->max_req_size; | ||
832 | if (pdata->ocr_mask) | ||
833 | mmc->ocr_avail = pdata->ocr_mask; | ||
834 | else | ||
835 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | ||
836 | |||
837 | tmio_mmc_clk_stop(_host); | ||
838 | tmio_mmc_reset(_host); | ||
839 | |||
840 | ret = platform_get_irq(pdev, 0); | ||
841 | if (ret < 0) | ||
842 | goto unmap_ctl; | ||
843 | |||
844 | _host->irq = ret; | ||
845 | |||
846 | tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); | ||
847 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) | ||
848 | tmio_mmc_enable_sdio_irq(mmc, 0); | ||
849 | |||
850 | ret = request_irq(_host->irq, tmio_mmc_irq, IRQF_DISABLED | | ||
851 | IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), _host); | ||
852 | if (ret) | ||
853 | goto unmap_ctl; | ||
854 | |||
855 | spin_lock_init(&_host->lock); | ||
856 | |||
857 | /* Init delayed work for request timeouts */ | ||
858 | INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work); | ||
859 | |||
860 | /* See if we also get DMA */ | ||
861 | tmio_mmc_request_dma(_host, pdata); | ||
862 | |||
863 | mmc_add_host(mmc); | ||
864 | |||
865 | /* Unmask the IRQs we want to know about */ | ||
866 | if (!_host->chan_rx) | ||
867 | irq_mask |= TMIO_MASK_READOP; | ||
868 | if (!_host->chan_tx) | ||
869 | irq_mask |= TMIO_MASK_WRITEOP; | ||
870 | |||
871 | tmio_mmc_enable_mmc_irqs(_host, irq_mask); | ||
872 | |||
873 | *host = _host; | ||
874 | |||
875 | return 0; | ||
876 | |||
877 | unmap_ctl: | ||
878 | iounmap(_host->ctl); | ||
879 | host_free: | ||
880 | mmc_free_host(mmc); | ||
881 | |||
882 | return ret; | ||
883 | } | ||
884 | EXPORT_SYMBOL(tmio_mmc_host_probe); | ||
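
The bus_shift value computed from the resource size in tmio_mmc_host_probe() above (0x200 bytes of register space gives bus_shift 0, 0x400 gives 1) is consumed by the sd_ctrl_* accessors, which scale every nominal register offset by it. The accessor below is a sketch of that idea; the real helpers live in tmio_mmc.h (not shown in this diff) and may differ in detail, and it assumes <linux/io.h> and the host structure from that header.

	/* Sketch only: scale the nominal offset by bus_shift, so that on parts
	 * with 0x400 of register space each 16-bit register lands on a
	 * 4-byte boundary. */
	static inline u16 sd_ctrl_read16_sketch(struct tmio_mmc_host *host, int addr)
	{
		return readw(host->ctl + (addr << host->bus_shift));
	}
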
885 | |||
886 | void tmio_mmc_host_remove(struct tmio_mmc_host *host) | ||
887 | { | ||
888 | mmc_remove_host(host->mmc); | ||
889 | cancel_delayed_work_sync(&host->delayed_reset_work); | ||
890 | tmio_mmc_release_dma(host); | ||
891 | free_irq(host->irq, host); | ||
892 | iounmap(host->ctl); | ||
893 | mmc_free_host(host->mmc); | ||
894 | } | ||
895 | EXPORT_SYMBOL(tmio_mmc_host_remove); | ||
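
tmio_mmc_host_probe() and tmio_mmc_host_remove() are exported precisely so that platform glue drivers (such as the SH-Mobile SDHI one) can wrap them. The following hypothetical, minimal glue driver shows the intended calling pattern only; the driver name is made up, the platform data is assumed to come from board code, and the real SDHI glue is more involved.

	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/mmc/host.h>
	#include <linux/mfd/tmio.h>

	#include "tmio_mmc.h"	/* assumed to declare tmio_mmc_host_probe()/_remove() */

	static int __devinit example_sdhi_probe(struct platform_device *pdev)
	{
		struct tmio_mmc_data *pdata = pdev->dev.platform_data;
		struct tmio_mmc_host *host;

		if (!pdata)
			return -EINVAL;

		/* pdata->hclk, capabilities, flags etc. come from board code */
		return tmio_mmc_host_probe(&host, pdev, pdata);
	}

	static int __devexit example_sdhi_remove(struct platform_device *pdev)
	{
		/* probe stored the mmc_host as drvdata, see tmio_mmc_host_probe() */
		struct mmc_host *mmc = platform_get_drvdata(pdev);

		tmio_mmc_host_remove(mmc_priv(mmc));
		return 0;
	}

	static struct platform_driver example_sdhi_driver = {
		.driver		= {
			.name	= "example_sdhi",
			.owner	= THIS_MODULE,
		},
		.probe		= example_sdhi_probe,
		.remove		= __devexit_p(example_sdhi_remove),
	};

	static int __init example_sdhi_init(void)
	{
		return platform_driver_register(&example_sdhi_driver);
	}
	module_init(example_sdhi_init);

	static void __exit example_sdhi_exit(void)
	{
		platform_driver_unregister(&example_sdhi_driver);
	}
	module_exit(example_sdhi_exit);

	MODULE_LICENSE("GPL v2");
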
896 | |||
897 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index 8c5b4881ccd6..4dfe2c02ea91 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c | |||
@@ -1087,14 +1087,13 @@ static int __devinit via_sd_probe(struct pci_dev *pcidev, | |||
1087 | struct mmc_host *mmc; | 1087 | struct mmc_host *mmc; |
1088 | struct via_crdr_mmc_host *sdhost; | 1088 | struct via_crdr_mmc_host *sdhost; |
1089 | u32 base, len; | 1089 | u32 base, len; |
1090 | u8 rev, gatt; | 1090 | u8 gatt; |
1091 | int ret; | 1091 | int ret; |
1092 | 1092 | ||
1093 | pci_read_config_byte(pcidev, PCI_CLASS_REVISION, &rev); | ||
1094 | pr_info(DRV_NAME | 1093 | pr_info(DRV_NAME |
1095 | ": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n", | 1094 | ": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n", |
1096 | pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device, | 1095 | pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device, |
1097 | (int)rev); | 1096 | (int)pcidev->revision); |
1098 | 1097 | ||
1099 | ret = pci_enable_device(pcidev); | 1098 | ret = pci_enable_device(pcidev); |
1100 | if (ret) | 1099 | if (ret) |