author		Linus Torvalds <torvalds@linux-foundation.org>	2017-05-02 20:34:32 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-02 20:34:32 -0400
commit		be580e7522eecfcf31c70abdf6fa0ae77b2e293b (patch)
tree		1137d880a002ef342f9b1ab77331144c9ed956cf
parent		8d65b08debc7e62b2c6032d7fe7389d895b92cbc (diff)
parent		a627f025eb0534052ff451427c16750b3530634c (diff)
Merge tag 'mmc-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc
Pull MMC updates from Ulf Hansson:

 "MMC core:
   - Continue to re-factor code to prepare for eMMC CMDQ and blkmq support
   - Introduce queue semantics to prepare for eMMC CMDQ and blkmq support
   - Add helper functions to manage temporary enable/disable of eMMC CMDQ
   - Improve wait-busy detection for SDIO

  MMC host:
   - cavium: Add driver to support Cavium controllers
   - cavium: Extend Cavium driver to support Octeon and ThunderX SOCs
   - bcm2835: Add new driver for Broadcom BCM2835 controller
   - sdhci-xenon: Add driver to support Marvell Xenon SDHCI controller
   - sdhci-tegra: Add support for the Tegra186 variant
   - sdhci-of-esdhc: Support for UHS-I SD cards
   - sdhci-of-esdhc: Support for eMMC HS200 cards
   - sdhci-cadence: Add eMMC HS400 enhanced strobe support
   - sdhci-esdhc-imx: Reset tuning circuit when needed
   - sdhci-pci: Modernize and clean-up some PM related code
   - sdhci-pci: Avoid re-tuning at runtime PM for some Intel devices
   - sdhci-pci|acpi: Use aggressive PM for some Intel BYT controllers
   - sdhci: Re-factoring and modernizations
   - sdhci: Optimize delay loops
   - sdhci: Improve register dump print format
   - sdhci: Add support for the Command Queue Engine
   - meson-gx: Various improvements and clean-ups
   - meson-gx: Add support for CMD23
   - meson-gx: Basic tuning support to avoid CRC errors
   - s3cmci: Enable probing via DT
   - mediatek: Improve tuning support for eMMC HS200 and HS400 mode
   - tmio: Improve DMA support
   - tmio: Use correct response for CMD12
   - dw_mmc: Minor improvements and clean-ups"

* tag 'mmc-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc: (148 commits)
  mmc: sdhci-of-esdhc: limit SD clock for ls1012a/ls1046a
  mmc: sdhci-of-esdhc: poll ESDHC_CLOCK_STABLE bit with udelay
  mmc: sdhci-xenon: Fix default value of LOGIC_TIMING_ADJUST for eMMC5.0 PHY
  mmc: sdhci-xenon: Fix the work flow in xenon_remove().
  MIPS: Octeon: cavium_octeon_defconfig: Enable Octeon MMC
  mmc: sdhci-xenon: Remove redundant dev_err call in get_dt_pad_ctrl_data()
  mmc: cavium: Use module_pci_driver to simplify the code
  mmc: cavium: Add MMC support for Octeon SOCs.
  mmc: cavium: Fix detection of block or byte addressing.
  mmc: core: Export API to allow hosts to get the card address
  mmc: sdio: Fix sdio wait busy implement limitation
  mmc: sdhci-esdhc-imx: reset tuning circuit when power on mmc card
  clk: apn806: fix spelling mistake: "mising" -> "missing"
  mmc: sdhci-of-esdhc: add delay between tuning cycles
  mmc: sdhci: Control the delay between tuning commands
  mmc: sdhci-of-esdhc: add tuning support
  mmc: sdhci-of-esdhc: add support for signal voltage switch
  mmc: sdhci-of-esdhc: add peripheral clock support
  mmc: sdhci-pci: Allow for 3 bytes from Intel DSM
  mmc: cavium: Fix a shift wrapping bug
  ...
-rw-r--r--	Documentation/devicetree/bindings/mmc/brcm,bcm2835-sdhost.txt	| 23
-rw-r--r--	Documentation/devicetree/bindings/mmc/cavium-mmc.txt	| 57
-rw-r--r--	Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt	| 170
-rw-r--r--	Documentation/devicetree/bindings/mmc/mtk-sd.txt	| 12
-rw-r--r--	Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt	| 12
-rw-r--r--	Documentation/devicetree/bindings/mmc/renesas,mmcif.txt	| 8
-rw-r--r--	Documentation/devicetree/bindings/mmc/samsung,s3cmci.txt	| 42
-rw-r--r--	Documentation/devicetree/bindings/mmc/sdhci-cadence.txt	| 48
-rw-r--r--	Documentation/mmc/mmc-dev-attrs.txt	| 1
-rw-r--r--	MAINTAINERS	| 15
-rw-r--r--	arch/arm64/boot/dts/marvell/armada-ap806.dtsi	| 3
-rw-r--r--	arch/mips/configs/cavium_octeon_defconfig	| 5
-rw-r--r--	drivers/clk/mvebu/ap806-system-controller.c	| 21
-rw-r--r--	drivers/mmc/core/block.c	| 300
-rw-r--r--	drivers/mmc/core/core.c	| 193
-rw-r--r--	drivers/mmc/core/mmc.c	| 9
-rw-r--r--	drivers/mmc/core/mmc_ops.c	| 36
-rw-r--r--	drivers/mmc/core/mmc_ops.h	| 2
-rw-r--r--	drivers/mmc/core/mmc_test.c	| 14
-rw-r--r--	drivers/mmc/core/queue.c	| 307
-rw-r--r--	drivers/mmc/core/queue.h	| 12
-rw-r--r--	drivers/mmc/core/sd.c	| 4
-rw-r--r--	drivers/mmc/core/sd_ops.c	| 19
-rw-r--r--	drivers/mmc/core/sd_ops.h	| 2
-rw-r--r--	drivers/mmc/core/sdio_io.c	| 54
-rw-r--r--	drivers/mmc/core/sdio_ops.c	| 9
-rw-r--r--	drivers/mmc/core/sdio_ops.h	| 10
-rw-r--r--	drivers/mmc/host/Kconfig	| 43
-rw-r--r--	drivers/mmc/host/Makefile	| 8
-rw-r--r--	drivers/mmc/host/android-goldfish.c	| 10
-rw-r--r--	drivers/mmc/host/atmel-mci.c	| 30
-rw-r--r--	drivers/mmc/host/bcm2835.c	| 1466
-rw-r--r--	drivers/mmc/host/cavium-octeon.c	| 351
-rw-r--r--	drivers/mmc/host/cavium-thunderx.c	| 187
-rw-r--r--	drivers/mmc/host/cavium.c	| 1090
-rw-r--r--	drivers/mmc/host/cavium.h	| 215
-rw-r--r--	drivers/mmc/host/davinci_mmc.c	| 14
-rw-r--r--	drivers/mmc/host/dw_mmc.c	| 397
-rw-r--r--	drivers/mmc/host/jz4740_mmc.c	| 9
-rw-r--r--	drivers/mmc/host/meson-gx-mmc.c	| 590
-rw-r--r--	drivers/mmc/host/mmc_spi.c	| 5
-rw-r--r--	drivers/mmc/host/mmci.c	| 20
-rw-r--r--	drivers/mmc/host/moxart-mmc.c	| 8
-rw-r--r--	drivers/mmc/host/mtk-sd.c	| 176
-rw-r--r--	drivers/mmc/host/mvsdio.c	| 11
-rw-r--r--	drivers/mmc/host/omap_hsmmc.c	| 21
-rw-r--r--	drivers/mmc/host/s3cmci.c	| 261
-rw-r--r--	drivers/mmc/host/sdhci-acpi.c	| 18
-rw-r--r--	drivers/mmc/host/sdhci-brcmstb.c	| 3
-rw-r--r--	drivers/mmc/host/sdhci-cadence.c	| 129
-rw-r--r--	drivers/mmc/host/sdhci-esdhc-imx.c	| 32
-rw-r--r--	drivers/mmc/host/sdhci-esdhc.h	| 7
-rw-r--r--	drivers/mmc/host/sdhci-msm.c	| 8
-rw-r--r--	drivers/mmc/host/sdhci-of-arasan.c	| 26
-rw-r--r--	drivers/mmc/host/sdhci-of-at91.c	| 5
-rw-r--r--	drivers/mmc/host/sdhci-of-esdhc.c	| 194
-rw-r--r--	drivers/mmc/host/sdhci-pci-core.c	| 562
-rw-r--r--	drivers/mmc/host/sdhci-pci-data.c	| 3
-rw-r--r--	drivers/mmc/host/sdhci-pci-o2micro.c	| 4
-rw-r--r--	drivers/mmc/host/sdhci-pci.h	| 24
-rw-r--r--	drivers/mmc/host/sdhci-pltfm.c	| 3
-rw-r--r--	drivers/mmc/host/sdhci-pxav2.c	| 9
-rw-r--r--	drivers/mmc/host/sdhci-pxav3.c	| 12
-rw-r--r--	drivers/mmc/host/sdhci-s3c.c	| 10
-rw-r--r--	drivers/mmc/host/sdhci-sirf.c	| 3
-rw-r--r--	drivers/mmc/host/sdhci-spear.c	| 3
-rw-r--r--	drivers/mmc/host/sdhci-st.c	| 8
-rw-r--r--	drivers/mmc/host/sdhci-tegra.c	| 59
-rw-r--r--	drivers/mmc/host/sdhci-xenon-phy.c	| 837
-rw-r--r--	drivers/mmc/host/sdhci-xenon.c	| 548
-rw-r--r--	drivers/mmc/host/sdhci-xenon.h	| 101
-rw-r--r--	drivers/mmc/host/sdhci.c	| 453
-rw-r--r--	drivers/mmc/host/sdhci.h	| 65
-rw-r--r--	drivers/mmc/host/sunxi-mmc.c	| 16
-rw-r--r--	drivers/mmc/host/tmio_mmc.h	| 12
-rw-r--r--	drivers/mmc/host/tmio_mmc_dma.c	| 61
-rw-r--r--	drivers/mmc/host/tmio_mmc_pio.c	| 36
-rw-r--r--	include/linux/mmc/card.h	| 10
-rw-r--r--	include/linux/mmc/host.h	| 6
79 files changed, 7948 insertions, 1619 deletions
diff --git a/Documentation/devicetree/bindings/mmc/brcm,bcm2835-sdhost.txt b/Documentation/devicetree/bindings/mmc/brcm,bcm2835-sdhost.txt
new file mode 100644
index 000000000000..d876580ae3b8
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/brcm,bcm2835-sdhost.txt
@@ -0,0 +1,23 @@
+Broadcom BCM2835 SDHOST controller
+
+This file documents differences between the core properties described
+by mmc.txt and the properties that represent the BCM2835 controller.
+
+Required properties:
+- compatible: Should be "brcm,bcm2835-sdhost".
+- clocks: The clock feeding the SDHOST controller.
+
+Optional properties:
+- dmas: DMA channel for read and write.
+  See Documentation/devicetree/bindings/dma/dma.txt for details
+
+Example:
+
+sdhost: mmc@7e202000 {
+	compatible = "brcm,bcm2835-sdhost";
+	reg = <0x7e202000 0x100>;
+	interrupts = <2 24>;
+	clocks = <&clocks BCM2835_CLOCK_VPU>;
+	dmas = <&dma 13>;
+	dma-names = "rx-tx";
+};
diff --git a/Documentation/devicetree/bindings/mmc/cavium-mmc.txt b/Documentation/devicetree/bindings/mmc/cavium-mmc.txt
new file mode 100644
index 000000000000..1433e6201dff
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/cavium-mmc.txt
@@ -0,0 +1,57 @@
+* Cavium Octeon & ThunderX MMC controller
+
+The highspeed MMC host controller on Caviums SoCs provides an interface
+for MMC and SD types of memory cards.
+
+Supported maximum speeds are the ones of the eMMC standard 4.41 as well
+as the speed of SD standard 4.0. Only 3.3 Volt is supported.
+
+Required properties:
+ - compatible : should be one of:
+   cavium,octeon-6130-mmc
+   cavium,octeon-7890-mmc
+   cavium,thunder-8190-mmc
+   cavium,thunder-8390-mmc
+   mmc-slot
+ - reg : mmc controller base registers
+ - clocks : phandle
+
+Optional properties:
+ - for cd, bus-width and additional generic mmc parameters
+   please refer to mmc.txt within this directory
+ - cavium,cmd-clk-skew : number of coprocessor clocks before sampling command
+ - cavium,dat-clk-skew : number of coprocessor clocks before sampling data
+
+Deprecated properties:
+- spi-max-frequency : use max-frequency instead
+- cavium,bus-max-width : use bus-width instead
+- power-gpios : use vmmc-supply instead
+- cavium,octeon-6130-mmc-slot : use mmc-slot instead
+
+Examples:
+	mmc_1_4: mmc@1,4 {
+		compatible = "cavium,thunder-8390-mmc";
+		reg = <0x0c00 0 0 0 0>;	/* DEVFN = 0x0c (1:4) */
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&sclk>;
+
+		mmc-slot@0 {
+			compatible = "mmc-slot";
+			reg = <0>;
+			vmmc-supply = <&mmc_supply_3v3>;
+			max-frequency = <42000000>;
+			bus-width = <4>;
+			cap-sd-highspeed;
+		};
+
+		mmc-slot@1 {
+			compatible = "mmc-slot";
+			reg = <1>;
+			vmmc-supply = <&mmc_supply_3v3>;
+			max-frequency = <42000000>;
+			bus-width = <8>;
+			cap-mmc-highspeed;
+			non-removable;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt b/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
new file mode 100644
index 000000000000..b878a1e305af
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
@@ -0,0 +1,170 @@
+Marvell Xenon SDHCI Controller device tree bindings
+This file documents differences between the core mmc properties
+described by mmc.txt and the properties used by the Xenon implementation.
+
+Multiple SDHCs might be put into a single Xenon IP, to save size and cost.
+Each SDHC is independent and owns independent resources, such as register sets,
+clock and PHY.
+Each SDHC should have an independent device tree node.
+
+Required Properties:
+- compatible: should be one of the following
+  - "marvell,armada-3700-sdhci": For controllers on Armada-3700 SoC.
+    Must provide a second register area and marvell,pad-type.
+  - "marvell,armada-ap806-sdhci": For controllers on Armada AP806.
+  - "marvell,armada-cp110-sdhci": For controllers on Armada CP110.
+
+- clocks:
+  Array of clocks required for SDHC.
+  Require at least input clock for Xenon IP core.
+
+- clock-names:
+  Array of names corresponding to clocks property.
+  The input clock for Xenon IP core should be named as "core".
+
+- reg:
+  * For "marvell,armada-3700-sdhci", two register areas.
+    The first one for Xenon IP register. The second one for the Armada 3700 SoC
+    PHY PAD Voltage Control register.
+    Please follow the examples with compatible "marvell,armada-3700-sdhci"
+    in below.
+    Please also check property marvell,pad-type in below.
+
+  * For other compatible strings, one register area for Xenon IP.
+
+Optional Properties:
+- marvell,xenon-sdhc-id:
+  Indicate the corresponding bit index of current SDHC in
+  SDHC System Operation Control Register Bit[7:0].
+  Set/clear the corresponding bit to enable/disable current SDHC.
+  If Xenon IP contains only one SDHC, this property is optional.
+
+- marvell,xenon-phy-type:
+  Xenon support multiple types of PHYs.
+  To select eMMC 5.1 PHY, set:
+  marvell,xenon-phy-type = "emmc 5.1 phy"
+  eMMC 5.1 PHY is the default choice if this property is not provided.
+  To select eMMC 5.0 PHY, set:
+  marvell,xenon-phy-type = "emmc 5.0 phy"
+
+  All those types of PHYs can support eMMC, SD and SDIO.
+  Please note that this property only presents the type of PHY.
+  It doesn't stand for the entire SDHC type or property.
+  For example, "emmc 5.1 phy" doesn't mean that this Xenon SDHC only
+  supports eMMC 5.1.
+
+- marvell,xenon-phy-znr:
+  Set PHY ZNR value.
+  Only available for eMMC PHY.
+  Valid range = [0:0x1F].
+  ZNR is set as 0xF by default if this property is not provided.
+
+- marvell,xenon-phy-zpr:
+  Set PHY ZPR value.
+  Only available for eMMC PHY.
+  Valid range = [0:0x1F].
+  ZPR is set as 0xF by default if this property is not provided.
+
+- marvell,xenon-phy-nr-success-tun:
+  Set the number of required consecutive successful sampling points
+  used to identify a valid sampling window, in tuning process.
+  Valid range = [1:7].
+  Set as 0x4 by default if this property is not provided.
+
+- marvell,xenon-phy-tun-step-divider:
+  Set the divider for calculating TUN_STEP.
+  Set as 64 by default if this property is not provided.
+
+- marvell,xenon-phy-slow-mode:
+  If this property is selected, transfers will bypass PHY.
+  Only available when bus frequency lower than 55MHz in SDR mode.
+  Disabled by default. Please only try this property if timing issues
+  always occur with PHY enabled in eMMC HS SDR, SD SDR12, SD SDR25,
+  SD Default Speed and HS mode and eMMC legacy speed mode.
+
+- marvell,xenon-tun-count:
+  Xenon SDHC SoC usually doesn't provide re-tuning counter in
+  Capabilities Register 3 Bit[11:8].
+  This property provides the re-tuning counter.
+  If this property is not set, default re-tuning counter will
+  be set as 0x9 in driver.
+
+- marvell,pad-type:
+  Type of Armada 3700 SoC PHY PAD Voltage Controller register.
+  Only valid when "marvell,armada-3700-sdhci" is selected.
+  Two types: "sd" and "fixed-1-8v".
+  If "sd" is selected, SoC PHY PAD is set as 3.3V at the beginning and is
+  switched to 1.8V when later in higher speed mode.
+  If "fixed-1-8v" is selected, SoC PHY PAD is fixed 1.8V, such as for eMMC.
+  Please follow the examples with compatible "marvell,armada-3700-sdhci"
+  in below.
+
+Example:
+- For eMMC:
+
+	sdhci@aa0000 {
+		compatible = "marvell,armada-ap806-sdhci";
+		reg = <0xaa0000 0x1000>;
+		interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>
+		clocks = <&emmc_clk>;
+		clock-names = "core";
+		bus-width = <4>;
+		marvell,xenon-phy-slow-mode;
+		marvell,xenon-tun-count = <11>;
+		non-removable;
+		no-sd;
+		no-sdio;
+
+		/* Vmmc and Vqmmc are both fixed */
+	};
+
+- For SD/SDIO:
+
+	sdhci@ab0000 {
+		compatible = "marvell,armada-cp110-sdhci";
+		reg = <0xab0000 0x1000>;
+		interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>
+		vqmmc-supply = <&sd_vqmmc_regulator>;
+		vmmc-supply = <&sd_vmmc_regulator>;
+		clocks = <&sdclk>;
+		clock-names = "core";
+		bus-width = <4>;
+		marvell,xenon-tun-count = <9>;
+	};
+
+- For eMMC with compatible "marvell,armada-3700-sdhci":
+
+	sdhci@aa0000 {
+		compatible = "marvell,armada-3700-sdhci";
+		reg = <0xaa0000 0x1000>,
+		      <phy_addr 0x4>;
+		interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>
+		clocks = <&emmcclk>;
+		clock-names = "core";
+		bus-width = <8>;
+		mmc-ddr-1_8v;
+		mmc-hs400-1_8v;
+		non-removable;
+		no-sd;
+		no-sdio;
+
+		/* Vmmc and Vqmmc are both fixed */
+
+		marvell,pad-type = "fixed-1-8v";
+	};
+
+- For SD/SDIO with compatible "marvell,armada-3700-sdhci":
+
+	sdhci@ab0000 {
+		compatible = "marvell,armada-3700-sdhci";
+		reg = <0xab0000 0x1000>,
+		      <phy_addr 0x4>;
+		interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>
+		vqmmc-supply = <&sd_regulator>;
+		/* Vmmc is fixed */
+		clocks = <&sdclk>;
+		clock-names = "core";
+		bus-width = <4>;
+
+		marvell,pad-type = "sd";
+	};
diff --git a/Documentation/devicetree/bindings/mmc/mtk-sd.txt b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
index 0120c7f1109c..4182ea36ca5b 100644
--- a/Documentation/devicetree/bindings/mmc/mtk-sd.txt
+++ b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
@@ -21,6 +21,15 @@ Optional properties:
 - assigned-clocks: PLL of the source clock
 - assigned-clock-parents: parent of source clock, used for HS400 mode to get 400Mhz source clock
 - hs400-ds-delay: HS400 DS delay setting
+- mediatek,hs200-cmd-int-delay: HS200 command internal delay setting.
+	This field has total 32 stages.
+	The value is an integer from 0 to 31.
+- mediatek,hs400-cmd-int-delay: HS400 command internal delay setting
+	This field has total 32 stages.
+	The value is an integer from 0 to 31.
+- mediatek,hs400-cmd-resp-sel-rising: HS400 command response sample selection
+	If present,HS400 command responses are sampled on rising edges.
+	If not present,HS400 command responses are sampled on falling edges.
 
 Examples:
 mmc0: mmc@11230000 {
@@ -38,4 +47,7 @@ mmc0: mmc@11230000 {
 	assigned-clocks = <&topckgen CLK_TOP_MSDC50_0_SEL>;
 	assigned-clock-parents = <&topckgen CLK_TOP_MSDCPLL_D2>;
 	hs400-ds-delay = <0x14015>;
+	mediatek,hs200-cmd-int-delay = <26>;
+	mediatek,hs400-cmd-int-delay = <14>;
+	mediatek,hs400-cmd-resp-sel-rising;
 };
diff --git a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
index 15b8368ee1f2..9bce57862ed6 100644
--- a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
@@ -7,11 +7,13 @@ This file documents differences between the core properties described
 by mmc.txt and the properties used by the sdhci-tegra driver.
 
 Required properties:
-- compatible : For Tegra20, must contain "nvidia,tegra20-sdhci".
-  For Tegra30, must contain "nvidia,tegra30-sdhci". For Tegra114,
-  must contain "nvidia,tegra114-sdhci". For Tegra124, must contain
-  "nvidia,tegra124-sdhci". Otherwise, must contain "nvidia,<chip>-sdhci",
-  plus one of the above, where <chip> is tegra132 or tegra210.
+- compatible : should be one of:
+  - "nvidia,tegra20-sdhci": for Tegra20
+  - "nvidia,tegra30-sdhci": for Tegra30
+  - "nvidia,tegra114-sdhci": for Tegra114
+  - "nvidia,tegra124-sdhci": for Tegra124 and Tegra132
+  - "nvidia,tegra210-sdhci": for Tegra210
+  - "nvidia,tegra186-sdhci": for Tegra186
 - clocks : Must contain one entry, for the module clock.
   See ../clocks/clock-bindings.txt for details.
 - resets : Must contain an entry for each entry in reset-names.
diff --git a/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt b/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
index e4ba92aa035e..c32dc5a9dbe6 100644
--- a/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
+++ b/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
@@ -8,6 +8,7 @@ Required properties:
 
 - compatible: should be "renesas,mmcif-<soctype>", "renesas,sh-mmcif" as a
   fallback. Examples with <soctype> are:
+  - "renesas,mmcif-r7s72100" for the MMCIF found in r7s72100 SoCs
   - "renesas,mmcif-r8a73a4" for the MMCIF found in r8a73a4 SoCs
   - "renesas,mmcif-r8a7740" for the MMCIF found in r8a7740 SoCs
   - "renesas,mmcif-r8a7778" for the MMCIF found in r8a7778 SoCs
@@ -17,6 +18,13 @@ Required properties:
   - "renesas,mmcif-r8a7794" for the MMCIF found in r8a7794 SoCs
   - "renesas,mmcif-sh73a0" for the MMCIF found in sh73a0 SoCs
 
+- interrupts: Some SoCs have only 1 shared interrupt, while others have either
+  2 or 3 individual interrupts (error, int, card detect). Below is the number
+  of interrupts for each SoC:
+    1: r8a73a4, r8a7778, r8a7790, r8a7791, r8a7793, r8a7794
+    2: r8a7740, sh73a0
+    3: r7s72100
+
 - clocks: reference to the functional clock
 
 - dmas: reference to the DMA channels, one per channel name listed in the
diff --git a/Documentation/devicetree/bindings/mmc/samsung,s3cmci.txt b/Documentation/devicetree/bindings/mmc/samsung,s3cmci.txt
new file mode 100644
index 000000000000..5f68feb9f9d6
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/samsung,s3cmci.txt
@@ -0,0 +1,42 @@
+* Samsung's S3C24XX MMC/SD/SDIO controller device tree bindings
+
+Samsung's S3C24XX MMC/SD/SDIO controller is used as a connectivity interface
+with external MMC, SD and SDIO storage mediums.
+
+This file documents differences between the core mmc properties described by
+mmc.txt and the properties used by the Samsung S3C24XX MMC/SD/SDIO controller
+implementation.
+
+Required SoC Specific Properties:
+- compatible: should be one of the following
+  - "samsung,s3c2410-sdi": for controllers compatible with s3c2410
+  - "samsung,s3c2412-sdi": for controllers compatible with s3c2412
+  - "samsung,s3c2440-sdi": for controllers compatible with s3c2440
+- reg: register location and length
+- interrupts: mmc controller interrupt
+- clocks: Should reference the controller clock
+- clock-names: Should contain "sdi"
+
+Required Board Specific Properties:
+- pinctrl-0: Should specify pin control groups used for this controller.
+- pinctrl-names: Should contain only one value - "default".
+
+Optional Properties:
+- bus-width: number of data lines (see mmc.txt)
+- cd-gpios: gpio for card detection (see mmc.txt)
+- wp-gpios: gpio for write protection (see mmc.txt)
+
+Example:
+
+	mmc0: mmc@5a000000 {
+		compatible = "samsung,s3c2440-sdi";
+		pinctrl-names = "default";
+		pinctrl-0 = <&sdi_pins>;
+		reg = <0x5a000000 0x100000>;
+		interrupts = <0 0 21 3>;
+		clocks = <&clocks PCLK_SDI>;
+		clock-names = "sdi";
+		bus-width = <4>;
+		cd-gpios = <&gpg 8 GPIO_ACTIVE_LOW>;
+		wp-gpios = <&gph 8 GPIO_ACTIVE_LOW>;
+	};
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-cadence.txt b/Documentation/devicetree/bindings/mmc/sdhci-cadence.txt
index c0f37cb41a9b..fa423c277853 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-cadence.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-cadence.txt
@@ -19,6 +19,53 @@ if supported. See mmc.txt for details.
 - mmc-hs400-1_8v
 - mmc-hs400-1_2v
 
+Some PHY delays can be configured by following properties.
+PHY DLL input delays:
+They are used to delay the data valid window, and align the window
+to sampling clock. The delay starts from 5ns (for delay parameter equal to 0)
+and it is increased by 2.5ns in each step.
+- cdns,phy-input-delay-sd-highspeed:
+  Value of the delay in the input path for SD high-speed timing
+  Valid range = [0:0x1F].
+- cdns,phy-input-delay-legacy:
+  Value of the delay in the input path for legacy timing
+  Valid range = [0:0x1F].
+- cdns,phy-input-delay-sd-uhs-sdr12:
+  Value of the delay in the input path for SD UHS SDR12 timing
+  Valid range = [0:0x1F].
+- cdns,phy-input-delay-sd-uhs-sdr25:
+  Value of the delay in the input path for SD UHS SDR25 timing
+  Valid range = [0:0x1F].
+- cdns,phy-input-delay-sd-uhs-sdr50:
+  Value of the delay in the input path for SD UHS SDR50 timing
+  Valid range = [0:0x1F].
+- cdns,phy-input-delay-sd-uhs-ddr50:
+  Value of the delay in the input path for SD UHS DDR50 timing
+  Valid range = [0:0x1F].
+- cdns,phy-input-delay-mmc-highspeed:
+  Value of the delay in the input path for MMC high-speed timing
+  Valid range = [0:0x1F].
+- cdns,phy-input-delay-mmc-ddr:
+  Value of the delay in the input path for eMMC high-speed DDR timing
+  Valid range = [0:0x1F].
+
+PHY DLL clock delays:
+Each delay property represents the fraction of the clock period.
+The approximate delay value will be
+(<delay property value>/128)*sdmclk_clock_period.
+- cdns,phy-dll-delay-sdclk:
+  Value of the delay introduced on the sdclk output
+  for all modes except HS200, HS400 and HS400_ES.
+  Valid range = [0:0x7F].
+- cdns,phy-dll-delay-sdclk-hsmmc:
+  Value of the delay introduced on the sdclk output
+  for HS200, HS400 and HS400_ES speed modes.
+  Valid range = [0:0x7F].
+- cdns,phy-dll-delay-strobe:
+  Value of the delay introduced on the dat_strobe input
+  used in HS400 / HS400_ES speed modes.
+  Valid range = [0:0x7F].
+
 Example:
 	emmc: sdhci@5a000000 {
 		compatible = "socionext,uniphier-sd4hc", "cdns,sd4hc";
@@ -29,4 +76,5 @@ Example:
 		mmc-ddr-1_8v;
 		mmc-hs200-1_8v;
 		mmc-hs400-1_8v;
+		cdns,phy-dll-delay-sdclk = <0>;
 	};
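To make the DLL clock-delay formula above concrete: assuming a hypothetical
200 MHz sdmclk (5 ns period), a property value of 0x40 (64 decimal) introduces
roughly (64/128) * 5 ns = 2.5 ns of delay on the sdclk output, i.e. half a
clock period, while the <0> used in the example adds no DLL delay at all.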
diff --git a/Documentation/mmc/mmc-dev-attrs.txt b/Documentation/mmc/mmc-dev-attrs.txt
index 2caff30b348c..4ad0bb17f343 100644
--- a/Documentation/mmc/mmc-dev-attrs.txt
+++ b/Documentation/mmc/mmc-dev-attrs.txt
@@ -30,6 +30,7 @@ All attributes are read-only.
 	rel_sectors		Reliable write sector count
 	ocr			Operation Conditions Register
 	dsr			Driver Stage Register
+	cmdq_en			Command Queue enabled: 1 => enabled, 0 => not enabled
 
 Note on Erase Size and Preferred Erase Size:
 
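Since cmdq_en is exposed like the other read-only card attributes, user space
can simply read it back. A minimal sketch in C, assuming (hypothetically) that
the card is registered as mmcblk0; the exact sysfs path is an assumption for
illustration:

	/*
	 * Minimal sketch: read the cmdq_en attribute from user space.
	 * Path assumes the card shows up as mmcblk0; adjust as needed.
	 */
	#include <stdio.h>

	int main(void)
	{
		const char *path = "/sys/block/mmcblk0/device/cmdq_en";
		FILE *f = fopen(path, "r");
		int en;

		if (!f) {
			perror(path);
			return 1;
		}
		if (fscanf(f, "%d", &en) == 1)
			printf("Command Queue is %s\n",
			       en ? "enabled" : "not enabled");
		fclose(f);
		return 0;
	}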
diff --git a/MAINTAINERS b/MAINTAINERS
index b1036a7df39e..06d01a0a8a48 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3064,6 +3064,14 @@ S: Supported
 F:	drivers/i2c/busses/i2c-octeon*
 F:	drivers/i2c/busses/i2c-thunderx*
 
+CAVIUM MMC DRIVER
+M:	Jan Glauber <jglauber@cavium.com>
+M:	David Daney <david.daney@cavium.com>
+M:	Steven J. Hill <Steven.Hill@cavium.com>
+W:	http://www.cavium.com
+S:	Supported
+F:	drivers/mmc/host/cavium*
+
 CAVIUM LIQUIDIO NETWORK DRIVER
 M:	Derek Chickles <derek.chickles@caviumnetworks.com>
 M:	Satanand Burla <satananda.burla@caviumnetworks.com>
@@ -7919,6 +7927,13 @@ M:	Nicolas Pitre <nico@fluxnic.net>
 S:	Odd Fixes
 F:	drivers/mmc/host/mvsdio.*
 
+MARVELL XENON MMC/SD/SDIO HOST CONTROLLER DRIVER
+M:	Hu Ziji <huziji@marvell.com>
+L:	linux-mmc@vger.kernel.org
+S:	Supported
+F:	drivers/mmc/host/sdhci-xenon*
+F:	Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
+
 MATROX FRAMEBUFFER DRIVER
 L:	linux-fbdev@vger.kernel.org
 S:	Orphan
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index a749ba2edec4..5019c8f4acd0 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -235,7 +235,8 @@
 			#clock-cells = <1>;
 			clock-output-names = "ap-cpu-cluster-0",
 					     "ap-cpu-cluster-1",
-					     "ap-fixed", "ap-mss";
+					     "ap-fixed", "ap-mss",
+					     "ap-emmc";
 			reg = <0x6f4000 0x1000>;
 		};
 	};
diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig
index 31e3c4d9adb0..d4fda41f00ba 100644
--- a/arch/mips/configs/cavium_octeon_defconfig
+++ b/arch/mips/configs/cavium_octeon_defconfig
@@ -127,6 +127,11 @@ CONFIG_USB_EHCI_HCD=m
 CONFIG_USB_EHCI_HCD_PLATFORM=m
 CONFIG_USB_OHCI_HCD=m
 CONFIG_USB_OHCI_HCD_PLATFORM=m
+CONFIG_MMC=y
+# CONFIG_PWRSEQ_EMMC is not set
+# CONFIG_PWRSEQ_SIMPLE is not set
+# CONFIG_MMC_BLOCK_BOUNCE is not set
+CONFIG_MMC_CAVIUM_OCTEON=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_DS1307=y
 CONFIG_STAGING=y
diff --git a/drivers/clk/mvebu/ap806-system-controller.c b/drivers/clk/mvebu/ap806-system-controller.c
index f17702107ac5..8155baccc98e 100644
--- a/drivers/clk/mvebu/ap806-system-controller.c
+++ b/drivers/clk/mvebu/ap806-system-controller.c
@@ -23,7 +23,7 @@
 #define AP806_SAR_REG			0x400
 #define AP806_SAR_CLKFREQ_MODE_MASK	0x1f
 
-#define AP806_CLK_NUM		4
+#define AP806_CLK_NUM		5
 
 static struct clk *ap806_clks[AP806_CLK_NUM];
 
@@ -135,6 +135,23 @@ static int ap806_syscon_clk_probe(struct platform_device *pdev)
 		goto fail3;
 	}
 
+	/* eMMC Clock is fixed clock divided by 3 */
+	if (of_property_read_string_index(np, "clock-output-names",
+					  4, &name)) {
+		ap806_clk_data.clk_num--;
+		dev_warn(&pdev->dev,
+			 "eMMC clock missing: update the device tree!\n");
+	} else {
+		ap806_clks[4] = clk_register_fixed_factor(NULL, name,
+							  fixedclk_name,
+							  0, 1, 3);
+
+		if (IS_ERR(ap806_clks[4])) {
+			ret = PTR_ERR(ap806_clks[4]);
+			goto fail4;
+		}
+	}
+
 	ret = of_clk_add_provider(np, of_clk_src_onecell_get, &ap806_clk_data);
 	if (ret)
 		goto fail_clk_add;
@@ -142,6 +159,8 @@ static int ap806_syscon_clk_probe(struct platform_device *pdev)
 	return 0;
 
 fail_clk_add:
+	clk_unregister_fixed_factor(ap806_clks[4]);
+fail4:
 	clk_unregister_fixed_factor(ap806_clks[3]);
 fail3:
 	clk_unregister_fixed_rate(ap806_clks[2]);
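The fixed-factor clock registered above uses mult = 1, div = 3, so the new
"ap-emmc" output always runs at one third of the "ap-fixed" rate; with an
assumed 1.2 GHz fixed clock, the eMMC controller would see 400 MHz. Consumers
reference it through the existing one-cell provider at index 4, matching the
position added to clock-output-names in the armada-ap806.dtsi hunk above,
e.g. something like "clocks = <&ap_syscon 4>;" where &ap_syscon stands in for
the AP806 system-controller node label.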
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index ff3da960c473..8273b078686d 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -129,6 +129,13 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
 				      struct mmc_blk_data *md);
 static int get_card_status(struct mmc_card *card, u32 *status, int retries);
 
+static void mmc_blk_requeue(struct request_queue *q, struct request *req)
+{
+	spin_lock_irq(q->queue_lock);
+	blk_requeue_request(q, req);
+	spin_unlock_irq(q->queue_lock);
+}
+
 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 {
 	struct mmc_blk_data *md;
@@ -721,10 +728,41 @@ static const struct block_device_operations mmc_bdops = {
 #endif
 };
 
+static int mmc_blk_part_switch_pre(struct mmc_card *card,
+				   unsigned int part_type)
+{
+	int ret = 0;
+
+	if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
+		if (card->ext_csd.cmdq_en) {
+			ret = mmc_cmdq_disable(card);
+			if (ret)
+				return ret;
+		}
+		mmc_retune_pause(card->host);
+	}
+
+	return ret;
+}
+
+static int mmc_blk_part_switch_post(struct mmc_card *card,
+				    unsigned int part_type)
+{
+	int ret = 0;
+
+	if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
+		mmc_retune_unpause(card->host);
+		if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
+			ret = mmc_cmdq_enable(card);
+	}
+
+	return ret;
+}
+
 static inline int mmc_blk_part_switch(struct mmc_card *card,
 				      struct mmc_blk_data *md)
 {
-	int ret;
+	int ret = 0;
 	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
 
 	if (main_md->part_curr == md->part_type)
@@ -733,8 +771,9 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
 	if (mmc_card_mmc(card)) {
 		u8 part_config = card->ext_csd.part_config;
 
-		if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
-			mmc_retune_pause(card->host);
+		ret = mmc_blk_part_switch_pre(card, md->part_type);
+		if (ret)
+			return ret;
 
 		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
 		part_config |= md->part_type;
@@ -743,19 +782,17 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
 				 EXT_CSD_PART_CONFIG, part_config,
 				 card->ext_csd.part_time);
 		if (ret) {
-			if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
-				mmc_retune_unpause(card->host);
+			mmc_blk_part_switch_post(card, md->part_type);
 			return ret;
 		}
 
 		card->ext_csd.part_config = part_config;
 
-		if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
-			mmc_retune_unpause(card->host);
+		ret = mmc_blk_part_switch_post(card, main_md->part_curr);
 	}
 
 	main_md->part_curr = md->part_type;
-	return 0;
+	return ret;
 }
 
 static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
@@ -1272,7 +1309,7 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
 {
 	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
 		/* Legacy mode imposes restrictions on transfers. */
-		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
+		if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
 			brq->data.blocks = 1;
 
 		if (brq->data.blocks > card->ext_csd.rel_sectors)
@@ -1396,36 +1433,39 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
 	return MMC_BLK_SUCCESS;
 }
 
-static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
-			       struct mmc_card *card,
-			       int disable_multi,
-			       struct mmc_queue *mq)
+static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
+			      int disable_multi, bool *do_rel_wr,
+			      bool *do_data_tag)
 {
-	u32 readcmd, writecmd;
+	struct mmc_blk_data *md = mq->blkdata;
+	struct mmc_card *card = md->queue.card;
 	struct mmc_blk_request *brq = &mqrq->brq;
 	struct request *req = mqrq->req;
-	struct mmc_blk_data *md = mq->blkdata;
-	bool do_data_tag;
 
 	/*
 	 * Reliable writes are used to implement Forced Unit Access and
 	 * are supported only on MMCs.
 	 */
-	bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
-		(rq_data_dir(req) == WRITE) &&
-		(md->flags & MMC_BLK_REL_WR);
+	*do_rel_wr = (req->cmd_flags & REQ_FUA) &&
+		     rq_data_dir(req) == WRITE &&
+		     (md->flags & MMC_BLK_REL_WR);
 
 	memset(brq, 0, sizeof(struct mmc_blk_request));
-	brq->mrq.cmd = &brq->cmd;
+
 	brq->mrq.data = &brq->data;
 
-	brq->cmd.arg = blk_rq_pos(req);
-	if (!mmc_card_blockaddr(card))
-		brq->cmd.arg <<= 9;
-	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-	brq->data.blksz = 512;
 	brq->stop.opcode = MMC_STOP_TRANSMISSION;
 	brq->stop.arg = 0;
+
+	if (rq_data_dir(req) == READ) {
+		brq->data.flags = MMC_DATA_READ;
+		brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+	} else {
+		brq->data.flags = MMC_DATA_WRITE;
+		brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+	}
+
+	brq->data.blksz = 512;
 	brq->data.blocks = blk_rq_sectors(req);
 
 	/*
@@ -1456,6 +1496,68 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 			brq->data.blocks);
 	}
 
+	if (*do_rel_wr)
+		mmc_apply_rel_rw(brq, card, req);
+
+	/*
+	 * Data tag is used only during writing meta data to speed
+	 * up write and any subsequent read of this meta data
+	 */
+	*do_data_tag = card->ext_csd.data_tag_unit_size &&
+		       (req->cmd_flags & REQ_META) &&
+		       (rq_data_dir(req) == WRITE) &&
+		       ((brq->data.blocks * brq->data.blksz) >=
+			card->ext_csd.data_tag_unit_size);
+
+	mmc_set_data_timeout(&brq->data, card);
+
+	brq->data.sg = mqrq->sg;
+	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+	/*
+	 * Adjust the sg list so it is the same size as the
+	 * request.
+	 */
+	if (brq->data.blocks != blk_rq_sectors(req)) {
+		int i, data_size = brq->data.blocks << 9;
+		struct scatterlist *sg;
+
+		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
+			data_size -= sg->length;
+			if (data_size <= 0) {
+				sg->length += data_size;
+				i++;
+				break;
+			}
+		}
+		brq->data.sg_len = i;
+	}
+
+	mqrq->areq.mrq = &brq->mrq;
+
+	mmc_queue_bounce_pre(mqrq);
+}
+
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+			       struct mmc_card *card,
+			       int disable_multi,
+			       struct mmc_queue *mq)
+{
+	u32 readcmd, writecmd;
+	struct mmc_blk_request *brq = &mqrq->brq;
+	struct request *req = mqrq->req;
+	struct mmc_blk_data *md = mq->blkdata;
+	bool do_rel_wr, do_data_tag;
+
+	mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);
+
+	brq->mrq.cmd = &brq->cmd;
+
+	brq->cmd.arg = blk_rq_pos(req);
+	if (!mmc_card_blockaddr(card))
+		brq->cmd.arg <<= 9;
+	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
 	if (brq->data.blocks > 1 || do_rel_wr) {
 		/* SPI multiblock writes terminate using a special
 		 * token, not a STOP_TRANSMISSION request.
@@ -1470,32 +1572,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 		readcmd = MMC_READ_SINGLE_BLOCK;
 		writecmd = MMC_WRITE_BLOCK;
 	}
-	if (rq_data_dir(req) == READ) {
-		brq->cmd.opcode = readcmd;
-		brq->data.flags = MMC_DATA_READ;
-		if (brq->mrq.stop)
-			brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
-					MMC_CMD_AC;
-	} else {
-		brq->cmd.opcode = writecmd;
-		brq->data.flags = MMC_DATA_WRITE;
-		if (brq->mrq.stop)
-			brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
-					MMC_CMD_AC;
-	}
-
-	if (do_rel_wr)
-		mmc_apply_rel_rw(brq, card, req);
-
-	/*
-	 * Data tag is used only during writing meta data to speed
-	 * up write and any subsequent read of this meta data
-	 */
-	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
-		(req->cmd_flags & REQ_META) &&
-		(rq_data_dir(req) == WRITE) &&
-		((brq->data.blocks * brq->data.blksz) >=
-		 card->ext_csd.data_tag_unit_size);
+	brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd;
 
 	/*
 	 * Pre-defined multi-block transfers are preferable to
@@ -1526,34 +1603,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 		brq->mrq.sbc = &brq->sbc;
 	}
 
-	mmc_set_data_timeout(&brq->data, card);
-
-	brq->data.sg = mqrq->sg;
-	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
-
-	/*
-	 * Adjust the sg list so it is the same size as the
-	 * request.
-	 */
-	if (brq->data.blocks != blk_rq_sectors(req)) {
-		int i, data_size = brq->data.blocks << 9;
-		struct scatterlist *sg;
-
-		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
-			data_size -= sg->length;
-			if (data_size <= 0) {
-				sg->length += data_size;
-				i++;
-				break;
-			}
-		}
-		brq->data.sg_len = i;
-	}
-
-	mqrq->areq.mrq = &brq->mrq;
 	mqrq->areq.err_check = mmc_blk_err_check;
-
-	mmc_queue_bounce_pre(mqrq);
 }
 
 static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
@@ -1585,11 +1635,14 @@ static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
 	return req_pending;
 }
 
-static void mmc_blk_rw_cmd_abort(struct mmc_card *card, struct request *req)
+static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card,
+				 struct request *req,
+				 struct mmc_queue_req *mqrq)
 {
 	if (mmc_card_removed(card))
 		req->rq_flags |= RQF_QUIET;
 	while (blk_end_request(req, -EIO, blk_rq_cur_bytes(req)));
+	mmc_queue_req_free(mq, mqrq);
 }
 
 /**
@@ -1597,7 +1650,8 @@ static void mmc_blk_rw_cmd_abort(struct mmc_card *card, struct request *req)
  * @mq: the queue with the card and host to restart
  * @req: a new request that want to be started after the current one
  */
-static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req,
+				   struct mmc_queue_req *mqrq)
 {
 	if (!req)
 		return;
@@ -1608,11 +1662,12 @@ static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req)
 	if (mmc_card_removed(mq->card)) {
 		req->rq_flags |= RQF_QUIET;
 		blk_end_request_all(req, -EIO);
+		mmc_queue_req_free(mq, mqrq);
 		return;
 	}
 	/* Else proceed and try to restart the current async request */
-	mmc_blk_rw_rq_prep(mq->mqrq_cur, mq->card, 0, mq);
-	mmc_start_areq(mq->card->host, &mq->mqrq_cur->areq, NULL);
+	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
+	mmc_start_areq(mq->card->host, &mqrq->areq, NULL);
 }
 
 static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
@@ -1622,13 +1677,23 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 	struct mmc_blk_request *brq;
 	int disable_multi = 0, retry = 0, type, retune_retry_done = 0;
 	enum mmc_blk_status status;
+	struct mmc_queue_req *mqrq_cur = NULL;
 	struct mmc_queue_req *mq_rq;
 	struct request *old_req;
 	struct mmc_async_req *new_areq;
 	struct mmc_async_req *old_areq;
 	bool req_pending = true;
 
-	if (!new_req && !mq->mqrq_prev->req)
+	if (new_req) {
+		mqrq_cur = mmc_queue_req_find(mq, new_req);
+		if (!mqrq_cur) {
+			WARN_ON(1);
+			mmc_blk_requeue(mq->queue, new_req);
+			new_req = NULL;
+		}
+	}
+
+	if (!mq->qcnt)
 		return;
 
 	do {
@@ -1641,12 +1706,12 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 		    !IS_ALIGNED(blk_rq_sectors(new_req), 8)) {
 			pr_err("%s: Transfer size is not 4KB sector size aligned\n",
 			       new_req->rq_disk->disk_name);
-			mmc_blk_rw_cmd_abort(card, new_req);
+			mmc_blk_rw_cmd_abort(mq, card, new_req, mqrq_cur);
 			return;
 		}
 
-		mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
-		new_areq = &mq->mqrq_cur->areq;
+		mmc_blk_rw_rq_prep(mqrq_cur, card, 0, mq);
+		new_areq = &mqrq_cur->areq;
 	} else
 		new_areq = NULL;
 
@@ -1657,8 +1722,6 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 		 * and there is nothing more to do until it is
 		 * complete.
 		 */
-		if (status == MMC_BLK_NEW_REQUEST)
-			mq->new_request = true;
 		return;
 	}
 
@@ -1691,7 +1754,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 				pr_err("%s BUG rq_tot %d d_xfer %d\n",
 				       __func__, blk_rq_bytes(old_req),
 				       brq->data.bytes_xfered);
-				mmc_blk_rw_cmd_abort(card, old_req);
+				mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
 				return;
 			}
 			break;
@@ -1699,12 +1762,15 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 			req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
 			if (mmc_blk_reset(md, card->host, type)) {
 				if (req_pending)
-					mmc_blk_rw_cmd_abort(card, old_req);
-				mmc_blk_rw_try_restart(mq, new_req);
+					mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
+				else
+					mmc_queue_req_free(mq, mq_rq);
+				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 				return;
 			}
 			if (!req_pending) {
-				mmc_blk_rw_try_restart(mq, new_req);
+				mmc_queue_req_free(mq, mq_rq);
+				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 				return;
 			}
 			break;
@@ -1716,8 +1782,8 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 		case MMC_BLK_ABORT:
 			if (!mmc_blk_reset(md, card->host, type))
 				break;
-			mmc_blk_rw_cmd_abort(card, old_req);
-			mmc_blk_rw_try_restart(mq, new_req);
+			mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
+			mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 			return;
 		case MMC_BLK_DATA_ERR: {
 			int err;
@@ -1726,8 +1792,8 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 			if (!err)
 				break;
 			if (err == -ENODEV) {
-				mmc_blk_rw_cmd_abort(card, old_req);
-				mmc_blk_rw_try_restart(mq, new_req);
+				mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
+				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 				return;
 			}
 			/* Fall through */
@@ -1748,19 +1814,20 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 			req_pending = blk_end_request(old_req, -EIO,
 						      brq->data.blksz);
 			if (!req_pending) {
-				mmc_blk_rw_try_restart(mq, new_req);
+				mmc_queue_req_free(mq, mq_rq);
+				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 				return;
 			}
 			break;
 		case MMC_BLK_NOMEDIUM:
-			mmc_blk_rw_cmd_abort(card, old_req);
-			mmc_blk_rw_try_restart(mq, new_req);
+			mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
+			mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 			return;
 		default:
 			pr_err("%s: Unhandled return value (%d)",
 			       old_req->rq_disk->disk_name, status);
-			mmc_blk_rw_cmd_abort(card, old_req);
-			mmc_blk_rw_try_restart(mq, new_req);
+			mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
+			mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 			return;
 		}
 
@@ -1776,6 +1843,8 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 			mq_rq->brq.retune_retry_done = retune_retry_done;
 		}
 	} while (req_pending);
+
+	mmc_queue_req_free(mq, mq_rq);
 }
 
 void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
@@ -1783,9 +1852,8 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	int ret;
 	struct mmc_blk_data *md = mq->blkdata;
 	struct mmc_card *card = md->queue.card;
-	bool req_is_special = mmc_req_is_special(req);
 
-	if (req && !mq->mqrq_prev->req)
+	if (req && !mq->qcnt)
 		/* claim host only for the first request */
 		mmc_get_card(card);
 
@@ -1797,20 +1865,19 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		goto out;
 	}
 
-	mq->new_request = false;
 	if (req && req_op(req) == REQ_OP_DISCARD) {
 		/* complete ongoing async transfer before issuing discard */
-		if (card->host->areq)
+		if (mq->qcnt)
 			mmc_blk_issue_rw_rq(mq, NULL);
 		mmc_blk_issue_discard_rq(mq, req);
 	} else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
 		/* complete ongoing async transfer before issuing secure erase*/
-		if (card->host->areq)
+		if (mq->qcnt)
 			mmc_blk_issue_rw_rq(mq, NULL);
 		mmc_blk_issue_secdiscard_rq(mq, req);
 	} else if (req && req_op(req) == REQ_OP_FLUSH) {
 		/* complete ongoing async transfer before issuing flush */
-		if (card->host->areq)
+		if (mq->qcnt)
 			mmc_blk_issue_rw_rq(mq, NULL);
 		mmc_blk_issue_flush(mq, req);
 	} else {
@@ -1819,13 +1886,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	}
 
 out:
-	if ((!req && !mq->new_request) || req_is_special)
-		/*
-		 * Release host when there are no more requests
-		 * and after special request(discard, flush) is done.
-		 * In case sepecial request, there is no reentry to
-		 * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
-		 */
+	if (!mq->qcnt)
 		mmc_put_card(card);
 }
 
@@ -2105,6 +2166,7 @@ static int mmc_blk_probe(struct mmc_card *card)
 {
 	struct mmc_blk_data *md, *part_md;
 	char cap_str[10];
+	int ret;
 
 	/*
 	 * Check that the card supports the command class(es) we need.
@@ -2114,9 +2176,15 @@ static int mmc_blk_probe(struct mmc_card *card)
 
 	mmc_fixup_device(card, mmc_blk_fixups);
 
+	ret = mmc_queue_alloc_shared_queue(card);
+	if (ret)
+		return ret;
+
 	md = mmc_blk_alloc(card);
-	if (IS_ERR(md))
+	if (IS_ERR(md)) {
+		mmc_queue_free_shared_queue(card);
 		return PTR_ERR(md);
+	}
 
 	string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
 			cap_str, sizeof(cap_str));
@@ -2154,6 +2222,7 @@ static int mmc_blk_probe(struct mmc_card *card)
  out:
 	mmc_blk_remove_parts(card, md);
 	mmc_blk_remove_req(md);
+	mmc_queue_free_shared_queue(card);
 	return 0;
 }
 
@@ -2171,6 +2240,7 @@ static void mmc_blk_remove(struct mmc_card *card)
 	pm_runtime_put_noidle(&card->dev);
 	mmc_blk_remove_req(md);
 	dev_set_drvdata(&card->dev, NULL);
+	mmc_queue_free_shared_queue(card);
 }
 
 static int _mmc_blk_suspend(struct mmc_card *card)
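The part-switch helpers introduced above encode a strict ordering for RPMB
accesses: CMDQ must be disabled and re-tuning paused before the partition
switch, with the inverse operations on the way back. A condensed sketch of
that call flow, reusing the function names from this diff (illustrative only,
not verbatim kernel code; the wrapper function itself is hypothetical):

	static int rpmb_switch_example(struct mmc_card *card, u8 part_config)
	{
		int ret;

		/* pre: disable CMDQ if enabled, then pause re-tuning */
		ret = mmc_blk_part_switch_pre(card, EXT_CSD_PART_CONFIG_ACC_RPMB);
		if (ret)
			return ret;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret) {
			/* failed switch: undo immediately, as the diff does */
			mmc_blk_part_switch_post(card, EXT_CSD_PART_CONFIG_ACC_RPMB);
			return ret;
		}

		/* ... RPMB data frames would be exchanged here ... */

		/* post: unpause re-tuning, re-enable CMDQ if it was on */
		return mmc_blk_part_switch_post(card, EXT_CSD_PART_CONFIG_ACC_RPMB);
	}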
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 926e0fde07d7..82c45ddfa202 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -172,14 +172,16 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
172 172
173 trace_mmc_request_done(host, mrq); 173 trace_mmc_request_done(host, mrq);
174 174
175 if (err && cmd->retries && !mmc_card_removed(host->card)) { 175 /*
176 /* 176 * We list various conditions for the command to be considered
177 * Request starter must handle retries - see 177 * properly done:
178 * mmc_wait_for_req_done(). 178 *
179 */ 179 * - There was no error, OK fine then
180 if (mrq->done) 180 * - We are not doing some kind of retry
181 mrq->done(mrq); 181 * - The card was removed (...so just complete everything no matter
182 } else { 182 * if there are errors or retries)
183 */
184 if (!err || !cmd->retries || mmc_card_removed(host->card)) {
183 mmc_should_fail_request(host, mrq); 185 mmc_should_fail_request(host, mrq);
184 186
185 if (!host->ongoing_mrq) 187 if (!host->ongoing_mrq)
@@ -211,10 +213,13 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
211 mrq->stop->resp[0], mrq->stop->resp[1], 213 mrq->stop->resp[0], mrq->stop->resp[1],
212 mrq->stop->resp[2], mrq->stop->resp[3]); 214 mrq->stop->resp[2], mrq->stop->resp[3]);
213 } 215 }
214
215 if (mrq->done)
216 mrq->done(mrq);
217 } 216 }
217 /*
218 * Request starter must handle retries - see
219 * mmc_wait_for_req_done().
220 */
221 if (mrq->done)
222 mrq->done(mrq);
218} 223}
219 224
220EXPORT_SYMBOL(mmc_request_done); 225EXPORT_SYMBOL(mmc_request_done);
@@ -234,8 +239,10 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
234 /* 239 /*
235 * For sdio rw commands we must wait for card busy, otherwise some 240
236 * sdio devices won't work properly. 241 * sdio devices won't work properly.
242 * And bypass I/O abort, reset and bus suspend operations.
237 */ 243 */
238 if (mmc_is_io_op(mrq->cmd->opcode) && host->ops->card_busy) { 244 if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) &&
245 host->ops->card_busy) {
239 int tries = 500; /* Wait approx 500ms at maximum */ 246
240 247
241 while (host->ops->card_busy(host) && --tries) 248 while (host->ops->card_busy(host) && --tries)
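The busy wait shown here is deliberately bounded: it polls ->card_busy() with a fixed budget of tries rather than spinning forever (the loop body, which sleeps roughly 1 ms per iteration, falls outside this hunk, so that figure is an assumption here). A self-contained sketch of the same bounded-poll pattern, with a fake card_busy() predicate:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Fake busy predicate standing in for host->ops->card_busy():
 * reports busy for the first two polls, then ready. */
static int polls_left = 3;
static bool card_busy(void) { return --polls_left > 0; }

int main(void)
{
        int tries = 500;        /* wait approx 500 ms at maximum */

        /* Poll until the card deasserts busy or the budget runs out. */
        while (card_busy() && --tries)
                usleep(1000);   /* ~1 ms per poll, as assumed above */

        if (!tries)
                fprintf(stderr, "card stayed busy, giving up\n");
        else
                printf("card ready, %d tries left\n", tries);
        return 0;
}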
@@ -262,26 +269,19 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
262 host->ops->request(host, mrq); 269 host->ops->request(host, mrq);
263} 270}
264 271
265static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) 272static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
266{ 273{
267#ifdef CONFIG_MMC_DEBUG
268 unsigned int i, sz;
269 struct scatterlist *sg;
270#endif
271 mmc_retune_hold(host);
272
273 if (mmc_card_removed(host->card))
274 return -ENOMEDIUM;
275
276 if (mrq->sbc) { 274 if (mrq->sbc) {
277 pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n", 275 pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
278 mmc_hostname(host), mrq->sbc->opcode, 276 mmc_hostname(host), mrq->sbc->opcode,
279 mrq->sbc->arg, mrq->sbc->flags); 277 mrq->sbc->arg, mrq->sbc->flags);
280 } 278 }
281 279
282 pr_debug("%s: starting CMD%u arg %08x flags %08x\n", 280 if (mrq->cmd) {
283 mmc_hostname(host), mrq->cmd->opcode, 281 pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
284 mrq->cmd->arg, mrq->cmd->flags); 282 mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->arg,
283 mrq->cmd->flags);
284 }
285 285
286 if (mrq->data) { 286 if (mrq->data) {
287 pr_debug("%s: blksz %d blocks %d flags %08x " 287 pr_debug("%s: blksz %d blocks %d flags %08x "
@@ -297,11 +297,20 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
297 mmc_hostname(host), mrq->stop->opcode, 297 mmc_hostname(host), mrq->stop->opcode,
298 mrq->stop->arg, mrq->stop->flags); 298 mrq->stop->arg, mrq->stop->flags);
299 } 299 }
300}
300 301
301 WARN_ON(!host->claimed); 302static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
303{
304#ifdef CONFIG_MMC_DEBUG
305 unsigned int i, sz;
306 struct scatterlist *sg;
307#endif
302 308
303 mrq->cmd->error = 0; 309 if (mrq->cmd) {
304 mrq->cmd->mrq = mrq; 310 mrq->cmd->error = 0;
311 mrq->cmd->mrq = mrq;
312 mrq->cmd->data = mrq->data;
313 }
305 if (mrq->sbc) { 314 if (mrq->sbc) {
306 mrq->sbc->error = 0; 315 mrq->sbc->error = 0;
307 mrq->sbc->mrq = mrq; 316 mrq->sbc->mrq = mrq;
@@ -318,8 +327,6 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
318 if (sz != mrq->data->blocks * mrq->data->blksz) 327 if (sz != mrq->data->blocks * mrq->data->blksz)
319 return -EINVAL; 328 return -EINVAL;
320#endif 329#endif
321
322 mrq->cmd->data = mrq->data;
323 mrq->data->error = 0; 330 mrq->data->error = 0;
324 mrq->data->mrq = mrq; 331 mrq->data->mrq = mrq;
325 if (mrq->stop) { 332 if (mrq->stop) {
@@ -328,6 +335,27 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
328 mrq->stop->mrq = mrq; 335 mrq->stop->mrq = mrq;
329 } 336 }
330 } 337 }
338
339 return 0;
340}
341
342static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
343{
344 int err;
345
346 mmc_retune_hold(host);
347
348 if (mmc_card_removed(host->card))
349 return -ENOMEDIUM;
350
351 mmc_mrq_pr_debug(host, mrq);
352
353 WARN_ON(!host->claimed);
354
355 err = mmc_mrq_prep(host, mrq);
356 if (err)
357 return err;
358
331 led_trigger_event(host->led, LED_FULL); 359 led_trigger_event(host->led, LED_FULL);
332 __mmc_start_request(host, mrq); 360 __mmc_start_request(host, mrq);
333 361
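After this refactor mmc_start_request() reads as a short pipeline: hold retuning, bail out if the card is gone, log via mmc_mrq_pr_debug(), validate and wire up the request via mmc_mrq_prep(), then issue it. The debug-only invariant that the scatterlist must cover exactly blocks * blksz bytes is easy to reproduce on its own; a sketch with a simplified sg type:

#include <stdio.h>

/* Simplified stand-in for struct scatterlist. */
struct sg { unsigned int length; };

/* Mirrors the CONFIG_MMC_DEBUG check in mmc_mrq_prep(): the summed
 * scatterlist lengths must equal blocks * blksz, else -EINVAL. */
static int check_sg_covers(const struct sg *sg, unsigned int nents,
                           unsigned int blocks, unsigned int blksz)
{
        unsigned int i, sz = 0;

        for (i = 0; i < nents; i++)
                sz += sg[i].length;

        return (sz == blocks * blksz) ? 0 : -22 /* -EINVAL */;
}

int main(void)
{
        struct sg sg[2] = { { 512 }, { 512 } };

        printf("%d\n", check_sg_covers(sg, 2, 2, 512)); /* 0 */
        printf("%d\n", check_sg_covers(sg, 2, 4, 512)); /* -22 */
        return 0;
}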
@@ -485,56 +513,6 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
485 return err; 513 return err;
486} 514}
487 515
488/*
489 * mmc_wait_for_data_req_done() - wait for request completed
490 * @host: MMC host to prepare the command.
491 * @mrq: MMC request to wait for
492 *
493 * Blocks MMC context till host controller will ack end of data request
494 * execution or new request notification arrives from the block layer.
495 * Handles command retries.
496 *
497 * Returns enum mmc_blk_status after checking errors.
498 */
499static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
500 struct mmc_request *mrq)
501{
502 struct mmc_command *cmd;
503 struct mmc_context_info *context_info = &host->context_info;
504 enum mmc_blk_status status;
505
506 while (1) {
507 wait_event_interruptible(context_info->wait,
508 (context_info->is_done_rcv ||
509 context_info->is_new_req));
510
511 if (context_info->is_done_rcv) {
512 context_info->is_done_rcv = false;
513 cmd = mrq->cmd;
514
515 if (!cmd->error || !cmd->retries ||
516 mmc_card_removed(host->card)) {
517 status = host->areq->err_check(host->card,
518 host->areq);
519 break; /* return status */
520 } else {
521 mmc_retune_recheck(host);
522 pr_info("%s: req failed (CMD%u): %d, retrying...\n",
523 mmc_hostname(host),
524 cmd->opcode, cmd->error);
525 cmd->retries--;
526 cmd->error = 0;
527 __mmc_start_request(host, mrq);
528 continue; /* wait for done/new event again */
529 }
530 }
531
532 return MMC_BLK_NEW_REQUEST;
533 }
534 mmc_retune_release(host);
535 return status;
536}
537
538void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq) 516void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
539{ 517{
540 struct mmc_command *cmd; 518 struct mmc_command *cmd;
@@ -639,14 +617,44 @@ static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
639 */ 617 */
640static enum mmc_blk_status mmc_finalize_areq(struct mmc_host *host) 618static enum mmc_blk_status mmc_finalize_areq(struct mmc_host *host)
641{ 619{
620 struct mmc_context_info *context_info = &host->context_info;
642 enum mmc_blk_status status; 621 enum mmc_blk_status status;
643 622
644 if (!host->areq) 623 if (!host->areq)
645 return MMC_BLK_SUCCESS; 624 return MMC_BLK_SUCCESS;
646 625
647 status = mmc_wait_for_data_req_done(host, host->areq->mrq); 626 while (1) {
648 if (status == MMC_BLK_NEW_REQUEST) 627 wait_event_interruptible(context_info->wait,
649 return status; 628 (context_info->is_done_rcv ||
629 context_info->is_new_req));
630
631 if (context_info->is_done_rcv) {
632 struct mmc_command *cmd;
633
634 context_info->is_done_rcv = false;
635 cmd = host->areq->mrq->cmd;
636
637 if (!cmd->error || !cmd->retries ||
638 mmc_card_removed(host->card)) {
639 status = host->areq->err_check(host->card,
640 host->areq);
641 break; /* return status */
642 } else {
643 mmc_retune_recheck(host);
644 pr_info("%s: req failed (CMD%u): %d, retrying...\n",
645 mmc_hostname(host),
646 cmd->opcode, cmd->error);
647 cmd->retries--;
648 cmd->error = 0;
649 __mmc_start_request(host, host->areq->mrq);
650 continue; /* wait for done/new event again */
651 }
652 }
653
654 return MMC_BLK_NEW_REQUEST;
655 }
656
657 mmc_retune_release(host);
650 658
651 /* 659 /*
652 * Check BKOPS urgency for each R1 response 660 * Check BKOPS urgency for each R1 response
@@ -683,7 +691,7 @@ struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
683{ 691{
684 enum mmc_blk_status status; 692 enum mmc_blk_status status;
685 int start_err = 0; 693 int start_err = 0;
686 struct mmc_async_req *data = host->areq; 694 struct mmc_async_req *previous = host->areq;
687 695
688 /* Prepare a new request */ 696 /* Prepare a new request */
689 if (areq) 697 if (areq)
@@ -691,13 +699,12 @@ struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
691 699
692 /* Finalize previous request */ 700 /* Finalize previous request */
693 status = mmc_finalize_areq(host); 701 status = mmc_finalize_areq(host);
702 if (ret_stat)
703 *ret_stat = status;
694 704
695 /* The previous request is still going on... */ 705 /* The previous request is still going on... */
696 if (status == MMC_BLK_NEW_REQUEST) { 706 if (status == MMC_BLK_NEW_REQUEST)
697 if (ret_stat)
698 *ret_stat = status;
699 return NULL; 707 return NULL;
700 }
701 708
702 /* Fine so far, start the new request! */ 709 /* Fine so far, start the new request! */
703 if (status == MMC_BLK_SUCCESS && areq) 710 if (status == MMC_BLK_SUCCESS && areq)
@@ -716,9 +723,7 @@ struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
716 else 723 else
717 host->areq = areq; 724 host->areq = areq;
718 725
719 if (ret_stat) 726 return previous;
720 *ret_stat = status;
721 return data;
722} 727}
723EXPORT_SYMBOL(mmc_start_areq); 728EXPORT_SYMBOL(mmc_start_areq);
724 729
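With the rename from data to previous, the contract of mmc_start_areq() is plainer: finalize whatever was in flight, start the new request, and hand the previous one back to the caller for post-processing. A toy model of that one-slot pipeline (status propagation and error paths deliberately elided):

#include <stdio.h>

struct areq { const char *name; };

static struct areq *in_flight;  /* models host->areq */

/* Models mmc_start_areq(): swap the new request in and return the
 * previous one for the caller to complete. */
static struct areq *start_areq(struct areq *areq)
{
        struct areq *previous = in_flight;

        in_flight = areq;       /* start the new request */
        return previous;        /* caller finalizes this one */
}

int main(void)
{
        struct areq a = { "A" }, b = { "B" };
        struct areq *prev;

        prev = start_areq(&a);  /* nothing in flight yet */
        printf("prev: %s\n", prev ? prev->name : "(none)");
        prev = start_areq(&b);
        printf("prev: %s\n", prev ? prev->name : "(none)"); /* A */
        return 0;
}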
@@ -2555,6 +2560,12 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card)
2555} 2560}
2556EXPORT_SYMBOL(mmc_calc_max_discard); 2561EXPORT_SYMBOL(mmc_calc_max_discard);
2557 2562
2563bool mmc_card_is_blockaddr(struct mmc_card *card)
2564{
2565 return card ? mmc_card_blockaddr(card) : false;
2566}
2567EXPORT_SYMBOL(mmc_card_is_blockaddr);
2568
2558int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen) 2569int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2559{ 2570{
2560 struct mmc_command cmd = {}; 2571 struct mmc_command cmd = {};
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index b502601df228..2c87dede5841 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -790,6 +790,7 @@ MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
790MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult); 790MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
791MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors); 791MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
792MMC_DEV_ATTR(ocr, "%08x\n", card->ocr); 792MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
793MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en);
793 794
794static ssize_t mmc_fwrev_show(struct device *dev, 795static ssize_t mmc_fwrev_show(struct device *dev,
795 struct device_attribute *attr, 796 struct device_attribute *attr,
@@ -845,6 +846,7 @@ static struct attribute *mmc_std_attrs[] = {
845 &dev_attr_rel_sectors.attr, 846 &dev_attr_rel_sectors.attr,
846 &dev_attr_ocr.attr, 847 &dev_attr_ocr.attr,
847 &dev_attr_dsr.attr, 848 &dev_attr_dsr.attr,
849 &dev_attr_cmdq_en.attr,
848 NULL, 850 NULL,
849}; 851};
850ATTRIBUTE_GROUPS(mmc_std); 852ATTRIBUTE_GROUPS(mmc_std);
@@ -1788,6 +1790,13 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1788 } 1790 }
1789 1791
1790 /* 1792 /*
1793 * In some cases (e.g. RPMB or mmc_test), the Command Queue must be
1794 * disabled for a time, so a flag is needed to indicate that the
1795 * Command Queue should be re-enabled afterwards.
1796 */
1797 card->reenable_cmdq = card->ext_csd.cmdq_en;
1798
1799 /*
1791 * The mandatory minimum values are defined for packed command. 1800 * The mandatory minimum values are defined for packed command.
1792 * read: 5, write: 3 1801 * read: 5, write: 3
1793 */ 1802 */
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index fe80f26d6971..78f75f00efc5 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -305,7 +305,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
305int mmc_send_csd(struct mmc_card *card, u32 *csd) 305int mmc_send_csd(struct mmc_card *card, u32 *csd)
306{ 306{
307 int ret, i; 307 int ret, i;
308 u32 *csd_tmp; 308 __be32 *csd_tmp;
309 309
310 if (!mmc_host_is_spi(card->host)) 310 if (!mmc_host_is_spi(card->host))
311 return mmc_send_cxd_native(card->host, card->rca << 16, 311 return mmc_send_cxd_native(card->host, card->rca << 16,
@@ -319,7 +319,7 @@ int mmc_send_csd(struct mmc_card *card, u32 *csd)
319 if (ret) 319 if (ret)
320 goto err; 320 goto err;
321 321
322 for (i = 0;i < 4;i++) 322 for (i = 0; i < 4; i++)
323 csd[i] = be32_to_cpu(csd_tmp[i]); 323 csd[i] = be32_to_cpu(csd_tmp[i]);
324 324
325err: 325err:
@@ -330,7 +330,7 @@ err:
330int mmc_send_cid(struct mmc_host *host, u32 *cid) 330int mmc_send_cid(struct mmc_host *host, u32 *cid)
331{ 331{
332 int ret, i; 332 int ret, i;
333 u32 *cid_tmp; 333 __be32 *cid_tmp;
334 334
335 if (!mmc_host_is_spi(host)) { 335 if (!mmc_host_is_spi(host)) {
336 if (!host->card) 336 if (!host->card)
@@ -347,7 +347,7 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid)
347 if (ret) 347 if (ret)
348 goto err; 348 goto err;
349 349
350 for (i = 0;i < 4;i++) 350 for (i = 0; i < 4; i++)
351 cid[i] = be32_to_cpu(cid_tmp[i]); 351 cid[i] = be32_to_cpu(cid_tmp[i]);
352 352
353err: 353err:
@@ -838,3 +838,31 @@ int mmc_can_ext_csd(struct mmc_card *card)
838{ 838{
839 return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3); 839 return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
840} 840}
841
842static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
843{
844 u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
845 int err;
846
847 if (!card->ext_csd.cmdq_support)
848 return -EOPNOTSUPP;
849
850 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
851 val, card->ext_csd.generic_cmd6_time);
852 if (!err)
853 card->ext_csd.cmdq_en = enable;
854
855 return err;
856}
857
858int mmc_cmdq_enable(struct mmc_card *card)
859{
860 return mmc_cmdq_switch(card, true);
861}
862EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
863
864int mmc_cmdq_disable(struct mmc_card *card)
865{
866 return mmc_cmdq_switch(card, false);
867}
868EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
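Callers are expected to pair these helpers around operations that cannot run with the Command Queue enabled (RPMB access, mmc_test), using card->reenable_cmdq to restore the previous state afterwards. A hedged sketch of that pattern, with stub enable/disable functions in place of the real CMD6 switch:

#include <stdbool.h>
#include <stdio.h>

/* Stubbed card state; the real helpers issue CMD6 (SWITCH) to flip
 * EXT_CSD_CMDQ_MODE_EN and track the result in ext_csd.cmdq_en. */
struct card { bool cmdq_en; bool reenable_cmdq; };

static int cmdq_disable(struct card *c) { c->cmdq_en = false; return 0; }
static int cmdq_enable(struct card *c)  { c->cmdq_en = true;  return 0; }

static void run_with_cmdq_paused(struct card *c)
{
        /* Remember whether to turn CMDQ back on afterwards. */
        c->reenable_cmdq = c->cmdq_en;
        if (c->cmdq_en)
                cmdq_disable(c);

        printf("doing non-CMDQ work (cmdq_en=%d)\n", c->cmdq_en);

        if (c->reenable_cmdq)
                cmdq_enable(c);
}

int main(void)
{
        struct card c = { .cmdq_en = true };

        run_with_cmdq_paused(&c);
        printf("restored cmdq_en=%d\n", c.cmdq_en);      /* 1 */
        return 0;
}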
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 74beea8a9c7e..978bd2e60f8a 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -46,6 +46,8 @@ int mmc_read_bkops_status(struct mmc_card *card);
46void mmc_start_bkops(struct mmc_card *card, bool from_exception); 46void mmc_start_bkops(struct mmc_card *card, bool from_exception);
47int mmc_can_reset(struct mmc_card *card); 47int mmc_can_reset(struct mmc_card *card);
48int mmc_flush_cache(struct mmc_card *card); 48int mmc_flush_cache(struct mmc_card *card);
49int mmc_cmdq_enable(struct mmc_card *card);
50int mmc_cmdq_disable(struct mmc_card *card);
49 51
50#endif 52#endif
51 53
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index f99ac3123fd2..fd1b4b8510b9 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -26,6 +26,7 @@
26#include "card.h" 26#include "card.h"
27#include "host.h" 27#include "host.h"
28#include "bus.h" 28#include "bus.h"
29#include "mmc_ops.h"
29 30
30#define RESULT_OK 0 31#define RESULT_OK 0
31#define RESULT_FAIL 1 32#define RESULT_FAIL 1
@@ -3264,6 +3265,14 @@ static int mmc_test_probe(struct mmc_card *card)
3264 if (ret) 3265 if (ret)
3265 return ret; 3266 return ret;
3266 3267
3268 if (card->ext_csd.cmdq_en) {
3269 mmc_claim_host(card->host);
3270 ret = mmc_cmdq_disable(card);
3271 mmc_release_host(card->host);
3272 if (ret)
3273 return ret;
3274 }
3275
3267 dev_info(&card->dev, "Card claimed for testing.\n"); 3276 dev_info(&card->dev, "Card claimed for testing.\n");
3268 3277
3269 return 0; 3278 return 0;
@@ -3271,6 +3280,11 @@ static int mmc_test_probe(struct mmc_card *card)
3271 3280
3272static void mmc_test_remove(struct mmc_card *card) 3281static void mmc_test_remove(struct mmc_card *card)
3273{ 3282{
3283 if (card->reenable_cmdq) {
3284 mmc_claim_host(card->host);
3285 mmc_cmdq_enable(card);
3286 mmc_release_host(card->host);
3287 }
3274 mmc_test_free_result(card); 3288 mmc_test_free_result(card);
3275 mmc_test_free_dbgfs_file(card); 3289 mmc_test_free_dbgfs_file(card);
3276} 3290}
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 4c54ad34e17a..5c37b6be3e7b 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -40,6 +40,35 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
40 return BLKPREP_OK; 40 return BLKPREP_OK;
41} 41}
42 42
43struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *mq,
44 struct request *req)
45{
46 struct mmc_queue_req *mqrq;
47 int i = ffz(mq->qslots);
48
49 if (i >= mq->qdepth)
50 return NULL;
51
52 mqrq = &mq->mqrq[i];
53 WARN_ON(mqrq->req || mq->qcnt >= mq->qdepth ||
54 test_bit(mqrq->task_id, &mq->qslots));
55 mqrq->req = req;
56 mq->qcnt += 1;
57 __set_bit(mqrq->task_id, &mq->qslots);
58
59 return mqrq;
60}
61
62void mmc_queue_req_free(struct mmc_queue *mq,
63 struct mmc_queue_req *mqrq)
64{
65 WARN_ON(!mqrq->req || mq->qcnt < 1 ||
66 !test_bit(mqrq->task_id, &mq->qslots));
67 mqrq->req = NULL;
68 mq->qcnt -= 1;
69 __clear_bit(mqrq->task_id, &mq->qslots);
70}
71
43static int mmc_queue_thread(void *d) 72static int mmc_queue_thread(void *d)
44{ 73{
45 struct mmc_queue *mq = d; 74 struct mmc_queue *mq = d;
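mmc_queue_req_find() and mmc_queue_req_free() replace the old mqrq_cur/mqrq_prev pair with a small slot allocator: qslots is a bitmap of busy task IDs, ffz() picks the first free slot, and qcnt counts slots in use. The same scheme works standalone; this sketch uses GCC's __builtin_ctzl() as a stand-in for the kernel's ffz():

#include <stdio.h>

#define QDEPTH 2

static unsigned long qslots;    /* bitmap of busy slots */
static int qcnt;                /* number of busy slots */

/* ffz(): index of the first zero bit; -1 when no bit is free. */
static int first_free_slot(unsigned long bits)
{
        return (~bits == 0) ? -1 : __builtin_ctzl(~bits);
}

static int slot_find(void)      /* models mmc_queue_req_find() */
{
        int i = first_free_slot(qslots);

        if (i < 0 || i >= QDEPTH)
                return -1;      /* queue full */
        qslots |= 1UL << i;
        qcnt++;
        return i;
}

static void slot_free(int i)    /* models mmc_queue_req_free() */
{
        qslots &= ~(1UL << i);
        qcnt--;
}

int main(void)
{
        int a = slot_find(), b = slot_find(), c = slot_find();

        printf("a=%d b=%d c=%d qcnt=%d\n", a, b, c, qcnt); /* 0 1 -1 2 */
        slot_free(a);
        printf("next=%d\n", slot_find());       /* 0 again */
        return 0;
}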
@@ -50,7 +79,7 @@ static int mmc_queue_thread(void *d)
50 79
51 down(&mq->thread_sem); 80 down(&mq->thread_sem);
52 do { 81 do {
53 struct request *req = NULL; 82 struct request *req;
54 83
55 spin_lock_irq(q->queue_lock); 84 spin_lock_irq(q->queue_lock);
56 set_current_state(TASK_INTERRUPTIBLE); 85 set_current_state(TASK_INTERRUPTIBLE);
@@ -63,38 +92,17 @@ static int mmc_queue_thread(void *d)
63 * Dispatch queue is empty so set flags for 92 * Dispatch queue is empty so set flags for
64 * mmc_request_fn() to wake us up. 93 * mmc_request_fn() to wake us up.
65 */ 94 */
66 if (mq->mqrq_prev->req) 95 if (mq->qcnt)
67 cntx->is_waiting_last_req = true; 96 cntx->is_waiting_last_req = true;
68 else 97 else
69 mq->asleep = true; 98 mq->asleep = true;
70 } 99 }
71 mq->mqrq_cur->req = req;
72 spin_unlock_irq(q->queue_lock); 100 spin_unlock_irq(q->queue_lock);
73 101
74 if (req || mq->mqrq_prev->req) { 102 if (req || mq->qcnt) {
75 bool req_is_special = mmc_req_is_special(req);
76
77 set_current_state(TASK_RUNNING); 103 set_current_state(TASK_RUNNING);
78 mmc_blk_issue_rq(mq, req); 104 mmc_blk_issue_rq(mq, req);
79 cond_resched(); 105 cond_resched();
80 if (mq->new_request) {
81 mq->new_request = false;
82 continue; /* fetch again */
83 }
84
85 /*
86 * Current request becomes previous request
87 * and vice versa.
88 * In case of special requests, current request
89 * has been finished. Do not assign it to previous
90 * request.
91 */
92 if (req_is_special)
93 mq->mqrq_cur->req = NULL;
94
95 mq->mqrq_prev->brq.mrq.data = NULL;
96 mq->mqrq_prev->req = NULL;
97 swap(mq->mqrq_prev, mq->mqrq_cur);
98 } else { 106 } else {
99 if (kthread_should_stop()) { 107 if (kthread_should_stop()) {
100 set_current_state(TASK_RUNNING); 108 set_current_state(TASK_RUNNING);
@@ -141,17 +149,13 @@ static void mmc_request_fn(struct request_queue *q)
141 wake_up_process(mq->thread); 149 wake_up_process(mq->thread);
142} 150}
143 151
144static struct scatterlist *mmc_alloc_sg(int sg_len, int *err) 152static struct scatterlist *mmc_alloc_sg(int sg_len)
145{ 153{
146 struct scatterlist *sg; 154 struct scatterlist *sg;
147 155
148 sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL); 156 sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
149 if (!sg) 157 if (sg)
150 *err = -ENOMEM;
151 else {
152 *err = 0;
153 sg_init_table(sg, sg_len); 158 sg_init_table(sg, sg_len);
154 }
155 159
156 return sg; 160 return sg;
157} 161}
@@ -175,80 +179,178 @@ static void mmc_queue_setup_discard(struct request_queue *q,
175 queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q); 179 queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
176} 180}
177 181
178#ifdef CONFIG_MMC_BLOCK_BOUNCE 182static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
179static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq, 183{
180 unsigned int bouncesz) 184 kfree(mqrq->bounce_sg);
185 mqrq->bounce_sg = NULL;
186
187 kfree(mqrq->sg);
188 mqrq->sg = NULL;
189
190 kfree(mqrq->bounce_buf);
191 mqrq->bounce_buf = NULL;
192}
193
194static void mmc_queue_reqs_free_bufs(struct mmc_queue_req *mqrq, int qdepth)
181{ 195{
182 int i; 196 int i;
183 197
184 for (i = 0; i < mq->qdepth; i++) { 198 for (i = 0; i < qdepth; i++)
185 mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL); 199 mmc_queue_req_free_bufs(&mqrq[i]);
186 if (!mq->mqrq[i].bounce_buf) 200}
187 goto out_err;
188 }
189 201
190 return true; 202static void mmc_queue_free_mqrqs(struct mmc_queue_req *mqrq, int qdepth)
203{
204 mmc_queue_reqs_free_bufs(mqrq, qdepth);
205 kfree(mqrq);
206}
191 207
192out_err: 208static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
193 while (--i >= 0) { 209{
194 kfree(mq->mqrq[i].bounce_buf); 210 struct mmc_queue_req *mqrq;
195 mq->mqrq[i].bounce_buf = NULL; 211 int i;
212
213 mqrq = kcalloc(qdepth, sizeof(*mqrq), GFP_KERNEL);
214 if (mqrq) {
215 for (i = 0; i < qdepth; i++)
216 mqrq[i].task_id = i;
196 } 217 }
197 pr_warn("%s: unable to allocate bounce buffers\n", 218
198 mmc_card_name(mq->card)); 219 return mqrq;
199 return false;
200} 220}
201 221
202static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq, 222#ifdef CONFIG_MMC_BLOCK_BOUNCE
203 unsigned int bouncesz) 223static int mmc_queue_alloc_bounce_bufs(struct mmc_queue_req *mqrq, int qdepth,
224 unsigned int bouncesz)
204{ 225{
205 int i, ret; 226 int i;
206 227
207 for (i = 0; i < mq->qdepth; i++) { 228 for (i = 0; i < qdepth; i++) {
208 mq->mqrq[i].sg = mmc_alloc_sg(1, &ret); 229 mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
209 if (ret) 230 if (!mqrq[i].bounce_buf)
210 return ret; 231 return -ENOMEM;
211 232
212 mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret); 233 mqrq[i].sg = mmc_alloc_sg(1);
213 if (ret) 234 if (!mqrq[i].sg)
214 return ret; 235 return -ENOMEM;
236
237 mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512);
238 if (!mqrq[i].bounce_sg)
239 return -ENOMEM;
215 } 240 }
216 241
217 return 0; 242 return 0;
218} 243}
244
245static bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq, int qdepth,
246 unsigned int bouncesz)
247{
248 int ret;
249
250 ret = mmc_queue_alloc_bounce_bufs(mqrq, qdepth, bouncesz);
251 if (ret)
252 mmc_queue_reqs_free_bufs(mqrq, qdepth);
253
254 return !ret;
255}
256
257static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
258{
259 unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
260
261 if (host->max_segs != 1)
262 return 0;
263
264 if (bouncesz > host->max_req_size)
265 bouncesz = host->max_req_size;
266 if (bouncesz > host->max_seg_size)
267 bouncesz = host->max_seg_size;
268 if (bouncesz > host->max_blk_count * 512)
269 bouncesz = host->max_blk_count * 512;
270
271 if (bouncesz <= 512)
272 return 0;
273
274 return bouncesz;
275}
276#else
277static inline bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq,
278 int qdepth, unsigned int bouncesz)
279{
280 return false;
281}
282
283static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
284{
285 return 0;
286}
219#endif 287#endif
220 288
221static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs) 289static int mmc_queue_alloc_sgs(struct mmc_queue_req *mqrq, int qdepth,
290 int max_segs)
222{ 291{
223 int i, ret; 292 int i;
224 293
225 for (i = 0; i < mq->qdepth; i++) { 294 for (i = 0; i < qdepth; i++) {
226 mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret); 295 mqrq[i].sg = mmc_alloc_sg(max_segs);
227 if (ret) 296 if (!mqrq[i].sg)
228 return ret; 297 return -ENOMEM;
229 } 298 }
230 299
231 return 0; 300 return 0;
232} 301}
233 302
234static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq) 303void mmc_queue_free_shared_queue(struct mmc_card *card)
235{ 304{
236 kfree(mqrq->bounce_sg); 305 if (card->mqrq) {
237 mqrq->bounce_sg = NULL; 306 mmc_queue_free_mqrqs(card->mqrq, card->qdepth);
307 card->mqrq = NULL;
308 }
309}
238 310
239 kfree(mqrq->sg); 311static int __mmc_queue_alloc_shared_queue(struct mmc_card *card, int qdepth)
240 mqrq->sg = NULL; 312{
313 struct mmc_host *host = card->host;
314 struct mmc_queue_req *mqrq;
315 unsigned int bouncesz;
316 int ret = 0;
241 317
242 kfree(mqrq->bounce_buf); 318 if (card->mqrq)
243 mqrq->bounce_buf = NULL; 319 return -EINVAL;
320
321 mqrq = mmc_queue_alloc_mqrqs(qdepth);
322 if (!mqrq)
323 return -ENOMEM;
324
325 card->mqrq = mqrq;
326 card->qdepth = qdepth;
327
328 bouncesz = mmc_queue_calc_bouncesz(host);
329
330 if (bouncesz && !mmc_queue_alloc_bounce(mqrq, qdepth, bouncesz)) {
331 bouncesz = 0;
332 pr_warn("%s: unable to allocate bounce buffers\n",
333 mmc_card_name(card));
334 }
335
336 card->bouncesz = bouncesz;
337
338 if (!bouncesz) {
339 ret = mmc_queue_alloc_sgs(mqrq, qdepth, host->max_segs);
340 if (ret)
341 goto out_err;
342 }
343
344 return ret;
345
346out_err:
347 mmc_queue_free_shared_queue(card);
348 return ret;
244} 349}
245 350
246static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq) 351int mmc_queue_alloc_shared_queue(struct mmc_card *card)
247{ 352{
248 int i; 353 return __mmc_queue_alloc_shared_queue(card, 2);
249
250 for (i = 0; i < mq->qdepth; i++)
251 mmc_queue_req_free_bufs(&mq->mqrq[i]);
252} 354}
253 355
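mmc_queue_calc_bouncesz() above turns the old open-coded #ifdef block into a pure function: bounce buffers only make sense when the host is limited to one segment, the size is clamped by the host's request, segment, and block-count limits, and anything at or below 512 bytes is rejected. The clamping is just a chain of min() operations, as this userspace sketch with made-up host limits shows:

#include <stdio.h>

#define MMC_QUEUE_BOUNCESZ 65536        /* default from the driver */

struct host_caps {
        unsigned int max_segs, max_req_size, max_seg_size, max_blk_count;
};

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

/* Mirrors mmc_queue_calc_bouncesz(): 0 means "no bounce buffer". */
static unsigned int calc_bouncesz(const struct host_caps *h)
{
        unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;

        if (h->max_segs != 1)
                return 0;       /* host can scatter/gather on its own */

        bouncesz = min_u(bouncesz, h->max_req_size);
        bouncesz = min_u(bouncesz, h->max_seg_size);
        bouncesz = min_u(bouncesz, h->max_blk_count * 512);

        return bouncesz <= 512 ? 0 : bouncesz;
}

int main(void)
{
        struct host_caps h = { 1, 32768, 65536, 128 };

        /* min(65536, 32768, 65536, 128*512) = 32768 */
        printf("bouncesz = %u\n", calc_bouncesz(&h));
        return 0;
}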
254/** 356/**
@@ -265,7 +367,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
265{ 367{
266 struct mmc_host *host = card->host; 368 struct mmc_host *host = card->host;
267 u64 limit = BLK_BOUNCE_HIGH; 369 u64 limit = BLK_BOUNCE_HIGH;
268 bool bounce = false;
269 int ret = -ENOMEM; 370 int ret = -ENOMEM;
270 371
271 if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) 372 if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
@@ -276,13 +377,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
276 if (!mq->queue) 377 if (!mq->queue)
277 return -ENOMEM; 378 return -ENOMEM;
278 379
279 mq->qdepth = 2; 380 mq->mqrq = card->mqrq;
280 mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req), 381 mq->qdepth = card->qdepth;
281 GFP_KERNEL);
282 if (!mq->mqrq)
283 goto blk_cleanup;
284 mq->mqrq_cur = &mq->mqrq[0];
285 mq->mqrq_prev = &mq->mqrq[1];
286 mq->queue->queuedata = mq; 382 mq->queue->queuedata = mq;
287 383
288 blk_queue_prep_rq(mq->queue, mmc_prep_request); 384 blk_queue_prep_rq(mq->queue, mmc_prep_request);
@@ -291,44 +387,17 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
291 if (mmc_can_erase(card)) 387 if (mmc_can_erase(card))
292 mmc_queue_setup_discard(mq->queue, card); 388 mmc_queue_setup_discard(mq->queue, card);
293 389
294#ifdef CONFIG_MMC_BLOCK_BOUNCE 390 if (card->bouncesz) {
295 if (host->max_segs == 1) { 391 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
296 unsigned int bouncesz; 392 blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
297 393 blk_queue_max_segments(mq->queue, card->bouncesz / 512);
298 bouncesz = MMC_QUEUE_BOUNCESZ; 394 blk_queue_max_segment_size(mq->queue, card->bouncesz);
299 395 } else {
300 if (bouncesz > host->max_req_size)
301 bouncesz = host->max_req_size;
302 if (bouncesz > host->max_seg_size)
303 bouncesz = host->max_seg_size;
304 if (bouncesz > (host->max_blk_count * 512))
305 bouncesz = host->max_blk_count * 512;
306
307 if (bouncesz > 512 &&
308 mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
309 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
310 blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
311 blk_queue_max_segments(mq->queue, bouncesz / 512);
312 blk_queue_max_segment_size(mq->queue, bouncesz);
313
314 ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
315 if (ret)
316 goto cleanup_queue;
317 bounce = true;
318 }
319 }
320#endif
321
322 if (!bounce) {
323 blk_queue_bounce_limit(mq->queue, limit); 396 blk_queue_bounce_limit(mq->queue, limit);
324 blk_queue_max_hw_sectors(mq->queue, 397 blk_queue_max_hw_sectors(mq->queue,
325 min(host->max_blk_count, host->max_req_size / 512)); 398 min(host->max_blk_count, host->max_req_size / 512));
326 blk_queue_max_segments(mq->queue, host->max_segs); 399 blk_queue_max_segments(mq->queue, host->max_segs);
327 blk_queue_max_segment_size(mq->queue, host->max_seg_size); 400 blk_queue_max_segment_size(mq->queue, host->max_seg_size);
328
329 ret = mmc_queue_alloc_sgs(mq, host->max_segs);
330 if (ret)
331 goto cleanup_queue;
332 } 401 }
333 402
334 sema_init(&mq->thread_sem, 1); 403 sema_init(&mq->thread_sem, 1);
@@ -343,11 +412,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
343 412
344 return 0; 413 return 0;
345 414
346 cleanup_queue: 415cleanup_queue:
347 mmc_queue_reqs_free_bufs(mq);
348 kfree(mq->mqrq);
349 mq->mqrq = NULL; 416 mq->mqrq = NULL;
350blk_cleanup:
351 blk_cleanup_queue(mq->queue); 417 blk_cleanup_queue(mq->queue);
352 return ret; 418 return ret;
353} 419}
@@ -369,10 +435,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
369 blk_start_queue(q); 435 blk_start_queue(q);
370 spin_unlock_irqrestore(q->queue_lock, flags); 436 spin_unlock_irqrestore(q->queue_lock, flags);
371 437
372 mmc_queue_reqs_free_bufs(mq);
373 kfree(mq->mqrq);
374 mq->mqrq = NULL; 438 mq->mqrq = NULL;
375
376 mq->card = NULL; 439 mq->card = NULL;
377} 440}
378EXPORT_SYMBOL(mmc_cleanup_queue); 441EXPORT_SYMBOL(mmc_cleanup_queue);
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index e298f100101b..871796c3f406 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -34,23 +34,25 @@ struct mmc_queue_req {
34 struct scatterlist *bounce_sg; 34 struct scatterlist *bounce_sg;
35 unsigned int bounce_sg_len; 35 unsigned int bounce_sg_len;
36 struct mmc_async_req areq; 36 struct mmc_async_req areq;
37 int task_id;
37}; 38};
38 39
39struct mmc_queue { 40struct mmc_queue {
40 struct mmc_card *card; 41 struct mmc_card *card;
41 struct task_struct *thread; 42 struct task_struct *thread;
42 struct semaphore thread_sem; 43 struct semaphore thread_sem;
43 bool new_request;
44 bool suspended; 44 bool suspended;
45 bool asleep; 45 bool asleep;
46 struct mmc_blk_data *blkdata; 46 struct mmc_blk_data *blkdata;
47 struct request_queue *queue; 47 struct request_queue *queue;
48 struct mmc_queue_req *mqrq; 48 struct mmc_queue_req *mqrq;
49 struct mmc_queue_req *mqrq_cur;
50 struct mmc_queue_req *mqrq_prev;
51 int qdepth; 49 int qdepth;
50 int qcnt;
51 unsigned long qslots;
52}; 52};
53 53
54extern int mmc_queue_alloc_shared_queue(struct mmc_card *card);
55extern void mmc_queue_free_shared_queue(struct mmc_card *card);
54extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *, 56extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
55 const char *); 57 const char *);
56extern void mmc_cleanup_queue(struct mmc_queue *); 58extern void mmc_cleanup_queue(struct mmc_queue *);
@@ -64,4 +66,8 @@ extern void mmc_queue_bounce_post(struct mmc_queue_req *);
64 66
65extern int mmc_access_rpmb(struct mmc_queue *); 67extern int mmc_access_rpmb(struct mmc_queue *);
66 68
69extern struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *,
70 struct request *);
71extern void mmc_queue_req_free(struct mmc_queue *, struct mmc_queue_req *);
72
67#endif 73#endif
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 89531b48ae84..d109634fbfce 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -225,7 +225,7 @@ static int mmc_decode_scr(struct mmc_card *card)
225static int mmc_read_ssr(struct mmc_card *card) 225static int mmc_read_ssr(struct mmc_card *card)
226{ 226{
227 unsigned int au, es, et, eo; 227 unsigned int au, es, et, eo;
228 u32 *raw_ssr; 228 __be32 *raw_ssr;
229 int i; 229 int i;
230 230
231 if (!(card->csd.cmdclass & CCC_APP_SPEC)) { 231 if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
@@ -853,7 +853,7 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
853 /* 853 /*
854 * Fetch SCR from card. 854 * Fetch SCR from card.
855 */ 855 */
856 err = mmc_app_send_scr(card, card->raw_scr); 856 err = mmc_app_send_scr(card);
857 if (err) 857 if (err)
858 return err; 858 return err;
859 859
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 9d5824a37586..47056d8d1bac 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -232,14 +232,14 @@ int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
232 return 0; 232 return 0;
233} 233}
234 234
235int mmc_app_send_scr(struct mmc_card *card, u32 *scr) 235int mmc_app_send_scr(struct mmc_card *card)
236{ 236{
237 int err; 237 int err;
238 struct mmc_request mrq = {}; 238 struct mmc_request mrq = {};
239 struct mmc_command cmd = {}; 239 struct mmc_command cmd = {};
240 struct mmc_data data = {}; 240 struct mmc_data data = {};
241 struct scatterlist sg; 241 struct scatterlist sg;
242 void *data_buf; 242 __be32 *scr;
243 243
244 /* NOTE: caller guarantees scr is heap-allocated */ 244 /* NOTE: caller guarantees scr is heap-allocated */
245 245
@@ -250,8 +250,8 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
250 /* dma onto stack is unsafe/nonportable, but callers to this 250 /* dma onto stack is unsafe/nonportable, but callers to this
251 * routine normally provide temporary on-stack buffers ... 251 * routine normally provide temporary on-stack buffers ...
252 */ 252 */
253 data_buf = kmalloc(sizeof(card->raw_scr), GFP_KERNEL); 253 scr = kmalloc(sizeof(card->raw_scr), GFP_KERNEL);
254 if (data_buf == NULL) 254 if (!scr)
255 return -ENOMEM; 255 return -ENOMEM;
256 256
257 mrq.cmd = &cmd; 257 mrq.cmd = &cmd;
@@ -267,23 +267,22 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
267 data.sg = &sg; 267 data.sg = &sg;
268 data.sg_len = 1; 268 data.sg_len = 1;
269 269
270 sg_init_one(&sg, data_buf, 8); 270 sg_init_one(&sg, scr, 8);
271 271
272 mmc_set_data_timeout(&data, card); 272 mmc_set_data_timeout(&data, card);
273 273
274 mmc_wait_for_req(card->host, &mrq); 274 mmc_wait_for_req(card->host, &mrq);
275 275
276 memcpy(scr, data_buf, sizeof(card->raw_scr)); 276 card->raw_scr[0] = be32_to_cpu(scr[0]);
277 kfree(data_buf); 277 card->raw_scr[1] = be32_to_cpu(scr[1]);
278
279 kfree(scr);
278 280
279 if (cmd.error) 281 if (cmd.error)
280 return cmd.error; 282 return cmd.error;
281 if (data.error) 283 if (data.error)
282 return data.error; 284 return data.error;
283 285
284 scr[0] = be32_to_cpu(scr[0]);
285 scr[1] = be32_to_cpu(scr[1]);
286
287 return 0; 286 return 0;
288} 287}
289 288
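Typing the temporary buffer as __be32 * lets sparse verify the conversion: the SCR arrives from the card as two big-endian 32-bit words and is byte-swapped into host order before landing in card->raw_scr. The swap itself is ordinary C; a sketch using glibc's be32toh()/htobe32() in place of the kernel's be32_to_cpu(), with made-up SCR values:

#include <endian.h>     /* htobe32()/be32toh(); glibc, not strict ISO C */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* The raw SCR as it arrives from the card: two big-endian
         * words (values are illustrative only). */
        uint32_t wire[2] = { htobe32(0x02350000), htobe32(0x01000000) };
        uint32_t raw_scr[2];

        /* Same shape as mmc_app_send_scr(): swap into host order. */
        raw_scr[0] = be32toh(wire[0]);
        raw_scr[1] = be32toh(wire[1]);

        printf("raw_scr = %08" PRIx32 " %08" PRIx32 "\n",
               raw_scr[0], raw_scr[1]);
        return 0;
}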
diff --git a/drivers/mmc/core/sd_ops.h b/drivers/mmc/core/sd_ops.h
index 784f8e6b6baa..0e6c3d51e66d 100644
--- a/drivers/mmc/core/sd_ops.h
+++ b/drivers/mmc/core/sd_ops.h
@@ -22,7 +22,7 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width);
22int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr); 22int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
23int mmc_send_if_cond(struct mmc_host *host, u32 ocr); 23int mmc_send_if_cond(struct mmc_host *host, u32 ocr);
24int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca); 24int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca);
25int mmc_app_send_scr(struct mmc_card *card, u32 *scr); 25int mmc_app_send_scr(struct mmc_card *card);
26int mmc_sd_switch(struct mmc_card *card, int mode, int group, 26int mmc_sd_switch(struct mmc_card *card, int mode, int group,
27 u8 value, u8 *resp); 27 u8 value, u8 *resp);
28int mmc_app_sd_status(struct mmc_card *card, void *ssr); 28int mmc_app_sd_status(struct mmc_card *card, void *ssr);
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 74195d772f5a..d40744bbafa9 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -373,19 +373,16 @@ u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret)
373 u8 val; 373 u8 val;
374 374
375 if (!func) { 375 if (!func) {
376 *err_ret = -EINVAL; 376 if (err_ret)
377 *err_ret = -EINVAL;
377 return 0xFF; 378 return 0xFF;
378 } 379 }
379 380
380 if (err_ret)
381 *err_ret = 0;
382
383 ret = mmc_io_rw_direct(func->card, 0, func->num, addr, 0, &val); 381 ret = mmc_io_rw_direct(func->card, 0, func->num, addr, 0, &val);
384 if (ret) { 382 if (err_ret)
385 if (err_ret) 383 *err_ret = ret;
386 *err_ret = ret; 384 if (ret)
387 return 0xFF; 385 return 0xFF;
388 }
389 386
390 return val; 387 return val;
391} 388}
@@ -407,7 +404,8 @@ void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret)
407 int ret; 404 int ret;
408 405
409 if (!func) { 406 if (!func) {
410 *err_ret = -EINVAL; 407 if (err_ret)
408 *err_ret = -EINVAL;
411 return; 409 return;
412 } 410 }
413 411
@@ -441,7 +439,7 @@ u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte,
441 if (err_ret) 439 if (err_ret)
442 *err_ret = ret; 440 *err_ret = ret;
443 if (ret) 441 if (ret)
444 val = 0xff; 442 return 0xff;
445 443
446 return val; 444 return val;
447} 445}
@@ -529,15 +527,11 @@ u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret)
529{ 527{
530 int ret; 528 int ret;
531 529
532 if (err_ret)
533 *err_ret = 0;
534
535 ret = sdio_memcpy_fromio(func, func->tmpbuf, addr, 2); 530 ret = sdio_memcpy_fromio(func, func->tmpbuf, addr, 2);
536 if (ret) { 531 if (err_ret)
537 if (err_ret) 532 *err_ret = ret;
538 *err_ret = ret; 533 if (ret)
539 return 0xFFFF; 534 return 0xFFFF;
540 }
541 535
542 return le16_to_cpup((__le16 *)func->tmpbuf); 536 return le16_to_cpup((__le16 *)func->tmpbuf);
543} 537}
@@ -581,15 +575,11 @@ u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret)
581{ 575{
582 int ret; 576 int ret;
583 577
584 if (err_ret)
585 *err_ret = 0;
586
587 ret = sdio_memcpy_fromio(func, func->tmpbuf, addr, 4); 578 ret = sdio_memcpy_fromio(func, func->tmpbuf, addr, 4);
588 if (ret) { 579 if (err_ret)
589 if (err_ret) 580 *err_ret = ret;
590 *err_ret = ret; 581 if (ret)
591 return 0xFFFFFFFF; 582 return 0xFFFFFFFF;
592 }
593 583
594 return le32_to_cpup((__le32 *)func->tmpbuf); 584 return le32_to_cpup((__le32 *)func->tmpbuf);
595} 585}
@@ -635,19 +625,16 @@ unsigned char sdio_f0_readb(struct sdio_func *func, unsigned int addr,
635 unsigned char val; 625 unsigned char val;
636 626
637 if (!func) { 627 if (!func) {
638 *err_ret = -EINVAL; 628 if (err_ret)
629 *err_ret = -EINVAL;
639 return 0xFF; 630 return 0xFF;
640 } 631 }
641 632
642 if (err_ret)
643 *err_ret = 0;
644
645 ret = mmc_io_rw_direct(func->card, 0, 0, addr, 0, &val); 633 ret = mmc_io_rw_direct(func->card, 0, 0, addr, 0, &val);
646 if (ret) { 634 if (err_ret)
647 if (err_ret) 635 *err_ret = ret;
648 *err_ret = ret; 636 if (ret)
649 return 0xFF; 637 return 0xFF;
650 }
651 638
652 return val; 639 return val;
653} 640}
@@ -673,7 +660,8 @@ void sdio_f0_writeb(struct sdio_func *func, unsigned char b, unsigned int addr,
673 int ret; 660 int ret;
674 661
675 if (!func) { 662 if (!func) {
676 *err_ret = -EINVAL; 663 if (err_ret)
664 *err_ret = -EINVAL;
677 return; 665 return;
678 } 666 }
679 667
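All the accessors in this file now share one shape: NULL-check func only where an error can be reported, copy the return code into *err_ret unconditionally when the pointer is given, and return an all-ones sentinel on failure. Written out once, with a hypothetical io_read() backend standing in for mmc_io_rw_direct():

#include <stdint.h>
#include <stdio.h>

/* Hypothetical backend standing in for mmc_io_rw_direct(). */
static int io_read(unsigned int addr, uint8_t *val)
{
        *val = 0x5a;
        return addr == 0xbad ? -5 /* -EIO */ : 0;
}

/* The simplified sdio_readb() shape: propagate ret unconditionally
 * into *err_ret (when given), return 0xFF as the error sentinel. */
static uint8_t readb_like(unsigned int addr, int *err_ret)
{
        uint8_t val;
        int ret = io_read(addr, &val);

        if (err_ret)
                *err_ret = ret;
        if (ret)
                return 0xFF;

        return val;
}

int main(void)
{
        int err;

        printf("%02x\n", readb_like(0x10, &err));       /* 5a, err=0 */
        printf("%02x (err=%d)\n", readb_like(0xbad, &err), err);
        printf("%02x\n", readb_like(0xbad, NULL));      /* NULL ok too */
        return 0;
}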
diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c
index 3c0d3ab4324c..abaaba38514f 100644
--- a/drivers/mmc/core/sdio_ops.c
+++ b/drivers/mmc/core/sdio_ops.c
@@ -152,7 +152,7 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
152 data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; 152 data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
153 153
154 left_size = data.blksz * data.blocks; 154 left_size = data.blksz * data.blocks;
155 nents = (left_size - 1) / seg_size + 1; 155 nents = DIV_ROUND_UP(left_size, seg_size);
156 if (nents > 1) { 156 if (nents > 1) {
157 if (sg_alloc_table(&sgtable, nents, GFP_KERNEL)) 157 if (sg_alloc_table(&sgtable, nents, GFP_KERNEL))
158 return -ENOMEM; 158 return -ENOMEM;
@@ -161,10 +161,9 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
161 data.sg_len = nents; 161 data.sg_len = nents;
162 162
163 for_each_sg(data.sg, sg_ptr, data.sg_len, i) { 163 for_each_sg(data.sg, sg_ptr, data.sg_len, i) {
164 sg_set_page(sg_ptr, virt_to_page(buf + (i * seg_size)), 164 sg_set_buf(sg_ptr, buf + i * seg_size,
165 min(seg_size, left_size), 165 min(seg_size, left_size));
166 offset_in_page(buf + (i * seg_size))); 166 left_size -= seg_size;
167 left_size = left_size - seg_size;
168 } 167 }
169 } else { 168 } else {
170 data.sg = &sg; 169 data.sg = &sg;
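DIV_ROUND_UP() is the standard ceiling division, and sg_set_buf() replaces the manual page/offset arithmetic; each scatterlist entry takes min(seg_size, left_size) bytes as the loop walks the flat buffer. A sketch of the same segmentation (note it decrements by the actual entry length, avoiding the unsigned wrap the kernel version tolerates on the last pass):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int left_size = 5000, seg_size = 2048;
        unsigned int nents = DIV_ROUND_UP(left_size, seg_size);
        unsigned int i;

        /* Same walk as mmc_io_rw_extended(): each sg entry covers
         * min(seg_size, left_size) bytes of the flat buffer. */
        for (i = 0; i < nents; i++) {
                unsigned int len = left_size < seg_size ? left_size
                                                        : seg_size;

                printf("sg[%u]: %u bytes at offset %u\n",
                       i, len, i * seg_size);
                left_size -= len;
        }
        return 0;
}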
diff --git a/drivers/mmc/core/sdio_ops.h b/drivers/mmc/core/sdio_ops.h
index bed8a8377fec..ee35cb4d170e 100644
--- a/drivers/mmc/core/sdio_ops.h
+++ b/drivers/mmc/core/sdio_ops.h
@@ -26,9 +26,15 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
26int sdio_reset(struct mmc_host *host); 26int sdio_reset(struct mmc_host *host);
27unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz); 27unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz);
28 28
29static inline bool mmc_is_io_op(u32 opcode) 29static inline bool sdio_is_io_busy(u32 opcode, u32 arg)
30{ 30{
31 return opcode == SD_IO_RW_DIRECT || opcode == SD_IO_RW_EXTENDED; 31 u32 addr;
32
33 addr = (arg >> 9) & 0x1FFFF;
34
35 return (opcode == SD_IO_RW_EXTENDED ||
36 (opcode == SD_IO_RW_DIRECT &&
37 !(addr == SDIO_CCCR_ABORT || addr == SDIO_CCCR_SUSPEND)));
32} 38}
33 39
34#endif 40#endif
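The new predicate decodes the CMD52 argument: the SDIO register address occupies bits 25:9, so (arg >> 9) & 0x1FFFF recovers it, and accesses aimed at the CCCR abort (0x06) and bus-suspend (0x0c) registers are exempted from the busy wait. A standalone rendering of the same test, with the CCCR offsets spelled out as the values the kernel names SDIO_CCCR_ABORT/SDIO_CCCR_SUSPEND:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SD_IO_RW_DIRECT   52
#define SD_IO_RW_EXTENDED 53
#define SDIO_CCCR_ABORT   0x06
#define SDIO_CCCR_SUSPEND 0x0c

/* Same test as sdio_is_io_busy(): CMD53 always busy-waits; CMD52
 * busy-waits unless it targets the abort or suspend register. */
static bool is_io_busy(uint32_t opcode, uint32_t arg)
{
        uint32_t addr = (arg >> 9) & 0x1FFFF;   /* bits 25:9 */

        return opcode == SD_IO_RW_EXTENDED ||
               (opcode == SD_IO_RW_DIRECT &&
                !(addr == SDIO_CCCR_ABORT || addr == SDIO_CCCR_SUSPEND));
}

int main(void)
{
        printf("%d\n", is_io_busy(SD_IO_RW_DIRECT, 0x10 << 9)); /* 1 */
        printf("%d\n", is_io_busy(SD_IO_RW_DIRECT,
                                  SDIO_CCCR_ABORT << 9));       /* 0 */
        return 0;
}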
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index f08691a58d7e..2db84dd664d7 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -622,6 +622,27 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
622 help 622 help
623 If you say yes here SD-Cards may work on the EZkit. 623 If you say yes here SD-Cards may work on the EZkit.
624 624
625config MMC_CAVIUM_OCTEON
626 tristate "Cavium OCTEON SD/MMC Card Interface support"
627 depends on CAVIUM_OCTEON_SOC
628 help
629 This selects Cavium OCTEON SD/MMC card Interface.
630 If you have an OCTEON board with a Multimedia Card slot,
631 say Y or M here.
632
633 If unsure, say N.
634
635config MMC_CAVIUM_THUNDERX
636 tristate "Cavium ThunderX SD/MMC Card Interface support"
637 depends on PCI && 64BIT && (ARM64 || COMPILE_TEST)
638 depends on GPIOLIB
639 depends on OF_ADDRESS
640 help
641 This selects Cavium ThunderX SD/MMC Card Interface.
642 If you have a Cavium ARM64 board with a Multimedia Card slot
643 or built-in eMMC chip, say Y or M here. If built as a module
644 the module will be called thunderx_mmc.ko.
645
625config MMC_DW 646config MMC_DW
626 tristate "Synopsys DesignWare Memory Card Interface" 647 tristate "Synopsys DesignWare Memory Card Interface"
627 depends on HAS_DMA 648 depends on HAS_DMA
@@ -799,6 +820,20 @@ config MMC_TOSHIBA_PCI
799 depends on PCI 820 depends on PCI
800 help 821 help
801 822
823config MMC_BCM2835
824 tristate "Broadcom BCM2835 SDHOST MMC Controller support"
825 depends on ARCH_BCM2835 || COMPILE_TEST
826 depends on HAS_DMA
827 help
828 This selects the BCM2835 SDHOST MMC controller. If you have
829 a BCM2835 platform with SD or MMC devices, say Y or M here.
830
831 Note that the BCM2835 has two SD controllers: The Arasan
832 sdhci controller (supported by MMC_SDHCI_IPROC) and a custom
833 sdhost controller (supported by this driver).
834
835 If unsure, say N.
836
802config MMC_MTK 837config MMC_MTK
803 tristate "MediaTek SD/MMC Card Interface support" 838 tristate "MediaTek SD/MMC Card Interface support"
804 depends on HAS_DMA 839 depends on HAS_DMA
@@ -828,3 +863,11 @@ config MMC_SDHCI_BRCMSTB
828 Broadcom STB SoCs. 863 Broadcom STB SoCs.
829 864
830 If unsure, say Y. 865 If unsure, say Y.
866
867config MMC_SDHCI_XENON
868 tristate "Marvell Xenon eMMC/SD/SDIO SDHCI driver"
869 depends on MMC_SDHCI_PLTFM
870 help
871 This selects Marvell Xenon eMMC/SD/SDIO SDHCI.
872 If you have a controller with this interface, say Y or M here.
873 If unsure, say N.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 6d548c4ee2fa..926347c2eeb4 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -42,6 +42,10 @@ obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o
42obj-$(CONFIG_MMC_CB710) += cb710-mmc.o 42obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
43obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o 43obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
44obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o 44obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
45octeon-mmc-objs := cavium.o cavium-octeon.o
46obj-$(CONFIG_MMC_CAVIUM_OCTEON) += octeon-mmc.o
47thunderx-mmc-objs := cavium.o cavium-thunderx.o
48obj-$(CONFIG_MMC_CAVIUM_THUNDERX) += thunderx-mmc.o
45obj-$(CONFIG_MMC_DW) += dw_mmc.o 49obj-$(CONFIG_MMC_DW) += dw_mmc.o
46obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o 50obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o
47obj-$(CONFIG_MMC_DW_EXYNOS) += dw_mmc-exynos.o 51obj-$(CONFIG_MMC_DW_EXYNOS) += dw_mmc-exynos.o
@@ -59,6 +63,7 @@ obj-$(CONFIG_MMC_MOXART) += moxart-mmc.o
59obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o 63obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o
60obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o 64obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o
61obj-$(CONFIG_MMC_TOSHIBA_PCI) += toshsd.o 65obj-$(CONFIG_MMC_TOSHIBA_PCI) += toshsd.o
66obj-$(CONFIG_MMC_BCM2835) += bcm2835.o
62 67
63obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o 68obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
64obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o 69obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o
@@ -83,3 +88,6 @@ obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
83ifeq ($(CONFIG_CB710_DEBUG),y) 88ifeq ($(CONFIG_CB710_DEBUG),y)
84 CFLAGS-cb710-mmc += -DDEBUG 89 CFLAGS-cb710-mmc += -DDEBUG
85endif 90endif
91
92obj-$(CONFIG_MMC_SDHCI_XENON) += sdhci-xenon-driver.o
93sdhci-xenon-driver-y += sdhci-xenon.o sdhci-xenon-phy.o
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c
index 590a8a4522be..5b3e1c9bb75f 100644
--- a/drivers/mmc/host/android-goldfish.c
+++ b/drivers/mmc/host/android-goldfish.c
@@ -212,10 +212,7 @@ static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host,
212 if (host->dma_in_use) { 212 if (host->dma_in_use) {
213 enum dma_data_direction dma_data_dir; 213 enum dma_data_direction dma_data_dir;
214 214
215 if (data->flags & MMC_DATA_WRITE) 215 dma_data_dir = mmc_get_dma_dir(data);
216 dma_data_dir = DMA_TO_DEVICE;
217 else
218 dma_data_dir = DMA_FROM_DEVICE;
219 216
220 if (dma_data_dir == DMA_FROM_DEVICE) { 217 if (dma_data_dir == DMA_FROM_DEVICE) {
221 /* 218 /*
@@ -390,10 +387,7 @@ static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host,
390 */ 387 */
391 sg_len = (data->blocks == 1) ? 1 : data->sg_len; 388 sg_len = (data->blocks == 1) ? 1 : data->sg_len;
392 389
393 if (data->flags & MMC_DATA_WRITE) 390 dma_data_dir = mmc_get_dma_dir(data);
394 dma_data_dir = DMA_TO_DEVICE;
395 else
396 dma_data_dir = DMA_FROM_DEVICE;
397 391
398 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, 392 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
399 sg_len, dma_data_dir); 393 sg_len, dma_data_dir);
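These conversions (and the atmel-mci ones below) rely on mmc_get_dma_dir(), a one-line helper that maps MMC_DATA_WRITE to DMA_TO_DEVICE and everything else to DMA_FROM_DEVICE, so every host driver maps and unmaps with the same direction. A userspace rendering with illustrative flag and enum values:

#include <stdio.h>

enum dma_data_direction { DMA_TO_DEVICE, DMA_FROM_DEVICE };

#define MMC_DATA_WRITE (1 << 8) /* flag values are illustrative */
#define MMC_DATA_READ  (1 << 9)

struct mmc_data { unsigned int flags; };

/* Same one-liner as the kernel helper: writes map TO the device,
 * everything else maps FROM it. */
static enum dma_data_direction get_dma_dir(const struct mmc_data *d)
{
        return d->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}

int main(void)
{
        struct mmc_data wr = { MMC_DATA_WRITE }, rd = { MMC_DATA_READ };

        printf("write -> %d, read -> %d\n",
               get_dma_dir(&wr), get_dma_dir(&rd));
        return 0;
}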
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 0ad8ef565b74..388e4a3f13e6 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -954,8 +954,7 @@ static void atmci_pdc_cleanup(struct atmel_mci *host)
954 if (data) 954 if (data)
955 dma_unmap_sg(&host->pdev->dev, 955 dma_unmap_sg(&host->pdev->dev,
956 data->sg, data->sg_len, 956 data->sg, data->sg_len,
957 ((data->flags & MMC_DATA_WRITE) 957 mmc_get_dma_dir(data));
958 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
959} 958}
960 959
961/* 960/*
@@ -993,8 +992,7 @@ static void atmci_dma_cleanup(struct atmel_mci *host)
993 if (data) 992 if (data)
994 dma_unmap_sg(host->dma.chan->device->dev, 993 dma_unmap_sg(host->dma.chan->device->dev,
995 data->sg, data->sg_len, 994 data->sg, data->sg_len,
996 ((data->flags & MMC_DATA_WRITE) 995 mmc_get_dma_dir(data));
997 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
998} 996}
999 997
1000/* 998/*
@@ -1095,7 +1093,6 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
1095{ 1093{
1096 u32 iflags, tmp; 1094 u32 iflags, tmp;
1097 unsigned int sg_len; 1095 unsigned int sg_len;
1098 enum dma_data_direction dir;
1099 int i; 1096 int i;
1100 1097
1101 data->error = -EINPROGRESS; 1098 data->error = -EINPROGRESS;
@@ -1107,13 +1104,10 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
1107 /* Enable pdc mode */ 1104 /* Enable pdc mode */
1108 atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE); 1105 atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);
1109 1106
1110 if (data->flags & MMC_DATA_READ) { 1107 if (data->flags & MMC_DATA_READ)
1111 dir = DMA_FROM_DEVICE;
1112 iflags |= ATMCI_ENDRX | ATMCI_RXBUFF; 1108 iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
1113 } else { 1109 else
1114 dir = DMA_TO_DEVICE;
1115 iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE; 1110 iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
1116 }
1117 1111
1118 /* Set BLKLEN */ 1112 /* Set BLKLEN */
1119 tmp = atmci_readl(host, ATMCI_MR); 1113 tmp = atmci_readl(host, ATMCI_MR);
@@ -1123,7 +1117,8 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
1123 1117
1124 /* Configure PDC */ 1118 /* Configure PDC */
1125 host->data_size = data->blocks * data->blksz; 1119 host->data_size = data->blocks * data->blksz;
1126 sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir); 1120 sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
1121 mmc_get_dma_dir(data));
1127 1122
1128 if ((!host->caps.has_rwproof) 1123 if ((!host->caps.has_rwproof)
1129 && (host->data->flags & MMC_DATA_WRITE)) { 1124 && (host->data->flags & MMC_DATA_WRITE)) {
@@ -1135,9 +1130,8 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
1135 } 1130 }
1136 1131
1137 if (host->data_size) 1132 if (host->data_size)
1138 atmci_pdc_set_both_buf(host, 1133 atmci_pdc_set_both_buf(host, data->flags & MMC_DATA_READ ?
1139 ((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT)); 1134 XFER_RECEIVE : XFER_TRANSMIT);
1140
1141 return iflags; 1135 return iflags;
1142} 1136}
1143 1137
@@ -1148,7 +1142,6 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
1148 struct dma_async_tx_descriptor *desc; 1142 struct dma_async_tx_descriptor *desc;
1149 struct scatterlist *sg; 1143 struct scatterlist *sg;
1150 unsigned int i; 1144 unsigned int i;
1151 enum dma_data_direction direction;
1152 enum dma_transfer_direction slave_dirn; 1145 enum dma_transfer_direction slave_dirn;
1153 unsigned int sglen; 1146 unsigned int sglen;
1154 u32 maxburst; 1147 u32 maxburst;
@@ -1186,12 +1179,10 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
1186 return -ENODEV; 1179 return -ENODEV;
1187 1180
1188 if (data->flags & MMC_DATA_READ) { 1181 if (data->flags & MMC_DATA_READ) {
1189 direction = DMA_FROM_DEVICE;
1190 host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM; 1182 host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
1191 maxburst = atmci_convert_chksize(host, 1183 maxburst = atmci_convert_chksize(host,
1192 host->dma_conf.src_maxburst); 1184 host->dma_conf.src_maxburst);
1193 } else { 1185 } else {
1194 direction = DMA_TO_DEVICE;
1195 host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV; 1186 host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
1196 maxburst = atmci_convert_chksize(host, 1187 maxburst = atmci_convert_chksize(host,
1197 host->dma_conf.dst_maxburst); 1188 host->dma_conf.dst_maxburst);
@@ -1202,7 +1193,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
1202 ATMCI_DMAEN); 1193 ATMCI_DMAEN);
1203 1194
1204 sglen = dma_map_sg(chan->device->dev, data->sg, 1195 sglen = dma_map_sg(chan->device->dev, data->sg,
1205 data->sg_len, direction); 1196 data->sg_len, mmc_get_dma_dir(data));
1206 1197
1207 dmaengine_slave_config(chan, &host->dma_conf); 1198 dmaengine_slave_config(chan, &host->dma_conf);
1208 desc = dmaengine_prep_slave_sg(chan, 1199 desc = dmaengine_prep_slave_sg(chan,
@@ -1217,7 +1208,8 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
1217 1208
1218 return iflags; 1209 return iflags;
1219unmap_exit: 1210unmap_exit:
1220 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction); 1211 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
1212 mmc_get_dma_dir(data));
1221 return -ENOMEM; 1213 return -ENOMEM;
1222} 1214}
1223 1215
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
new file mode 100644
index 000000000000..1f343a477b3d
--- /dev/null
+++ b/drivers/mmc/host/bcm2835.c
@@ -0,0 +1,1466 @@
1/*
2 * bcm2835 sdhost driver.
3 *
4 * The 2835 has two SD controllers: The Arasan sdhci controller
5 * (supported by the iproc driver) and a custom sdhost controller
6 * (supported by this driver).
7 *
8 * The sdhci controller supports both sdcard and sdio. The sdhost
9 * controller supports the sdcard only, but has better performance.
10 * Also note that the rpi3 has sdio wifi, so driving the sdcard with
11 * the sdhost controller allows the sdhci controller to be used
12 * for wifi support.
13 *
14 * The configuration is done by devicetree via pin muxing. Both
15 * SD controllers are available on the same pins (2 pin groups = pin 22
16 * to 27 + pin 48 to 53). So it's possible to use both SD controllers
17 * at the same time with different pin groups.
18 *
19 * Author: Phil Elwell <phil@raspberrypi.org>
20 * Copyright (C) 2015-2016 Raspberry Pi (Trading) Ltd.
21 *
22 * Based on
23 * mmc-bcm2835.c by Gellert Weisz
24 * which is, in turn, based on
25 * sdhci-bcm2708.c by Broadcom
26 * sdhci-bcm2835.c by Stephen Warren and Oleksandr Tymoshenko
27 * sdhci.c and sdhci-pci.c by Pierre Ossman
28 *
29 * This program is free software; you can redistribute it and/or modify it
30 * under the terms and conditions of the GNU General Public License,
31 * version 2, as published by the Free Software Foundation.
32 *
33 * This program is distributed in the hope it will be useful, but WITHOUT
34 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
35 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
36 * more details.
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program. If not, see <http://www.gnu.org/licenses/>.
40 */
41#include <linux/clk.h>
42#include <linux/delay.h>
43#include <linux/device.h>
44#include <linux/dmaengine.h>
45#include <linux/dma-mapping.h>
46#include <linux/err.h>
47#include <linux/highmem.h>
48#include <linux/interrupt.h>
49#include <linux/io.h>
50#include <linux/iopoll.h>
51#include <linux/module.h>
52#include <linux/of_address.h>
53#include <linux/of_irq.h>
54#include <linux/platform_device.h>
55#include <linux/scatterlist.h>
56#include <linux/time.h>
57#include <linux/workqueue.h>
58
59#include <linux/mmc/host.h>
60#include <linux/mmc/mmc.h>
61#include <linux/mmc/sd.h>
62
63#define SDCMD 0x00 /* Command to SD card - 16 R/W */
64#define SDARG 0x04 /* Argument to SD card - 32 R/W */
65#define SDTOUT 0x08 /* Start value for timeout counter - 32 R/W */
66#define SDCDIV 0x0c /* Start value for clock divider - 11 R/W */
67#define SDRSP0 0x10 /* SD card response (31:0) - 32 R */
68#define SDRSP1 0x14 /* SD card response (63:32) - 32 R */
69#define SDRSP2 0x18 /* SD card response (95:64) - 32 R */
70#define SDRSP3 0x1c /* SD card response (127:96) - 32 R */
71#define SDHSTS 0x20 /* SD host status - 11 R/W */
72#define SDVDD 0x30 /* SD card power control - 1 R/W */
73#define SDEDM 0x34 /* Emergency Debug Mode - 13 R/W */
74#define SDHCFG 0x38 /* Host configuration - 2 R/W */
75#define SDHBCT 0x3c /* Host byte count (debug) - 32 R/W */
76#define SDDATA 0x40 /* Data to/from SD card - 32 R/W */
77#define SDHBLC 0x50 /* Host block count (SDIO/SDHC) - 9 R/W */
78
79#define SDCMD_NEW_FLAG 0x8000
80#define SDCMD_FAIL_FLAG 0x4000
81#define SDCMD_BUSYWAIT 0x800
82#define SDCMD_NO_RESPONSE 0x400
83#define SDCMD_LONG_RESPONSE 0x200
84#define SDCMD_WRITE_CMD 0x80
85#define SDCMD_READ_CMD 0x40
86#define SDCMD_CMD_MASK 0x3f
87
88#define SDCDIV_MAX_CDIV 0x7ff
89
90#define SDHSTS_BUSY_IRPT 0x400
91#define SDHSTS_BLOCK_IRPT 0x200
92#define SDHSTS_SDIO_IRPT 0x100
93#define SDHSTS_REW_TIME_OUT 0x80
94#define SDHSTS_CMD_TIME_OUT 0x40
95#define SDHSTS_CRC16_ERROR 0x20
96#define SDHSTS_CRC7_ERROR 0x10
97#define SDHSTS_FIFO_ERROR 0x08
98/* Reserved */
99/* Reserved */
100#define SDHSTS_DATA_FLAG 0x01
101
102#define SDHSTS_TRANSFER_ERROR_MASK (SDHSTS_CRC7_ERROR | \
103 SDHSTS_CRC16_ERROR | \
104 SDHSTS_REW_TIME_OUT | \
105 SDHSTS_FIFO_ERROR)
106
107#define SDHSTS_ERROR_MASK (SDHSTS_CMD_TIME_OUT | \
108 SDHSTS_TRANSFER_ERROR_MASK)
109
110#define SDHCFG_BUSY_IRPT_EN BIT(10)
111#define SDHCFG_BLOCK_IRPT_EN BIT(8)
112#define SDHCFG_SDIO_IRPT_EN BIT(5)
113#define SDHCFG_DATA_IRPT_EN BIT(4)
114#define SDHCFG_SLOW_CARD BIT(3)
115#define SDHCFG_WIDE_EXT_BUS BIT(2)
116#define SDHCFG_WIDE_INT_BUS BIT(1)
117#define SDHCFG_REL_CMD_LINE BIT(0)
118
119#define SDVDD_POWER_OFF 0
120#define SDVDD_POWER_ON 1
121
122#define SDEDM_FORCE_DATA_MODE BIT(19)
123#define SDEDM_CLOCK_PULSE BIT(20)
124#define SDEDM_BYPASS BIT(21)
125
126#define SDEDM_WRITE_THRESHOLD_SHIFT 9
127#define SDEDM_READ_THRESHOLD_SHIFT 14
128#define SDEDM_THRESHOLD_MASK 0x1f
129
130#define SDEDM_FSM_MASK 0xf
131#define SDEDM_FSM_IDENTMODE 0x0
132#define SDEDM_FSM_DATAMODE 0x1
133#define SDEDM_FSM_READDATA 0x2
134#define SDEDM_FSM_WRITEDATA 0x3
135#define SDEDM_FSM_READWAIT 0x4
136#define SDEDM_FSM_READCRC 0x5
137#define SDEDM_FSM_WRITECRC 0x6
138#define SDEDM_FSM_WRITEWAIT1 0x7
139#define SDEDM_FSM_POWERDOWN 0x8
140#define SDEDM_FSM_POWERUP 0x9
141#define SDEDM_FSM_WRITESTART1 0xa
142#define SDEDM_FSM_WRITESTART2 0xb
143#define SDEDM_FSM_GENPULSES 0xc
144#define SDEDM_FSM_WRITEWAIT2 0xd
145#define SDEDM_FSM_STARTPOWDOWN 0xf
146
147#define SDDATA_FIFO_WORDS 16
148
149#define FIFO_READ_THRESHOLD 4
150#define FIFO_WRITE_THRESHOLD 4
151#define SDDATA_FIFO_PIO_BURST 8
152
153#define PIO_THRESHOLD 1 /* Maximum block count for PIO (0 = always DMA) */
154
155struct bcm2835_host {
156 spinlock_t lock;
157 struct mutex mutex;
158
159 void __iomem *ioaddr;
160 u32 phys_addr;
161
162 struct mmc_host *mmc;
163 struct platform_device *pdev;
164
165 int clock; /* Current clock speed */
166 unsigned int max_clk; /* Max possible freq */
167 struct work_struct dma_work;
168 struct delayed_work timeout_work; /* Timer for timeouts */
169 struct sg_mapping_iter sg_miter; /* SG state for PIO */
170 unsigned int blocks; /* remaining PIO blocks */
171 int irq; /* Device IRQ */
172
173 u32 ns_per_fifo_word;
174
175 /* cached registers */
176 u32 hcfg;
177 u32 cdiv;
178
179 struct mmc_request *mrq; /* Current request */
180 struct mmc_command *cmd; /* Current command */
181 struct mmc_data *data; /* Current data request */
182	bool data_complete:1;	/* Data finished before cmd */
183 bool use_busy:1; /* Wait for busy interrupt */
184 bool use_sbc:1; /* Send CMD23 */
185
186 /* for threaded irq handler */
187 bool irq_block;
188 bool irq_busy;
189 bool irq_data;
190
191 /* DMA part */
192 struct dma_chan *dma_chan_rxtx;
193 struct dma_chan *dma_chan;
194 struct dma_slave_config dma_cfg_rx;
195 struct dma_slave_config dma_cfg_tx;
196 struct dma_async_tx_descriptor *dma_desc;
197 u32 dma_dir;
198 u32 drain_words;
199 struct page *drain_page;
200 u32 drain_offset;
201 bool use_dma;
202};
203
204static void bcm2835_dumpcmd(struct bcm2835_host *host, struct mmc_command *cmd,
205 const char *label)
206{
207 struct device *dev = &host->pdev->dev;
208
209 if (!cmd)
210 return;
211
212 dev_dbg(dev, "%c%s op %d arg 0x%x flags 0x%x - resp %08x %08x %08x %08x, err %d\n",
213 (cmd == host->cmd) ? '>' : ' ',
214 label, cmd->opcode, cmd->arg, cmd->flags,
215 cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3],
216 cmd->error);
217}
218
219static void bcm2835_dumpregs(struct bcm2835_host *host)
220{
221 struct mmc_request *mrq = host->mrq;
222 struct device *dev = &host->pdev->dev;
223
224 if (mrq) {
225 bcm2835_dumpcmd(host, mrq->sbc, "sbc");
226 bcm2835_dumpcmd(host, mrq->cmd, "cmd");
227 if (mrq->data) {
228 dev_dbg(dev, "data blocks %x blksz %x - err %d\n",
229 mrq->data->blocks,
230 mrq->data->blksz,
231 mrq->data->error);
232 }
233 bcm2835_dumpcmd(host, mrq->stop, "stop");
234 }
235
236 dev_dbg(dev, "=========== REGISTER DUMP ===========\n");
237 dev_dbg(dev, "SDCMD 0x%08x\n", readl(host->ioaddr + SDCMD));
238 dev_dbg(dev, "SDARG 0x%08x\n", readl(host->ioaddr + SDARG));
239 dev_dbg(dev, "SDTOUT 0x%08x\n", readl(host->ioaddr + SDTOUT));
240 dev_dbg(dev, "SDCDIV 0x%08x\n", readl(host->ioaddr + SDCDIV));
241 dev_dbg(dev, "SDRSP0 0x%08x\n", readl(host->ioaddr + SDRSP0));
242 dev_dbg(dev, "SDRSP1 0x%08x\n", readl(host->ioaddr + SDRSP1));
243 dev_dbg(dev, "SDRSP2 0x%08x\n", readl(host->ioaddr + SDRSP2));
244 dev_dbg(dev, "SDRSP3 0x%08x\n", readl(host->ioaddr + SDRSP3));
245 dev_dbg(dev, "SDHSTS 0x%08x\n", readl(host->ioaddr + SDHSTS));
246 dev_dbg(dev, "SDVDD 0x%08x\n", readl(host->ioaddr + SDVDD));
247 dev_dbg(dev, "SDEDM 0x%08x\n", readl(host->ioaddr + SDEDM));
248 dev_dbg(dev, "SDHCFG 0x%08x\n", readl(host->ioaddr + SDHCFG));
249 dev_dbg(dev, "SDHBCT 0x%08x\n", readl(host->ioaddr + SDHBCT));
250 dev_dbg(dev, "SDHBLC 0x%08x\n", readl(host->ioaddr + SDHBLC));
251 dev_dbg(dev, "===========================================\n");
252}
253
254static void bcm2835_reset_internal(struct bcm2835_host *host)
255{
256 u32 temp;
257
258 writel(SDVDD_POWER_OFF, host->ioaddr + SDVDD);
259 writel(0, host->ioaddr + SDCMD);
260 writel(0, host->ioaddr + SDARG);
261 writel(0xf00000, host->ioaddr + SDTOUT);
262 writel(0, host->ioaddr + SDCDIV);
263 writel(0x7f8, host->ioaddr + SDHSTS); /* Write 1s to clear */
264 writel(0, host->ioaddr + SDHCFG);
265 writel(0, host->ioaddr + SDHBCT);
266 writel(0, host->ioaddr + SDHBLC);
267
268 /* Limit fifo usage due to silicon bug */
269 temp = readl(host->ioaddr + SDEDM);
270 temp &= ~((SDEDM_THRESHOLD_MASK << SDEDM_READ_THRESHOLD_SHIFT) |
271 (SDEDM_THRESHOLD_MASK << SDEDM_WRITE_THRESHOLD_SHIFT));
272 temp |= (FIFO_READ_THRESHOLD << SDEDM_READ_THRESHOLD_SHIFT) |
273 (FIFO_WRITE_THRESHOLD << SDEDM_WRITE_THRESHOLD_SHIFT);
274 writel(temp, host->ioaddr + SDEDM);
275 msleep(20);
276 writel(SDVDD_POWER_ON, host->ioaddr + SDVDD);
277 msleep(20);
278 host->clock = 0;
279 writel(host->hcfg, host->ioaddr + SDHCFG);
280 writel(host->cdiv, host->ioaddr + SDCDIV);
281}
282
283static void bcm2835_reset(struct mmc_host *mmc)
284{
285 struct bcm2835_host *host = mmc_priv(mmc);
286
287 if (host->dma_chan)
288 dmaengine_terminate_sync(host->dma_chan);
289 bcm2835_reset_internal(host);
290}
291
292static void bcm2835_finish_command(struct bcm2835_host *host);
293
294static void bcm2835_wait_transfer_complete(struct bcm2835_host *host)
295{
296 int timediff;
297 u32 alternate_idle;
298
299 alternate_idle = (host->mrq->data->flags & MMC_DATA_READ) ?
300 SDEDM_FSM_READWAIT : SDEDM_FSM_WRITESTART1;
301
302 timediff = 0;
303
304 while (1) {
305 u32 edm, fsm;
306
307 edm = readl(host->ioaddr + SDEDM);
308 fsm = edm & SDEDM_FSM_MASK;
309
310 if ((fsm == SDEDM_FSM_IDENTMODE) ||
311 (fsm == SDEDM_FSM_DATAMODE))
312 break;
313 if (fsm == alternate_idle) {
314 writel(edm | SDEDM_FORCE_DATA_MODE,
315 host->ioaddr + SDEDM);
316 break;
317 }
318
319 timediff++;
320 if (timediff == 100000) {
321 dev_err(&host->pdev->dev,
322 "wait_transfer_complete - still waiting after %d retries\n",
323 timediff);
324 bcm2835_dumpregs(host);
325 host->mrq->data->error = -ETIMEDOUT;
326 return;
327 }
328 cpu_relax();
329 }
330}
331
332static void bcm2835_dma_complete(void *param)
333{
334 struct bcm2835_host *host = param;
335
336 schedule_work(&host->dma_work);
337}
338
339static void bcm2835_transfer_block_pio(struct bcm2835_host *host, bool is_read)
340{
341 unsigned long flags;
342 size_t blksize;
343 unsigned long wait_max;
344
345 blksize = host->data->blksz;
346
347 wait_max = jiffies + msecs_to_jiffies(500);
348
349 local_irq_save(flags);
350
351 while (blksize) {
352 int copy_words;
353 u32 hsts = 0;
354 size_t len;
355 u32 *buf;
356
357 if (!sg_miter_next(&host->sg_miter)) {
358 host->data->error = -EINVAL;
359 break;
360 }
361
362 len = min(host->sg_miter.length, blksize);
363 if (len % 4) {
364 host->data->error = -EINVAL;
365 break;
366 }
367
368 blksize -= len;
369 host->sg_miter.consumed = len;
370
371 buf = (u32 *)host->sg_miter.addr;
372
373 copy_words = len / 4;
374
375 while (copy_words) {
376 int burst_words, words;
377 u32 edm;
378
379 burst_words = min(SDDATA_FIFO_PIO_BURST, copy_words);
380 edm = readl(host->ioaddr + SDEDM);
381 if (is_read)
382 words = ((edm >> 4) & 0x1f);
383 else
384 words = SDDATA_FIFO_WORDS - ((edm >> 4) & 0x1f);
385
386 if (words < burst_words) {
387 int fsm_state = (edm & SDEDM_FSM_MASK);
388 struct device *dev = &host->pdev->dev;
389
390 if ((is_read &&
391 (fsm_state != SDEDM_FSM_READDATA &&
392 fsm_state != SDEDM_FSM_READWAIT &&
393 fsm_state != SDEDM_FSM_READCRC)) ||
394 (!is_read &&
395 (fsm_state != SDEDM_FSM_WRITEDATA &&
396 fsm_state != SDEDM_FSM_WRITESTART1 &&
397 fsm_state != SDEDM_FSM_WRITESTART2))) {
398 hsts = readl(host->ioaddr + SDHSTS);
399 dev_err(dev, "fsm %x, hsts %08x\n",
400 fsm_state, hsts);
401 if (hsts & SDHSTS_ERROR_MASK)
402 break;
403 }
404
405 if (time_after(jiffies, wait_max)) {
406 dev_err(dev, "PIO %s timeout - EDM %08x\n",
407 is_read ? "read" : "write",
408 edm);
409 hsts = SDHSTS_REW_TIME_OUT;
410 break;
411 }
412 ndelay((burst_words - words) *
413 host->ns_per_fifo_word);
414 continue;
415 } else if (words > copy_words) {
416 words = copy_words;
417 }
418
419 copy_words -= words;
420
421 while (words) {
422 if (is_read)
423 *(buf++) = readl(host->ioaddr + SDDATA);
424 else
425 writel(*(buf++), host->ioaddr + SDDATA);
426 words--;
427 }
428 }
429
430 if (hsts & SDHSTS_ERROR_MASK)
431 break;
432 }
433
434 sg_miter_stop(&host->sg_miter);
435
436 local_irq_restore(flags);
437}
438
439static void bcm2835_transfer_pio(struct bcm2835_host *host)
440{
441 struct device *dev = &host->pdev->dev;
442 u32 sdhsts;
443 bool is_read;
444
445 is_read = (host->data->flags & MMC_DATA_READ) != 0;
446 bcm2835_transfer_block_pio(host, is_read);
447
448 sdhsts = readl(host->ioaddr + SDHSTS);
449 if (sdhsts & (SDHSTS_CRC16_ERROR |
450 SDHSTS_CRC7_ERROR |
451 SDHSTS_FIFO_ERROR)) {
452 dev_err(dev, "%s transfer error - HSTS %08x\n",
453 is_read ? "read" : "write", sdhsts);
454 host->data->error = -EILSEQ;
455 } else if ((sdhsts & (SDHSTS_CMD_TIME_OUT |
456 SDHSTS_REW_TIME_OUT))) {
457 dev_err(dev, "%s timeout error - HSTS %08x\n",
458 is_read ? "read" : "write", sdhsts);
459 host->data->error = -ETIMEDOUT;
460 }
461}
462
463static
464void bcm2835_prepare_dma(struct bcm2835_host *host, struct mmc_data *data)
465{
466 int len, dir_data, dir_slave;
467 struct dma_async_tx_descriptor *desc = NULL;
468 struct dma_chan *dma_chan;
469
470 dma_chan = host->dma_chan_rxtx;
471 if (data->flags & MMC_DATA_READ) {
472 dir_data = DMA_FROM_DEVICE;
473 dir_slave = DMA_DEV_TO_MEM;
474 } else {
475 dir_data = DMA_TO_DEVICE;
476 dir_slave = DMA_MEM_TO_DEV;
477 }
478
479 /* The block doesn't manage the FIFO DREQs properly for
480 * multi-block transfers, so don't attempt to DMA the final
481 * few words. Unfortunately this requires the final sg entry
482 * to be trimmed. N.B. This code demands that the overspill
483 * is contained in a single sg entry.
484 */
485
486 host->drain_words = 0;
487 if ((data->blocks > 1) && (dir_data == DMA_FROM_DEVICE)) {
488 struct scatterlist *sg;
489 u32 len;
490 int i;
491
492 len = min((u32)(FIFO_READ_THRESHOLD - 1) * 4,
493 (u32)data->blocks * data->blksz);
494
495 for_each_sg(data->sg, sg, data->sg_len, i) {
496 if (sg_is_last(sg)) {
497 WARN_ON(sg->length < len);
498 sg->length -= len;
499 host->drain_page = sg_page(sg);
500 host->drain_offset = sg->offset + sg->length;
501 }
502 }
503 host->drain_words = len / 4;
504 }
505
506 /* The parameters have already been validated, so this will not fail */
507 (void)dmaengine_slave_config(dma_chan,
508 (dir_data == DMA_FROM_DEVICE) ?
509 &host->dma_cfg_rx :
510 &host->dma_cfg_tx);
511
512 len = dma_map_sg(dma_chan->device->dev, data->sg, data->sg_len,
513 dir_data);
514
515 if (len > 0) {
516 desc = dmaengine_prep_slave_sg(dma_chan, data->sg,
517 len, dir_slave,
518 DMA_PREP_INTERRUPT |
519 DMA_CTRL_ACK);
520 }
521
522 if (desc) {
523 desc->callback = bcm2835_dma_complete;
524 desc->callback_param = host;
525 host->dma_desc = desc;
526 host->dma_chan = dma_chan;
527 host->dma_dir = dir_data;
528 }
529}
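
/*
 * Worked example of the trim above (an editorial illustration, not
 * driver logic): with FIFO_READ_THRESHOLD = 4, a multi-block read
 * computes len = min((4 - 1) * 4, total) = 12 bytes, so the final
 * sg entry is shortened by 12 bytes, drain_page/drain_offset record
 * where the cut was made, and drain_words = 3. Those three words are
 * later pulled out of the FIFO by PIO in bcm2835_dma_complete_work().
 */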
530
531static void bcm2835_start_dma(struct bcm2835_host *host)
532{
533 dmaengine_submit(host->dma_desc);
534 dma_async_issue_pending(host->dma_chan);
535}
536
537static void bcm2835_set_transfer_irqs(struct bcm2835_host *host)
538{
539 u32 all_irqs = SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN |
540 SDHCFG_BUSY_IRPT_EN;
541
542 if (host->dma_desc) {
543 host->hcfg = (host->hcfg & ~all_irqs) |
544 SDHCFG_BUSY_IRPT_EN;
545 } else {
546 host->hcfg = (host->hcfg & ~all_irqs) |
547 SDHCFG_DATA_IRPT_EN |
548 SDHCFG_BUSY_IRPT_EN;
549 }
550
551 writel(host->hcfg, host->ioaddr + SDHCFG);
552}
553
554static
555void bcm2835_prepare_data(struct bcm2835_host *host, struct mmc_command *cmd)
556{
557 struct mmc_data *data = cmd->data;
558
559 WARN_ON(host->data);
560
561 host->data = data;
562 if (!data)
563 return;
564
565 host->data_complete = false;
566 host->data->bytes_xfered = 0;
567
568 if (!host->dma_desc) {
569 /* Use PIO */
570 int flags = SG_MITER_ATOMIC;
571
572 if (data->flags & MMC_DATA_READ)
573 flags |= SG_MITER_TO_SG;
574 else
575 flags |= SG_MITER_FROM_SG;
576 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
577 host->blocks = data->blocks;
578 }
579
580 bcm2835_set_transfer_irqs(host);
581
582 writel(data->blksz, host->ioaddr + SDHBCT);
583 writel(data->blocks, host->ioaddr + SDHBLC);
584}
585
586static u32 bcm2835_read_wait_sdcmd(struct bcm2835_host *host, u32 max_ms)
587{
588 struct device *dev = &host->pdev->dev;
589 u32 value;
590 int ret;
591
592 ret = readl_poll_timeout(host->ioaddr + SDCMD, value,
593 !(value & SDCMD_NEW_FLAG), 1, 10);
594 if (ret == -ETIMEDOUT)
595		/* if it takes a while, poll with a coarser interval */
596 ret = readl_poll_timeout(host->ioaddr + SDCMD, value,
597 !(value & SDCMD_NEW_FLAG),
598 10, max_ms * 1000);
599 if (ret == -ETIMEDOUT)
600 dev_err(dev, "%s: timeout (%d ms)\n", __func__, max_ms);
601
602 return value;
603}
604
605static void bcm2835_finish_request(struct bcm2835_host *host)
606{
607 struct dma_chan *terminate_chan = NULL;
608 struct mmc_request *mrq;
609
610 cancel_delayed_work(&host->timeout_work);
611
612 mrq = host->mrq;
613
614 host->mrq = NULL;
615 host->cmd = NULL;
616 host->data = NULL;
617
618 host->dma_desc = NULL;
619 terminate_chan = host->dma_chan;
620 host->dma_chan = NULL;
621
622 if (terminate_chan) {
623 int err = dmaengine_terminate_all(terminate_chan);
624
625 if (err)
626 dev_err(&host->pdev->dev,
627 "failed to terminate DMA (%d)\n", err);
628 }
629
630 mmc_request_done(host->mmc, mrq);
631}
632
633static
634bool bcm2835_send_command(struct bcm2835_host *host, struct mmc_command *cmd)
635{
636 struct device *dev = &host->pdev->dev;
637 u32 sdcmd, sdhsts;
638 unsigned long timeout;
639
640 WARN_ON(host->cmd);
641
642 sdcmd = bcm2835_read_wait_sdcmd(host, 100);
643 if (sdcmd & SDCMD_NEW_FLAG) {
644 dev_err(dev, "previous command never completed.\n");
645 bcm2835_dumpregs(host);
646 cmd->error = -EILSEQ;
647 bcm2835_finish_request(host);
648 return false;
649 }
650
651 if (!cmd->data && cmd->busy_timeout > 9000)
652 timeout = DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
653 else
654 timeout = 10 * HZ;
655 schedule_delayed_work(&host->timeout_work, timeout);
656
657 host->cmd = cmd;
658
659 /* Clear any error flags */
660 sdhsts = readl(host->ioaddr + SDHSTS);
661 if (sdhsts & SDHSTS_ERROR_MASK)
662 writel(sdhsts, host->ioaddr + SDHSTS);
663
664 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
665 dev_err(dev, "unsupported response type!\n");
666 cmd->error = -EINVAL;
667 bcm2835_finish_request(host);
668 return false;
669 }
670
671 bcm2835_prepare_data(host, cmd);
672
673 writel(cmd->arg, host->ioaddr + SDARG);
674
675 sdcmd = cmd->opcode & SDCMD_CMD_MASK;
676
677 host->use_busy = false;
678 if (!(cmd->flags & MMC_RSP_PRESENT)) {
679 sdcmd |= SDCMD_NO_RESPONSE;
680 } else {
681 if (cmd->flags & MMC_RSP_136)
682 sdcmd |= SDCMD_LONG_RESPONSE;
683 if (cmd->flags & MMC_RSP_BUSY) {
684 sdcmd |= SDCMD_BUSYWAIT;
685 host->use_busy = true;
686 }
687 }
688
689 if (cmd->data) {
690 if (cmd->data->flags & MMC_DATA_WRITE)
691 sdcmd |= SDCMD_WRITE_CMD;
692 if (cmd->data->flags & MMC_DATA_READ)
693 sdcmd |= SDCMD_READ_CMD;
694 }
695
696 writel(sdcmd | SDCMD_NEW_FLAG, host->ioaddr + SDCMD);
697
698 return true;
699}
700
701static void bcm2835_transfer_complete(struct bcm2835_host *host)
702{
703 struct mmc_data *data;
704
705 WARN_ON(!host->data_complete);
706
707 data = host->data;
708 host->data = NULL;
709
710 /* Need to send CMD12 if -
711 * a) open-ended multiblock transfer (no CMD23)
712 * b) error in multiblock transfer
713 */
714 if (host->mrq->stop && (data->error || !host->use_sbc)) {
715 if (bcm2835_send_command(host, host->mrq->stop)) {
716 /* No busy, so poll for completion */
717 if (!host->use_busy)
718 bcm2835_finish_command(host);
719 }
720 } else {
721 bcm2835_wait_transfer_complete(host);
722 bcm2835_finish_request(host);
723 }
724}
725
726static void bcm2835_finish_data(struct bcm2835_host *host)
727{
728 struct device *dev = &host->pdev->dev;
729 struct mmc_data *data;
730
731 data = host->data;
732
733 host->hcfg &= ~(SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN);
734 writel(host->hcfg, host->ioaddr + SDHCFG);
735
736 data->bytes_xfered = data->error ? 0 : (data->blksz * data->blocks);
737
738 host->data_complete = true;
739
740 if (host->cmd) {
741 /* Data managed to finish before the
742 * command completed. Make sure we do
743 * things in the proper order.
744 */
745 dev_dbg(dev, "Finished early - HSTS %08x\n",
746 readl(host->ioaddr + SDHSTS));
747 } else {
748 bcm2835_transfer_complete(host);
749 }
750}
751
752static void bcm2835_finish_command(struct bcm2835_host *host)
753{
754 struct device *dev = &host->pdev->dev;
755 struct mmc_command *cmd = host->cmd;
756 u32 sdcmd;
757
758 sdcmd = bcm2835_read_wait_sdcmd(host, 100);
759
760 /* Check for errors */
761 if (sdcmd & SDCMD_NEW_FLAG) {
762 dev_err(dev, "command never completed.\n");
763 bcm2835_dumpregs(host);
764 host->cmd->error = -EIO;
765 bcm2835_finish_request(host);
766 return;
767 } else if (sdcmd & SDCMD_FAIL_FLAG) {
768 u32 sdhsts = readl(host->ioaddr + SDHSTS);
769
770 /* Clear the errors */
771 writel(SDHSTS_ERROR_MASK, host->ioaddr + SDHSTS);
772
773 if (!(sdhsts & SDHSTS_CRC7_ERROR) ||
774 (host->cmd->opcode != MMC_SEND_OP_COND)) {
775 if (sdhsts & SDHSTS_CMD_TIME_OUT) {
776 host->cmd->error = -ETIMEDOUT;
777 } else {
778 dev_err(dev, "unexpected command %d error\n",
779 host->cmd->opcode);
780 bcm2835_dumpregs(host);
781 host->cmd->error = -EILSEQ;
782 }
783 bcm2835_finish_request(host);
784 return;
785 }
786 }
787
788 if (cmd->flags & MMC_RSP_PRESENT) {
789 if (cmd->flags & MMC_RSP_136) {
790 int i;
791
792 for (i = 0; i < 4; i++) {
793 cmd->resp[3 - i] =
794 readl(host->ioaddr + SDRSP0 + i * 4);
795 }
796 } else {
797 cmd->resp[0] = readl(host->ioaddr + SDRSP0);
798 }
799 }
800
801 if (cmd == host->mrq->sbc) {
802 /* Finished CMD23, now send actual command. */
803 host->cmd = NULL;
804 if (bcm2835_send_command(host, host->mrq->cmd)) {
805 if (host->data && host->dma_desc)
806 /* DMA transfer starts now, PIO starts
807 * after irq
808 */
809 bcm2835_start_dma(host);
810
811 if (!host->use_busy)
812 bcm2835_finish_command(host);
813 }
814 } else if (cmd == host->mrq->stop) {
815 /* Finished CMD12 */
816 bcm2835_finish_request(host);
817 } else {
818 /* Processed actual command. */
819 host->cmd = NULL;
820 if (!host->data)
821 bcm2835_finish_request(host);
822 else if (host->data_complete)
823 bcm2835_transfer_complete(host);
824 }
825}
826
827static void bcm2835_timeout(struct work_struct *work)
828{
829 struct delayed_work *d = to_delayed_work(work);
830 struct bcm2835_host *host =
831 container_of(d, struct bcm2835_host, timeout_work);
832 struct device *dev = &host->pdev->dev;
833
834 mutex_lock(&host->mutex);
835
836 if (host->mrq) {
837 dev_err(dev, "timeout waiting for hardware interrupt.\n");
838 bcm2835_dumpregs(host);
839
840 if (host->data) {
841 host->data->error = -ETIMEDOUT;
842 bcm2835_finish_data(host);
843 } else {
844 if (host->cmd)
845 host->cmd->error = -ETIMEDOUT;
846 else
847 host->mrq->cmd->error = -ETIMEDOUT;
848
849 bcm2835_finish_request(host);
850 }
851 }
852
853 mutex_unlock(&host->mutex);
854}
855
856static bool bcm2835_check_cmd_error(struct bcm2835_host *host, u32 intmask)
857{
858 struct device *dev = &host->pdev->dev;
859
860 if (!(intmask & SDHSTS_ERROR_MASK))
861 return false;
862
863 if (!host->cmd)
864 return true;
865
866 dev_err(dev, "sdhost_busy_irq: intmask %08x\n", intmask);
867 if (intmask & SDHSTS_CRC7_ERROR) {
868 host->cmd->error = -EILSEQ;
869 } else if (intmask & (SDHSTS_CRC16_ERROR |
870 SDHSTS_FIFO_ERROR)) {
871 if (host->mrq->data)
872 host->mrq->data->error = -EILSEQ;
873 else
874 host->cmd->error = -EILSEQ;
875 } else if (intmask & SDHSTS_REW_TIME_OUT) {
876 if (host->mrq->data)
877 host->mrq->data->error = -ETIMEDOUT;
878 else
879 host->cmd->error = -ETIMEDOUT;
880 } else if (intmask & SDHSTS_CMD_TIME_OUT) {
881 host->cmd->error = -ETIMEDOUT;
882 }
883 bcm2835_dumpregs(host);
884 return true;
885}
886
887static void bcm2835_check_data_error(struct bcm2835_host *host, u32 intmask)
888{
889 if (!host->data)
890 return;
891 if (intmask & (SDHSTS_CRC16_ERROR | SDHSTS_FIFO_ERROR))
892 host->data->error = -EILSEQ;
893 if (intmask & SDHSTS_REW_TIME_OUT)
894 host->data->error = -ETIMEDOUT;
895}
896
897static void bcm2835_busy_irq(struct bcm2835_host *host)
898{
899 if (WARN_ON(!host->cmd)) {
900 bcm2835_dumpregs(host);
901 return;
902 }
903
904 if (WARN_ON(!host->use_busy)) {
905 bcm2835_dumpregs(host);
906 return;
907 }
908 host->use_busy = false;
909
910 bcm2835_finish_command(host);
911}
912
913static void bcm2835_data_irq(struct bcm2835_host *host, u32 intmask)
914{
915 /* There are no dedicated data/space available interrupt
916 * status bits, so it is necessary to use the single shared
917 * data/space available FIFO status bits. It is therefore not
918 * an error to get here when there is no data transfer in
919 * progress.
920 */
921 if (!host->data)
922 return;
923
924 bcm2835_check_data_error(host, intmask);
925 if (host->data->error)
926 goto finished;
927
928 if (host->data->flags & MMC_DATA_WRITE) {
929 /* Use the block interrupt for writes after the first block */
930 host->hcfg &= ~(SDHCFG_DATA_IRPT_EN);
931 host->hcfg |= SDHCFG_BLOCK_IRPT_EN;
932 writel(host->hcfg, host->ioaddr + SDHCFG);
933 bcm2835_transfer_pio(host);
934 } else {
935 bcm2835_transfer_pio(host);
936 host->blocks--;
937 if ((host->blocks == 0) || host->data->error)
938 goto finished;
939 }
940 return;
941
942finished:
943 host->hcfg &= ~(SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN);
944 writel(host->hcfg, host->ioaddr + SDHCFG);
945}
946
947static void bcm2835_data_threaded_irq(struct bcm2835_host *host)
948{
949 if (!host->data)
950 return;
951 if ((host->blocks == 0) || host->data->error)
952 bcm2835_finish_data(host);
953}
954
955static void bcm2835_block_irq(struct bcm2835_host *host)
956{
957 if (WARN_ON(!host->data)) {
958 bcm2835_dumpregs(host);
959 return;
960 }
961
962 if (!host->dma_desc) {
963 WARN_ON(!host->blocks);
964 if (host->data->error || (--host->blocks == 0))
965 bcm2835_finish_data(host);
966 else
967 bcm2835_transfer_pio(host);
968 } else if (host->data->flags & MMC_DATA_WRITE) {
969 bcm2835_finish_data(host);
970 }
971}
972
973static irqreturn_t bcm2835_irq(int irq, void *dev_id)
974{
975 irqreturn_t result = IRQ_NONE;
976 struct bcm2835_host *host = dev_id;
977 u32 intmask;
978
979 spin_lock(&host->lock);
980
981 intmask = readl(host->ioaddr + SDHSTS);
982
983 writel(SDHSTS_BUSY_IRPT |
984 SDHSTS_BLOCK_IRPT |
985 SDHSTS_SDIO_IRPT |
986 SDHSTS_DATA_FLAG,
987 host->ioaddr + SDHSTS);
988
989 if (intmask & SDHSTS_BLOCK_IRPT) {
990 bcm2835_check_data_error(host, intmask);
991 host->irq_block = true;
992 result = IRQ_WAKE_THREAD;
993 }
994
995 if (intmask & SDHSTS_BUSY_IRPT) {
996 if (!bcm2835_check_cmd_error(host, intmask)) {
997 host->irq_busy = true;
998 result = IRQ_WAKE_THREAD;
999 } else {
1000 result = IRQ_HANDLED;
1001 }
1002 }
1003
1004 /* There is no true data interrupt status bit, so it is
1005 * necessary to qualify the data flag with the interrupt
1006 * enable bit.
1007 */
1008 if ((intmask & SDHSTS_DATA_FLAG) &&
1009 (host->hcfg & SDHCFG_DATA_IRPT_EN)) {
1010 bcm2835_data_irq(host, intmask);
1011 host->irq_data = true;
1012 result = IRQ_WAKE_THREAD;
1013 }
1014
1015 spin_unlock(&host->lock);
1016
1017 return result;
1018}
1019
1020static irqreturn_t bcm2835_threaded_irq(int irq, void *dev_id)
1021{
1022 struct bcm2835_host *host = dev_id;
1023 unsigned long flags;
1024 bool block, busy, data;
1025
1026 spin_lock_irqsave(&host->lock, flags);
1027
1028 block = host->irq_block;
1029 busy = host->irq_busy;
1030 data = host->irq_data;
1031 host->irq_block = false;
1032 host->irq_busy = false;
1033 host->irq_data = false;
1034
1035 spin_unlock_irqrestore(&host->lock, flags);
1036
1037 mutex_lock(&host->mutex);
1038
1039 if (block)
1040 bcm2835_block_irq(host);
1041 if (busy)
1042 bcm2835_busy_irq(host);
1043 if (data)
1044 bcm2835_data_threaded_irq(host);
1045
1046 mutex_unlock(&host->mutex);
1047
1048 return IRQ_HANDLED;
1049}
1050
1051static void bcm2835_dma_complete_work(struct work_struct *work)
1052{
1053 struct bcm2835_host *host =
1054 container_of(work, struct bcm2835_host, dma_work);
1055 struct mmc_data *data = host->data;
1056
1057 mutex_lock(&host->mutex);
1058
1059 if (host->dma_chan) {
1060 dma_unmap_sg(host->dma_chan->device->dev,
1061 data->sg, data->sg_len,
1062 host->dma_dir);
1063
1064 host->dma_chan = NULL;
1065 }
1066
1067 if (host->drain_words) {
1068 unsigned long flags;
1069 void *page;
1070 u32 *buf;
1071
1072 if (host->drain_offset & PAGE_MASK) {
1073 host->drain_page += host->drain_offset >> PAGE_SHIFT;
1074 host->drain_offset &= ~PAGE_MASK;
1075 }
1076 local_irq_save(flags);
1077 page = kmap_atomic(host->drain_page);
1078 buf = page + host->drain_offset;
1079
1080 while (host->drain_words) {
1081 u32 edm = readl(host->ioaddr + SDEDM);
1082
1083 if ((edm >> 4) & 0x1f)
1084 *(buf++) = readl(host->ioaddr + SDDATA);
1085 host->drain_words--;
1086 }
1087
1088 kunmap_atomic(page);
1089 local_irq_restore(flags);
1090 }
1091
1092 bcm2835_finish_data(host);
1093
1094 mutex_unlock(&host->mutex);
1095}
1096
1097static void bcm2835_set_clock(struct bcm2835_host *host, unsigned int clock)
1098{
1099 int div;
1100
1101 /* The SDCDIV register has 11 bits, and holds (div - 2). But
1102	 * in data mode the max is 50MHz without a minimum, and only
1103 * the bottom 3 bits are used. Since the switch over is
1104 * automatic (unless we have marked the card as slow...),
1105 * chosen values have to make sense in both modes. Ident mode
1106	 * must be 100-400KHz, so we can range-check the requested
1107 * clock. CMD15 must be used to return to data mode, so this
1108 * can be monitored.
1109 *
1110 * clock 250MHz -> 0->125MHz, 1->83.3MHz, 2->62.5MHz, 3->50.0MHz
1111 * 4->41.7MHz, 5->35.7MHz, 6->31.3MHz, 7->27.8MHz
1112 *
1113 * 623->400KHz/27.8MHz
1114	 * reset value (507)->491159Hz/50MHz
1115 *
1116 * BUT, the 3-bit clock divisor in data mode is too small if
1117 * the core clock is higher than 250MHz, so instead use the
1118 * SLOW_CARD configuration bit to force the use of the ident
1119 * clock divisor at all times.
1120 */
1121
1122 if (clock < 100000) {
1123 /* Can't stop the clock, but make it as slow as possible
1124 * to show willing
1125 */
1126 host->cdiv = SDCDIV_MAX_CDIV;
1127 writel(host->cdiv, host->ioaddr + SDCDIV);
1128 return;
1129 }
1130
1131 div = host->max_clk / clock;
1132 if (div < 2)
1133 div = 2;
1134 if ((host->max_clk / div) > clock)
1135 div++;
1136 div -= 2;
1137
1138 if (div > SDCDIV_MAX_CDIV)
1139 div = SDCDIV_MAX_CDIV;
1140
1141 clock = host->max_clk / (div + 2);
1142 host->mmc->actual_clock = clock;
1143
1144 /* Calibrate some delays */
1145
1146 host->ns_per_fifo_word = (1000000000 / clock) *
1147 ((host->mmc->caps & MMC_CAP_4_BIT_DATA) ? 8 : 32);
1148
1149 host->cdiv = div;
1150 writel(host->cdiv, host->ioaddr + SDCDIV);
1151
1152 /* Set the timeout to 500ms */
1153 writel(host->mmc->actual_clock / 2, host->ioaddr + SDTOUT);
1154}
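
/*
 * The divisor arithmetic above is easier to follow with numbers
 * plugged in. The sketch below is an editorial illustration assuming
 * max_clk = 250MHz and a requested clock of 50MHz (the helper name
 * sdcdiv_for() is hypothetical, not part of this driver):
 *
 *   div = 250000000 / 50000000 = 5
 *   250000000 / 5 == 50000000, not above the request, so no bump
 *   div -= 2  ->  3            (SDCDIV holds div - 2)
 *   actual = 250000000 / (3 + 2) = 50MHz
 */
static unsigned int sdcdiv_for(unsigned int max_clk, unsigned int clock)
{
	unsigned int div = max_clk / clock;

	if (div < 2)
		div = 2;
	/* Never overclock: bump the divisor if the result is too fast */
	if ((max_clk / div) > clock)
		div++;
	div -= 2;
	if (div > SDCDIV_MAX_CDIV)
		div = SDCDIV_MAX_CDIV;

	return div;	/* f_actual = max_clk / (div + 2) */
}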
1155
1156static void bcm2835_request(struct mmc_host *mmc, struct mmc_request *mrq)
1157{
1158 struct bcm2835_host *host = mmc_priv(mmc);
1159 struct device *dev = &host->pdev->dev;
1160 u32 edm, fsm;
1161
1162 /* Reset the error statuses in case this is a retry */
1163 if (mrq->sbc)
1164 mrq->sbc->error = 0;
1165 if (mrq->cmd)
1166 mrq->cmd->error = 0;
1167 if (mrq->data)
1168 mrq->data->error = 0;
1169 if (mrq->stop)
1170 mrq->stop->error = 0;
1171
1172 if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
1173 dev_err(dev, "unsupported block size (%d bytes)\n",
1174 mrq->data->blksz);
1175 mrq->cmd->error = -EINVAL;
1176 mmc_request_done(mmc, mrq);
1177 return;
1178 }
1179
1180 if (host->use_dma && mrq->data && (mrq->data->blocks > PIO_THRESHOLD))
1181 bcm2835_prepare_dma(host, mrq->data);
1182
1183 mutex_lock(&host->mutex);
1184
1185 WARN_ON(host->mrq);
1186 host->mrq = mrq;
1187
1188 edm = readl(host->ioaddr + SDEDM);
1189 fsm = edm & SDEDM_FSM_MASK;
1190
1191 if ((fsm != SDEDM_FSM_IDENTMODE) &&
1192 (fsm != SDEDM_FSM_DATAMODE)) {
1193 dev_err(dev, "previous command (%d) not complete (EDM %08x)\n",
1194 readl(host->ioaddr + SDCMD) & SDCMD_CMD_MASK,
1195 edm);
1196 bcm2835_dumpregs(host);
1197 mrq->cmd->error = -EILSEQ;
1198 bcm2835_finish_request(host);
1199 mutex_unlock(&host->mutex);
1200 return;
1201 }
1202
1203 host->use_sbc = !!mrq->sbc && host->mrq->data &&
1204 (host->mrq->data->flags & MMC_DATA_READ);
1205 if (host->use_sbc) {
1206 if (bcm2835_send_command(host, mrq->sbc)) {
1207 if (!host->use_busy)
1208 bcm2835_finish_command(host);
1209 }
1210 } else if (bcm2835_send_command(host, mrq->cmd)) {
1211 if (host->data && host->dma_desc) {
1212 /* DMA transfer starts now, PIO starts after irq */
1213 bcm2835_start_dma(host);
1214 }
1215
1216 if (!host->use_busy)
1217 bcm2835_finish_command(host);
1218 }
1219
1220 mutex_unlock(&host->mutex);
1221}
1222
1223static void bcm2835_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1224{
1225 struct bcm2835_host *host = mmc_priv(mmc);
1226
1227 mutex_lock(&host->mutex);
1228
1229 if (!ios->clock || ios->clock != host->clock) {
1230 bcm2835_set_clock(host, ios->clock);
1231 host->clock = ios->clock;
1232 }
1233
1234 /* set bus width */
1235 host->hcfg &= ~SDHCFG_WIDE_EXT_BUS;
1236 if (ios->bus_width == MMC_BUS_WIDTH_4)
1237 host->hcfg |= SDHCFG_WIDE_EXT_BUS;
1238
1239 host->hcfg |= SDHCFG_WIDE_INT_BUS;
1240
1241 /* Disable clever clock switching, to cope with fast core clocks */
1242 host->hcfg |= SDHCFG_SLOW_CARD;
1243
1244 writel(host->hcfg, host->ioaddr + SDHCFG);
1245
1246 mutex_unlock(&host->mutex);
1247}
1248
1249static struct mmc_host_ops bcm2835_ops = {
1250 .request = bcm2835_request,
1251 .set_ios = bcm2835_set_ios,
1252 .hw_reset = bcm2835_reset,
1253};
1254
1255static int bcm2835_add_host(struct bcm2835_host *host)
1256{
1257 struct mmc_host *mmc = host->mmc;
1258 struct device *dev = &host->pdev->dev;
1259 char pio_limit_string[20];
1260 int ret;
1261
1262 mmc->f_max = host->max_clk;
1263 mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV;
1264
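	/* Longest busy wait (in ms) whose clock count at f_max fits in 32 bits */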
1265 mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000);
1266
1267 dev_dbg(dev, "f_max %d, f_min %d, max_busy_timeout %d\n",
1268 mmc->f_max, mmc->f_min, mmc->max_busy_timeout);
1269
1270 /* host controller capabilities */
1271 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
1272 MMC_CAP_NEEDS_POLL | MMC_CAP_HW_RESET | MMC_CAP_ERASE |
1273 MMC_CAP_CMD23;
1274
1275 spin_lock_init(&host->lock);
1276 mutex_init(&host->mutex);
1277
1278 if (IS_ERR_OR_NULL(host->dma_chan_rxtx)) {
1279 dev_warn(dev, "unable to initialise DMA channel. Falling back to PIO\n");
1280 host->use_dma = false;
1281 } else {
1282 host->use_dma = true;
1283
1284 host->dma_cfg_tx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1285 host->dma_cfg_tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1286 host->dma_cfg_tx.slave_id = 13; /* DREQ channel */
1287 host->dma_cfg_tx.direction = DMA_MEM_TO_DEV;
1288 host->dma_cfg_tx.src_addr = 0;
1289 host->dma_cfg_tx.dst_addr = host->phys_addr + SDDATA;
1290
1291 host->dma_cfg_rx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1292 host->dma_cfg_rx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1293 host->dma_cfg_rx.slave_id = 13; /* DREQ channel */
1294 host->dma_cfg_rx.direction = DMA_DEV_TO_MEM;
1295 host->dma_cfg_rx.src_addr = host->phys_addr + SDDATA;
1296 host->dma_cfg_rx.dst_addr = 0;
1297
1298 if (dmaengine_slave_config(host->dma_chan_rxtx,
1299 &host->dma_cfg_tx) != 0 ||
1300 dmaengine_slave_config(host->dma_chan_rxtx,
1301 &host->dma_cfg_rx) != 0)
1302 host->use_dma = false;
1303 }
1304
1305 mmc->max_segs = 128;
1306 mmc->max_req_size = 524288;
1307 mmc->max_seg_size = mmc->max_req_size;
1308 mmc->max_blk_size = 1024;
1309 mmc->max_blk_count = 65535;
1310
1311 /* report supported voltage ranges */
1312 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1313
1314 INIT_WORK(&host->dma_work, bcm2835_dma_complete_work);
1315 INIT_DELAYED_WORK(&host->timeout_work, bcm2835_timeout);
1316
1317 /* Set interrupt enables */
1318 host->hcfg = SDHCFG_BUSY_IRPT_EN;
1319
1320 bcm2835_reset_internal(host);
1321
1322 ret = request_threaded_irq(host->irq, bcm2835_irq,
1323 bcm2835_threaded_irq,
1324 0, mmc_hostname(mmc), host);
1325 if (ret) {
1326 dev_err(dev, "failed to request IRQ %d: %d\n", host->irq, ret);
1327 return ret;
1328 }
1329
1330 ret = mmc_add_host(mmc);
1331 if (ret) {
1332 free_irq(host->irq, host);
1333 return ret;
1334 }
1335
1336 pio_limit_string[0] = '\0';
1337 if (host->use_dma && (PIO_THRESHOLD > 0))
1338 sprintf(pio_limit_string, " (>%d)", PIO_THRESHOLD);
1339 dev_info(dev, "loaded - DMA %s%s\n",
1340 host->use_dma ? "enabled" : "disabled", pio_limit_string);
1341
1342 return 0;
1343}
1344
1345static int bcm2835_probe(struct platform_device *pdev)
1346{
1347 struct device *dev = &pdev->dev;
1348 struct clk *clk;
1349 struct resource *iomem;
1350 struct bcm2835_host *host;
1351 struct mmc_host *mmc;
1352 const __be32 *regaddr_p;
1353 int ret;
1354
1355 dev_dbg(dev, "%s\n", __func__);
1356 mmc = mmc_alloc_host(sizeof(*host), dev);
1357 if (!mmc)
1358 return -ENOMEM;
1359
1360 mmc->ops = &bcm2835_ops;
1361 host = mmc_priv(mmc);
1362 host->mmc = mmc;
1363 host->pdev = pdev;
1364 spin_lock_init(&host->lock);
1365
1366 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1367 host->ioaddr = devm_ioremap_resource(dev, iomem);
1368 if (IS_ERR(host->ioaddr)) {
1369 ret = PTR_ERR(host->ioaddr);
1370 goto err;
1371 }
1372
1373 /* Parse OF address directly to get the physical address for
1374 * DMA to our registers.
1375 */
1376 regaddr_p = of_get_address(pdev->dev.of_node, 0, NULL, NULL);
1377 if (!regaddr_p) {
1378 dev_err(dev, "Can't get phys address\n");
1379 ret = -EINVAL;
1380 goto err;
1381 }
1382
1383 host->phys_addr = be32_to_cpup(regaddr_p);
1384
1385 host->dma_chan = NULL;
1386 host->dma_desc = NULL;
1387
1388 host->dma_chan_rxtx = dma_request_slave_channel(dev, "rx-tx");
1389
1390 clk = devm_clk_get(dev, NULL);
1391 if (IS_ERR(clk)) {
1392 ret = PTR_ERR(clk);
1393 if (ret != -EPROBE_DEFER)
1394 dev_err(dev, "could not get clk: %d\n", ret);
1395 goto err;
1396 }
1397
1398 host->max_clk = clk_get_rate(clk);
1399
1400 host->irq = platform_get_irq(pdev, 0);
1401 if (host->irq <= 0) {
1402 dev_err(dev, "get IRQ failed\n");
1403 ret = -EINVAL;
1404 goto err;
1405 }
1406
1407 ret = mmc_of_parse(mmc);
1408 if (ret)
1409 goto err;
1410
1411 ret = bcm2835_add_host(host);
1412 if (ret)
1413 goto err;
1414
1415 platform_set_drvdata(pdev, host);
1416
1417 dev_dbg(dev, "%s -> OK\n", __func__);
1418
1419 return 0;
1420
1421err:
1422 dev_dbg(dev, "%s -> err %d\n", __func__, ret);
1423 mmc_free_host(mmc);
1424
1425 return ret;
1426}
1427
1428static int bcm2835_remove(struct platform_device *pdev)
1429{
1430 struct bcm2835_host *host = platform_get_drvdata(pdev);
1431
1432 mmc_remove_host(host->mmc);
1433
1434 writel(SDVDD_POWER_OFF, host->ioaddr + SDVDD);
1435
1436 free_irq(host->irq, host);
1437
1438 cancel_work_sync(&host->dma_work);
1439 cancel_delayed_work_sync(&host->timeout_work);
1440
1441 mmc_free_host(host->mmc);
1442 platform_set_drvdata(pdev, NULL);
1443
1444 return 0;
1445}
1446
1447static const struct of_device_id bcm2835_match[] = {
1448 { .compatible = "brcm,bcm2835-sdhost" },
1449 { }
1450};
1451MODULE_DEVICE_TABLE(of, bcm2835_match);
1452
1453static struct platform_driver bcm2835_driver = {
1454 .probe = bcm2835_probe,
1455 .remove = bcm2835_remove,
1456 .driver = {
1457 .name = "sdhost-bcm2835",
1458 .of_match_table = bcm2835_match,
1459 },
1460};
1461module_platform_driver(bcm2835_driver);
1462
1463MODULE_ALIAS("platform:sdhost-bcm2835");
1464MODULE_DESCRIPTION("BCM2835 SDHost driver");
1465MODULE_LICENSE("GPL v2");
1466MODULE_AUTHOR("Phil Elwell");
diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c
new file mode 100644
index 000000000000..772d0900026d
--- /dev/null
+++ b/drivers/mmc/host/cavium-octeon.c
@@ -0,0 +1,351 @@
1/*
2 * Driver for MMC and SSD cards for Cavium OCTEON SOCs.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2012-2017 Cavium Inc.
9 */
10#include <linux/dma-mapping.h>
11#include <linux/gpio/consumer.h>
12#include <linux/interrupt.h>
13#include <linux/mmc/mmc.h>
14#include <linux/mmc/slot-gpio.h>
15#include <linux/module.h>
16#include <linux/of_platform.h>
17#include <asm/octeon/octeon.h>
18#include "cavium.h"
19
20#define CVMX_MIO_BOOT_CTL CVMX_ADD_IO_SEG(0x00011800000000D0ull)
21
22/*
23 * The l2c* functions below are used for the EMMC-17978 workaround.
24 *
25 * Due to a bug in the design of the MMC bus hardware, the 2nd to last
26 * cache block of a DMA read must be locked into the L2 Cache.
27 * Otherwise, data corruption may occur.
28 */
29static inline void *phys_to_ptr(u64 address)
30{
31 return (void *)(address | (1ull << 63)); /* XKPHYS */
32}
33
34/*
35 * Lock a single line into L2. The line is zeroed before locking
36 * to make sure no dram accesses are made.
37 */
38static void l2c_lock_line(u64 addr)
39{
40 char *addr_ptr = phys_to_ptr(addr);
41
42 asm volatile (
43		"cache 31, %[line]" /* Lock the line */
44 ::[line] "m" (*addr_ptr));
45}
46
47/* Unlock a single line in the L2 cache. */
48static void l2c_unlock_line(u64 addr)
49{
50 char *addr_ptr = phys_to_ptr(addr);
51
52 asm volatile (
53 "cache 23, %[line]" /* Unlock the line */
54 ::[line] "m" (*addr_ptr));
55}
56
57/* Locks a memory region in the L2 cache. */
58static void l2c_lock_mem_region(u64 start, u64 len)
59{
60 u64 end;
61
62 /* Round start/end to cache line boundaries */
63 end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE);
64 start = ALIGN(start, CVMX_CACHE_LINE_SIZE);
65
66 while (start <= end) {
67 l2c_lock_line(start);
68 start += CVMX_CACHE_LINE_SIZE;
69 }
70 asm volatile("sync");
71}
72
73/* Unlock a memory region in the L2 cache. */
74static void l2c_unlock_mem_region(u64 start, u64 len)
75{
76 u64 end;
77
78 /* Round start/end to cache line boundaries */
79 end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE);
80 start = ALIGN(start, CVMX_CACHE_LINE_SIZE);
81
82 while (start <= end) {
83 l2c_unlock_line(start);
84 start += CVMX_CACHE_LINE_SIZE;
85 }
86}
87
88static void octeon_mmc_acquire_bus(struct cvm_mmc_host *host)
89{
90 if (!host->has_ciu3) {
91 down(&octeon_bootbus_sem);
92 /* For CN70XX, switch the MMC controller onto the bus. */
93 if (OCTEON_IS_MODEL(OCTEON_CN70XX))
94 writeq(0, (void __iomem *)CVMX_MIO_BOOT_CTL);
95 } else {
96 down(&host->mmc_serializer);
97 }
98}
99
100static void octeon_mmc_release_bus(struct cvm_mmc_host *host)
101{
102 if (!host->has_ciu3)
103 up(&octeon_bootbus_sem);
104 else
105 up(&host->mmc_serializer);
106}
107
108static void octeon_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
109{
110 writeq(val, host->base + MIO_EMM_INT(host));
111	if (!host->dma_active || !host->has_ciu3)
112 writeq(val, host->base + MIO_EMM_INT_EN(host));
113}
114
115static void octeon_mmc_set_shared_power(struct cvm_mmc_host *host, int dir)
116{
117 if (dir == 0)
118 if (!atomic_dec_return(&host->shared_power_users))
119 gpiod_set_value_cansleep(host->global_pwr_gpiod, 0);
120 if (dir == 1)
121 if (atomic_inc_return(&host->shared_power_users) == 1)
122 gpiod_set_value_cansleep(host->global_pwr_gpiod, 1);
123}
124
125static void octeon_mmc_dmar_fixup(struct cvm_mmc_host *host,
126 struct mmc_command *cmd,
127 struct mmc_data *data,
128 u64 addr)
129{
130 if (cmd->opcode != MMC_WRITE_MULTIPLE_BLOCK)
131 return;
132 if (data->blksz * data->blocks <= 1024)
133 return;
134
135 host->n_minus_one = addr + (data->blksz * data->blocks) - 1024;
136 l2c_lock_mem_region(host->n_minus_one, 512);
137}
138
139static void octeon_mmc_dmar_fixup_done(struct cvm_mmc_host *host)
140{
141 if (!host->n_minus_one)
142 return;
143 l2c_unlock_mem_region(host->n_minus_one, 512);
144 host->n_minus_one = 0;
145}
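
/*
 * Worked example of the fixup above (illustrative numbers): for a
 * multi-block write of 8 x 512 = 4096 bytes at address addr,
 * n_minus_one = addr + 4096 - 1024, i.e. the start of the
 * second-to-last 512-byte block. That region is locked into L2 for
 * the duration of the DMA and unlocked again in the _done hook.
 */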
146
147static int octeon_mmc_probe(struct platform_device *pdev)
148{
149 struct device_node *cn, *node = pdev->dev.of_node;
150 struct cvm_mmc_host *host;
151 struct resource *res;
152 void __iomem *base;
153 int mmc_irq[9];
154 int i, ret = 0;
155 u64 val;
156
157 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
158 if (!host)
159 return -ENOMEM;
160
161 spin_lock_init(&host->irq_handler_lock);
162 sema_init(&host->mmc_serializer, 1);
163
164 host->dev = &pdev->dev;
165 host->acquire_bus = octeon_mmc_acquire_bus;
166 host->release_bus = octeon_mmc_release_bus;
167 host->int_enable = octeon_mmc_int_enable;
168 host->set_shared_power = octeon_mmc_set_shared_power;
169 if (OCTEON_IS_MODEL(OCTEON_CN6XXX) ||
170 OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
171 host->dmar_fixup = octeon_mmc_dmar_fixup;
172 host->dmar_fixup_done = octeon_mmc_dmar_fixup_done;
173 }
174
175 host->sys_freq = octeon_get_io_clock_rate();
176
177 if (of_device_is_compatible(node, "cavium,octeon-7890-mmc")) {
178 host->big_dma_addr = true;
179 host->need_irq_handler_lock = true;
180 host->has_ciu3 = true;
181 host->use_sg = true;
182 /*
183 * First seven are the EMM_INT bits 0..6, then two for
184 * the EMM_DMA_INT bits
185 */
186 for (i = 0; i < 9; i++) {
187 mmc_irq[i] = platform_get_irq(pdev, i);
188 if (mmc_irq[i] < 0)
189 return mmc_irq[i];
190
191 /* work around legacy u-boot device trees */
192 irq_set_irq_type(mmc_irq[i], IRQ_TYPE_EDGE_RISING);
193 }
194 } else {
195 host->big_dma_addr = false;
196 host->need_irq_handler_lock = false;
197 host->has_ciu3 = false;
198		/* The first IRQ is EMM, the second is DMA */
199 for (i = 0; i < 2; i++) {
200 mmc_irq[i] = platform_get_irq(pdev, i);
201 if (mmc_irq[i] < 0)
202 return mmc_irq[i];
203 }
204 }
205
206 host->last_slot = -1;
207
208 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
209 if (!res) {
210 dev_err(&pdev->dev, "Platform resource[0] is missing\n");
211 return -ENXIO;
212 }
213 base = devm_ioremap_resource(&pdev->dev, res);
214 if (IS_ERR(base))
215 return PTR_ERR(base);
216 host->base = (void __iomem *)base;
217 host->reg_off = 0;
218
219 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
220 if (!res) {
221 dev_err(&pdev->dev, "Platform resource[1] is missing\n");
222 return -EINVAL;
223 }
224 base = devm_ioremap_resource(&pdev->dev, res);
225 if (IS_ERR(base))
226 return PTR_ERR(base);
227 host->dma_base = (void __iomem *)base;
228 /*
229	 * To keep the register addresses shared we intentionally use
230	 * a negative offset here; the first register used on Octeon
231	 * therefore starts at 0x20 (MIO_EMM_DMA_CFG).
232 */
233 host->reg_off_dma = -0x20;
234
235 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
236 if (ret)
237 return ret;
238
239 /*
240 * Clear out any pending interrupts that may be left over from
241	 * the bootloader.
242 */
243 val = readq(host->base + MIO_EMM_INT(host));
244 writeq(val, host->base + MIO_EMM_INT(host));
245
246 if (host->has_ciu3) {
247 /* Only CMD_DONE, DMA_DONE, CMD_ERR, DMA_ERR */
248 for (i = 1; i <= 4; i++) {
249 ret = devm_request_irq(&pdev->dev, mmc_irq[i],
250 cvm_mmc_interrupt,
251 0, cvm_mmc_irq_names[i], host);
252 if (ret < 0) {
253 dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
254 mmc_irq[i]);
255 return ret;
256 }
257 }
258 } else {
259 ret = devm_request_irq(&pdev->dev, mmc_irq[0],
260 cvm_mmc_interrupt, 0, KBUILD_MODNAME,
261 host);
262 if (ret < 0) {
263 dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
264 mmc_irq[0]);
265 return ret;
266 }
267 }
268
269 host->global_pwr_gpiod = devm_gpiod_get_optional(&pdev->dev,
270 "power-gpios",
271 GPIOD_OUT_HIGH);
272 if (IS_ERR(host->global_pwr_gpiod)) {
273 dev_err(&pdev->dev, "Invalid power GPIO\n");
274 return PTR_ERR(host->global_pwr_gpiod);
275 }
276
277 platform_set_drvdata(pdev, host);
278
279 i = 0;
280 for_each_child_of_node(node, cn) {
281 host->slot_pdev[i] =
282 of_platform_device_create(cn, NULL, &pdev->dev);
283 if (!host->slot_pdev[i]) {
284 i++;
285 continue;
286 }
287 ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
288 if (ret) {
289 dev_err(&pdev->dev, "Error populating slots\n");
290 octeon_mmc_set_shared_power(host, 0);
291 return ret;
292 }
293 i++;
294 }
295 return 0;
296}
297
298static int octeon_mmc_remove(struct platform_device *pdev)
299{
300 struct cvm_mmc_host *host = platform_get_drvdata(pdev);
301 u64 dma_cfg;
302 int i;
303
304 for (i = 0; i < CAVIUM_MAX_MMC; i++)
305 if (host->slot[i])
306 cvm_mmc_of_slot_remove(host->slot[i]);
307
308 dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
309 dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
310 writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
311
312 octeon_mmc_set_shared_power(host, 0);
313 return 0;
314}
315
316static const struct of_device_id octeon_mmc_match[] = {
317 {
318 .compatible = "cavium,octeon-6130-mmc",
319 },
320 {
321 .compatible = "cavium,octeon-7890-mmc",
322 },
323 {},
324};
325MODULE_DEVICE_TABLE(of, octeon_mmc_match);
326
327static struct platform_driver octeon_mmc_driver = {
328 .probe = octeon_mmc_probe,
329 .remove = octeon_mmc_remove,
330 .driver = {
331 .name = KBUILD_MODNAME,
332 .of_match_table = octeon_mmc_match,
333 },
334};
335
336static int __init octeon_mmc_init(void)
337{
338 return platform_driver_register(&octeon_mmc_driver);
339}
340
341static void __exit octeon_mmc_cleanup(void)
342{
343 platform_driver_unregister(&octeon_mmc_driver);
344}
345
346module_init(octeon_mmc_init);
347module_exit(octeon_mmc_cleanup);
348
349MODULE_AUTHOR("Cavium Inc. <support@cavium.com>");
350MODULE_DESCRIPTION("Low-level driver for Cavium OCTEON MMC/SSD card");
351MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c
new file mode 100644
index 000000000000..fe3d77267cd6
--- /dev/null
+++ b/drivers/mmc/host/cavium-thunderx.c
@@ -0,0 +1,187 @@
1/*
2 * Driver for MMC and SSD cards for Cavium ThunderX SOCs.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2016 Cavium Inc.
9 */
10#include <linux/dma-mapping.h>
11#include <linux/interrupt.h>
12#include <linux/mmc/mmc.h>
13#include <linux/module.h>
14#include <linux/of.h>
15#include <linux/of_platform.h>
16#include <linux/pci.h>
17#include "cavium.h"
18
19static void thunder_mmc_acquire_bus(struct cvm_mmc_host *host)
20{
21 down(&host->mmc_serializer);
22}
23
24static void thunder_mmc_release_bus(struct cvm_mmc_host *host)
25{
26 up(&host->mmc_serializer);
27}
28
29static void thunder_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
30{
31 writeq(val, host->base + MIO_EMM_INT(host));
32 writeq(val, host->base + MIO_EMM_INT_EN_SET(host));
33}
34
35static int thunder_mmc_register_interrupts(struct cvm_mmc_host *host,
36 struct pci_dev *pdev)
37{
38 int nvec, ret, i;
39
40 nvec = pci_alloc_irq_vectors(pdev, 1, 9, PCI_IRQ_MSIX);
41 if (nvec < 0)
42 return nvec;
43
44 /* register interrupts */
45 for (i = 0; i < nvec; i++) {
46 ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
47 cvm_mmc_interrupt,
48 0, cvm_mmc_irq_names[i], host);
49 if (ret)
50 return ret;
51 }
52 return 0;
53}
54
55static int thunder_mmc_probe(struct pci_dev *pdev,
56 const struct pci_device_id *id)
57{
58 struct device_node *node = pdev->dev.of_node;
59 struct device *dev = &pdev->dev;
60 struct device_node *child_node;
61 struct cvm_mmc_host *host;
62 int ret, i = 0;
63
64 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
65 if (!host)
66 return -ENOMEM;
67
68 pci_set_drvdata(pdev, host);
69 ret = pcim_enable_device(pdev);
70 if (ret)
71 return ret;
72
73 ret = pci_request_regions(pdev, KBUILD_MODNAME);
74 if (ret)
75 return ret;
76
77 host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
78 if (!host->base)
79 return -EINVAL;
80
81 /* On ThunderX these are identical */
82 host->dma_base = host->base;
83
84 host->reg_off = 0x2000;
85 host->reg_off_dma = 0x160;
86
87 host->clk = devm_clk_get(dev, NULL);
88 if (IS_ERR(host->clk))
89 return PTR_ERR(host->clk);
90
91 ret = clk_prepare_enable(host->clk);
92 if (ret)
93 return ret;
94 host->sys_freq = clk_get_rate(host->clk);
95
96 spin_lock_init(&host->irq_handler_lock);
97 sema_init(&host->mmc_serializer, 1);
98
99 host->dev = dev;
100 host->acquire_bus = thunder_mmc_acquire_bus;
101 host->release_bus = thunder_mmc_release_bus;
102 host->int_enable = thunder_mmc_int_enable;
103
104 host->use_sg = true;
105 host->big_dma_addr = true;
106 host->need_irq_handler_lock = true;
107 host->last_slot = -1;
108
109 ret = dma_set_mask(dev, DMA_BIT_MASK(48));
110 if (ret)
111 goto error;
112
113 /*
114 * Clear out any pending interrupts that may be left over from
115	 * the bootloader. Writing 1 to a bit clears it.
116 */
117 writeq(127, host->base + MIO_EMM_INT_EN(host));
118 writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host));
119 /* Clear DMA FIFO */
120 writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host));
121
122 ret = thunder_mmc_register_interrupts(host, pdev);
123 if (ret)
124 goto error;
125
126 for_each_child_of_node(node, child_node) {
127 /*
128 * mmc_of_parse and devm* require one device per slot.
129		 * Create a dummy device per slot, with its of_node pointing
130		 * at the slot node. The easiest way to get this is using
131 * of_platform_device_create.
132 */
133 if (of_device_is_compatible(child_node, "mmc-slot")) {
134 host->slot_pdev[i] = of_platform_device_create(child_node, NULL,
135 &pdev->dev);
136 if (!host->slot_pdev[i])
137 continue;
138
139 ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
140 if (ret)
141 goto error;
142 }
143 i++;
144 }
145 dev_info(dev, "probed\n");
146 return 0;
147
148error:
149 clk_disable_unprepare(host->clk);
150 return ret;
151}
152
153static void thunder_mmc_remove(struct pci_dev *pdev)
154{
155 struct cvm_mmc_host *host = pci_get_drvdata(pdev);
156 u64 dma_cfg;
157 int i;
158
159 for (i = 0; i < CAVIUM_MAX_MMC; i++)
160 if (host->slot[i])
161 cvm_mmc_of_slot_remove(host->slot[i]);
162
163 dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
164 dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
165 writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
166
167 clk_disable_unprepare(host->clk);
168}
169
170static const struct pci_device_id thunder_mmc_id_table[] = {
171 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa010) },
172 { 0, } /* end of table */
173};
174
175static struct pci_driver thunder_mmc_driver = {
176 .name = KBUILD_MODNAME,
177 .id_table = thunder_mmc_id_table,
178 .probe = thunder_mmc_probe,
179 .remove = thunder_mmc_remove,
180};
181
182module_pci_driver(thunder_mmc_driver);
183
184MODULE_AUTHOR("Cavium Inc.");
185MODULE_DESCRIPTION("Cavium ThunderX eMMC Driver");
186MODULE_LICENSE("GPL");
187MODULE_DEVICE_TABLE(pci, thunder_mmc_id_table);
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
new file mode 100644
index 000000000000..58b51ba6aabd
--- /dev/null
+++ b/drivers/mmc/host/cavium.c
@@ -0,0 +1,1090 @@
1/*
2 * Shared part of driver for MMC/SDHC controller on Cavium OCTEON and
3 * ThunderX SOCs.
4 *
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * Copyright (C) 2012-2017 Cavium Inc.
10 * Authors:
11 * David Daney <david.daney@cavium.com>
12 * Peter Swain <pswain@cavium.com>
13 * Steven J. Hill <steven.hill@cavium.com>
14 * Jan Glauber <jglauber@cavium.com>
15 */
16#include <linux/bitfield.h>
17#include <linux/delay.h>
18#include <linux/dma-direction.h>
19#include <linux/dma-mapping.h>
20#include <linux/gpio/consumer.h>
21#include <linux/interrupt.h>
22#include <linux/mmc/mmc.h>
23#include <linux/mmc/slot-gpio.h>
24#include <linux/module.h>
25#include <linux/regulator/consumer.h>
26#include <linux/scatterlist.h>
27#include <linux/time.h>
28
29#include "cavium.h"
30
31const char *cvm_mmc_irq_names[] = {
32 "MMC Buffer",
33 "MMC Command",
34 "MMC DMA",
35 "MMC Command Error",
36 "MMC DMA Error",
37 "MMC Switch",
38 "MMC Switch Error",
39 "MMC DMA int Fifo",
40 "MMC DMA int",
41};
42
43/*
44 * The Cavium MMC host hardware assumes that all commands have fixed
45 * command and response types. These are correct if MMC devices are
46 * being used. However, non-MMC devices like SD use command and
47 * response types that are unexpected by the host hardware.
48 *
49 * The command and response types can be overridden by supplying an
50 * XOR value that is applied to the type. We calculate the XOR value
51 * from the values in this table and the flags passed from the MMC
52 * core.
53 */
54static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
55 {0, 0}, /* CMD0 */
56 {0, 3}, /* CMD1 */
57 {0, 2}, /* CMD2 */
58 {0, 1}, /* CMD3 */
59 {0, 0}, /* CMD4 */
60 {0, 1}, /* CMD5 */
61 {0, 1}, /* CMD6 */
62 {0, 1}, /* CMD7 */
63 {1, 1}, /* CMD8 */
64 {0, 2}, /* CMD9 */
65 {0, 2}, /* CMD10 */
66 {1, 1}, /* CMD11 */
67 {0, 1}, /* CMD12 */
68 {0, 1}, /* CMD13 */
69 {1, 1}, /* CMD14 */
70 {0, 0}, /* CMD15 */
71 {0, 1}, /* CMD16 */
72 {1, 1}, /* CMD17 */
73 {1, 1}, /* CMD18 */
74 {3, 1}, /* CMD19 */
75 {2, 1}, /* CMD20 */
76 {0, 0}, /* CMD21 */
77 {0, 0}, /* CMD22 */
78 {0, 1}, /* CMD23 */
79 {2, 1}, /* CMD24 */
80 {2, 1}, /* CMD25 */
81 {2, 1}, /* CMD26 */
82 {2, 1}, /* CMD27 */
83 {0, 1}, /* CMD28 */
84 {0, 1}, /* CMD29 */
85 {1, 1}, /* CMD30 */
86 {1, 1}, /* CMD31 */
87 {0, 0}, /* CMD32 */
88 {0, 0}, /* CMD33 */
89 {0, 0}, /* CMD34 */
90 {0, 1}, /* CMD35 */
91 {0, 1}, /* CMD36 */
92 {0, 0}, /* CMD37 */
93 {0, 1}, /* CMD38 */
94 {0, 4}, /* CMD39 */
95 {0, 5}, /* CMD40 */
96 {0, 0}, /* CMD41 */
97 {2, 1}, /* CMD42 */
98 {0, 0}, /* CMD43 */
99 {0, 0}, /* CMD44 */
100 {0, 0}, /* CMD45 */
101 {0, 0}, /* CMD46 */
102 {0, 0}, /* CMD47 */
103 {0, 0}, /* CMD48 */
104 {0, 0}, /* CMD49 */
105 {0, 0}, /* CMD50 */
106 {0, 0}, /* CMD51 */
107 {0, 0}, /* CMD52 */
108 {0, 0}, /* CMD53 */
109 {0, 0}, /* CMD54 */
110 {0, 1}, /* CMD55 */
111 {0xff, 0xff}, /* CMD56 */
112 {0, 0}, /* CMD57 */
113 {0, 0}, /* CMD58 */
114 {0, 0}, /* CMD59 */
115 {0, 0}, /* CMD60 */
116 {0, 0}, /* CMD61 */
117 {0, 0}, /* CMD62 */
118 {0, 0} /* CMD63 */
119};
120
121static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd)
122{
123 struct cvm_mmc_cr_type *cr;
124 u8 hardware_ctype, hardware_rtype;
125 u8 desired_ctype = 0, desired_rtype = 0;
126 struct cvm_mmc_cr_mods r;
127
128 cr = cvm_mmc_cr_types + (cmd->opcode & 0x3f);
129 hardware_ctype = cr->ctype;
130 hardware_rtype = cr->rtype;
131 if (cmd->opcode == MMC_GEN_CMD)
132 hardware_ctype = (cmd->arg & 1) ? 1 : 2;
133
134 switch (mmc_cmd_type(cmd)) {
135 case MMC_CMD_ADTC:
136 desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 2 : 1;
137 break;
138 case MMC_CMD_AC:
139 case MMC_CMD_BC:
140 case MMC_CMD_BCR:
141 desired_ctype = 0;
142 break;
143 }
144
145 switch (mmc_resp_type(cmd)) {
146 case MMC_RSP_NONE:
147 desired_rtype = 0;
148 break;
149 case MMC_RSP_R1:/* MMC_RSP_R5, MMC_RSP_R6, MMC_RSP_R7 */
150 case MMC_RSP_R1B:
151 desired_rtype = 1;
152 break;
153 case MMC_RSP_R2:
154 desired_rtype = 2;
155 break;
156 case MMC_RSP_R3: /* MMC_RSP_R4 */
157 desired_rtype = 3;
158 break;
159 }
160 r.ctype_xor = desired_ctype ^ hardware_ctype;
161 r.rtype_xor = desired_rtype ^ hardware_rtype;
162 return r;
163}
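
/*
 * Worked example of the XOR override above (illustrative): SD CMD8
 * (SEND_IF_COND) is a bcr command with an R7 response, but the CMD8
 * table entry {1, 1} describes the eMMC CMD8 (SEND_EXT_CSD), a read
 * with an R1 response. The core's flags yield desired_ctype = 0 and
 * desired_rtype = 1, so ctype_xor = 1 and rtype_xor = 0, flipping
 * the hardware command type to "no data" while keeping the R1-class
 * response.
 */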
164
165static void check_switch_errors(struct cvm_mmc_host *host)
166{
167 u64 emm_switch;
168
169 emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
170 if (emm_switch & MIO_EMM_SWITCH_ERR0)
171 dev_err(host->dev, "Switch power class error\n");
172 if (emm_switch & MIO_EMM_SWITCH_ERR1)
173 dev_err(host->dev, "Switch hs timing error\n");
174 if (emm_switch & MIO_EMM_SWITCH_ERR2)
175 dev_err(host->dev, "Switch bus width error\n");
176}
177
178static void clear_bus_id(u64 *reg)
179{
180 u64 bus_id_mask = GENMASK_ULL(61, 60);
181
182 *reg &= ~bus_id_mask;
183}
184
185static void set_bus_id(u64 *reg, int bus_id)
186{
187 clear_bus_id(reg);
188	*reg |= FIELD_PREP(GENMASK_ULL(61, 60), bus_id);
189}
190
191static int get_bus_id(u64 reg)
192{
193 return FIELD_GET(GENMASK_ULL(61, 60), reg);
194}
195
196/*
197 * We never set the switch_exe bit since that would interfere
198 * with the commands sent by the MMC core.
199 */
200static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
201{
202 int retries = 100;
203 u64 rsp_sts;
204 int bus_id;
205
206 /*
207	 * Mode settings are only taken from slot 0. Work around that hardware
208 * issue by first switching to slot 0.
209 */
210 bus_id = get_bus_id(emm_switch);
211 clear_bus_id(&emm_switch);
212 writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
213
214 set_bus_id(&emm_switch, bus_id);
215 writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
216
217 /* wait for the switch to finish */
218 do {
219 rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
220 if (!(rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL))
221 break;
222 udelay(10);
223 } while (--retries);
224
225 check_switch_errors(host);
226}
227
228static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val)
229{
230 /* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
231 u64 match = 0x3001070fffffffffull;
232
233 return (slot->cached_switch & match) != (new_val & match);
234}
235
236static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns)
237{
238 u64 timeout;
239
240 if (!slot->clock)
241 return;
242
243 if (ns)
244 timeout = (slot->clock * ns) / NSEC_PER_SEC;
245 else
246 timeout = (slot->clock * 850ull) / 1000ull;
247 writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
248}
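
set_wdog() converts a timeout in nanoseconds into card-clock cycles, falling back to 850 ms worth of cycles when no timeout is supplied. A quick standalone check of that arithmetic (numbers are illustrative):

#include <assert.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t clock = 52000000;		/* 52 MHz card clock */
	uint64_t ns = 100 * 1000 * 1000;	/* 100 ms data timeout */

	/* 52 MHz * 0.1 s = 5,200,000 watchdog cycles */
	assert((clock * ns) / NSEC_PER_SEC == 5200000);
	/* default path: 850 ms worth of cycles */
	assert((clock * 850ULL) / 1000ULL == 44200000);
	return 0;
}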
249
250static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
251{
252 struct cvm_mmc_host *host = slot->host;
253 u64 emm_switch, wdog;
254
255 emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
256 emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
257 MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
258 set_bus_id(&emm_switch, slot->bus_id);
259
260 wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
261 do_switch(slot->host, emm_switch);
262
263 slot->cached_switch = emm_switch;
264
265 msleep(20);
266
267 writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
268}
269
270/* Switch to another slot if needed */
271static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
272{
273 struct cvm_mmc_host *host = slot->host;
274 struct cvm_mmc_slot *old_slot;
275 u64 emm_sample, emm_switch;
276
277 if (slot->bus_id == host->last_slot)
278 return;
279
280 if (host->last_slot >= 0 && host->slot[host->last_slot]) {
281 old_slot = host->slot[host->last_slot];
282 old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
283 old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
284 }
285
286 writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
287 emm_switch = slot->cached_switch;
288 set_bus_id(&emm_switch, slot->bus_id);
289 do_switch(host, emm_switch);
290
291 emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
292 FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
293 writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));
294
295 host->last_slot = slot->bus_id;
296}
297
298static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
299 u64 dbuf)
300{
301 struct sg_mapping_iter *smi = &host->smi;
302 int data_len = req->data->blocks * req->data->blksz;
303 int bytes_xfered, shift = -1;
304 u64 dat = 0;
305
306 /* Auto inc from offset zero */
307 writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host));
308
309 for (bytes_xfered = 0; bytes_xfered < data_len;) {
310 if (smi->consumed >= smi->length) {
311 if (!sg_miter_next(smi))
312 break;
313 smi->consumed = 0;
314 }
315
316 if (shift < 0) {
317 dat = readq(host->base + MIO_EMM_BUF_DAT(host));
318 shift = 56;
319 }
320
321 while (smi->consumed < smi->length && shift >= 0) {
322 ((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
323 bytes_xfered++;
324 smi->consumed++;
325 shift -= 8;
326 }
327 }
328
329 sg_miter_stop(smi);
330 req->data->bytes_xfered = bytes_xfered;
331 req->data->error = 0;
332}
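
do_read() drains each 64-bit FIFO word most-significant byte first, which is why shift starts at 56 and steps down by 8. The unpacking in isolation (a standalone sketch, not driver code):

#include <assert.h>
#include <stdint.h>

/* unpack one FIFO word into buf, most significant byte first */
static void unpack_be64(uint64_t dat, uint8_t *buf)
{
	for (int shift = 56; shift >= 0; shift -= 8)
		*buf++ = (dat >> shift) & 0xff;
}

int main(void)
{
	uint8_t buf[8];

	unpack_be64(0x0102030405060708ULL, buf);
	assert(buf[0] == 0x01 && buf[7] == 0x08);
	return 0;
}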
333
334static void do_write(struct mmc_request *req)
335{
336 req->data->bytes_xfered = req->data->blocks * req->data->blksz;
337 req->data->error = 0;
338}
339
340static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
341 u64 rsp_sts)
342{
343 u64 rsp_hi, rsp_lo;
344
345 if (!(rsp_sts & MIO_EMM_RSP_STS_RSP_VAL))
346 return;
347
348 rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host));
349
350 switch (FIELD_GET(MIO_EMM_RSP_STS_RSP_TYPE, rsp_sts)) {
351 case 1:
352 case 3:
353 req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
354 req->cmd->resp[1] = 0;
355 req->cmd->resp[2] = 0;
356 req->cmd->resp[3] = 0;
357 break;
358 case 2:
359 req->cmd->resp[3] = rsp_lo & 0xffffffff;
360 req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
361 rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host));
362 req->cmd->resp[1] = rsp_hi & 0xffffffff;
363 req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
364 break;
365 }
366}
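
Short responses occupy RSP_LO with the low 8 bits (CRC/end) shifted away; an R2 response spans RSP_HI:RSP_LO and is split into four 32-bit words with resp[0] most significant. The R2 split in isolation (standalone sketch, illustrative values):

#include <assert.h>
#include <stdint.h>

static void split_r2(uint64_t rsp_hi, uint64_t rsp_lo, uint32_t resp[4])
{
	resp[3] = rsp_lo & 0xffffffff;
	resp[2] = rsp_lo >> 32;
	resp[1] = rsp_hi & 0xffffffff;
	resp[0] = rsp_hi >> 32;
}

int main(void)
{
	uint32_t resp[4];

	split_r2(0x1111111122222222ULL, 0x3333333344444444ULL, resp);
	assert(resp[0] == 0x11111111 && resp[3] == 0x44444444);
	return 0;
}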
367
368static int get_dma_dir(struct mmc_data *data)
369{
370 return (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
371}
372
373static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
374{
375 data->bytes_xfered = data->blocks * data->blksz;
376 data->error = 0;
377 return 1;
378}
379
380static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
381{
382 u64 fifo_cfg;
383 int count;
384
385 /* Check if there are any pending requests left */
386 fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
387 count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg);
388 if (count)
389 dev_err(host->dev, "%u requests still pending\n", count);
390
391 data->bytes_xfered = data->blocks * data->blksz;
392 data->error = 0;
393
394 /* Clear and disable FIFO */
395 writeq(MIO_EMM_DMA_FIFO_CFG_CLR, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
396 dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
397 return 1;
398}
399
400static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data)
401{
402 if (host->use_sg && data->sg_len > 1)
403 return finish_dma_sg(host, data);
404 else
405 return finish_dma_single(host, data);
406}
407
408static int check_status(u64 rsp_sts)
409{
410 if (rsp_sts & MIO_EMM_RSP_STS_RSP_BAD_STS ||
411 rsp_sts & MIO_EMM_RSP_STS_RSP_CRC_ERR ||
412 rsp_sts & MIO_EMM_RSP_STS_BLK_CRC_ERR)
413 return -EILSEQ;
414 if (rsp_sts & MIO_EMM_RSP_STS_RSP_TIMEOUT ||
415 rsp_sts & MIO_EMM_RSP_STS_BLK_TIMEOUT)
416 return -ETIMEDOUT;
417 if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
418 return -EIO;
419 return 0;
420}
421
422/* Try to clean up failed DMA. */
423static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
424{
425 u64 emm_dma;
426
427 emm_dma = readq(host->base + MIO_EMM_DMA(host));
428 emm_dma |= FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
429 FIELD_PREP(MIO_EMM_DMA_DAT_NULL, 1);
430 set_bus_id(&emm_dma, get_bus_id(rsp_sts));
431 writeq(emm_dma, host->base + MIO_EMM_DMA(host));
432}
433
434irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
435{
436 struct cvm_mmc_host *host = dev_id;
437 struct mmc_request *req;
438 unsigned long flags = 0;
439 u64 emm_int, rsp_sts;
440 bool host_done;
441
442 if (host->need_irq_handler_lock)
443 spin_lock_irqsave(&host->irq_handler_lock, flags);
444 else
445 __acquire(&host->irq_handler_lock);
446
447 /* Clear interrupt bits (write 1 clears). */
448 emm_int = readq(host->base + MIO_EMM_INT(host));
449 writeq(emm_int, host->base + MIO_EMM_INT(host));
450
451 if (emm_int & MIO_EMM_INT_SWITCH_ERR)
452 check_switch_errors(host);
453
454 req = host->current_req;
455 if (!req)
456 goto out;
457
458 rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
459 /*
460 * dma_val set means the DMA is still in progress. Leave the
461 * request alone and wait for the interrupt indicating that
462 * the DMA has finished.
463 */
464 if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
465 goto out;
466
467 if (!host->dma_active && req->data &&
468 (emm_int & MIO_EMM_INT_BUF_DONE)) {
469 unsigned int type = FIELD_GET(MIO_EMM_RSP_STS_CMD_TYPE, rsp_sts);
470
471 if (type == 1)
472 do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
473 else if (type == 2)
474 do_write(req);
475 }
476
477 host_done = emm_int & MIO_EMM_INT_CMD_DONE ||
478 emm_int & MIO_EMM_INT_DMA_DONE ||
479 emm_int & MIO_EMM_INT_CMD_ERR ||
480 emm_int & MIO_EMM_INT_DMA_ERR;
481
482 if (!(host_done && req->done))
483 goto no_req_done;
484
485 req->cmd->error = check_status(rsp_sts);
486
487 if (host->dma_active && req->data)
488 if (!finish_dma(host, req->data))
489 goto no_req_done;
490
491 set_cmd_response(host, req, rsp_sts);
492 if ((emm_int & MIO_EMM_INT_DMA_ERR) &&
493 (rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
494 cleanup_dma(host, rsp_sts);
495
496 host->current_req = NULL;
497 req->done(req);
498
499no_req_done:
500 if (host->dmar_fixup_done)
501 host->dmar_fixup_done(host);
502 if (host_done)
503 host->release_bus(host);
504out:
505 if (host->need_irq_handler_lock)
506 spin_unlock_irqrestore(&host->irq_handler_lock, flags);
507 else
508 __release(&host->irq_handler_lock);
509 return IRQ_RETVAL(emm_int != 0);
510}
511
512/*
513 * Program DMA_CFG and, if needed, DMA_ADR.
514 * Returns 0 on error, DMA address otherwise.
515 */
516static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
517{
518 u64 dma_cfg, addr;
519 int count, rw;
520
521 count = dma_map_sg(host->dev, data->sg, data->sg_len,
522 get_dma_dir(data));
523 if (!count)
524 return 0;
525
526 rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
527 dma_cfg = FIELD_PREP(MIO_EMM_DMA_CFG_EN, 1) |
528 FIELD_PREP(MIO_EMM_DMA_CFG_RW, rw);
529#ifdef __LITTLE_ENDIAN
530 dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ENDIAN, 1);
531#endif
532 dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_SIZE,
533 (sg_dma_len(&data->sg[0]) / 8) - 1);
534
535 addr = sg_dma_address(&data->sg[0]);
536 if (!host->big_dma_addr)
537 dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ADR, addr);
538 writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
539
540 pr_debug("[%s] sg_dma_len: %u total sg_elem: %d\n",
541 (rw) ? "W" : "R", sg_dma_len(&data->sg[0]), count);
542
543 if (host->big_dma_addr)
544 writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host));
545 return addr;
546}
547
548/*
549 * Queue complete sg list into the FIFO.
550 * Returns 0 on error, 1 otherwise.
551 */
552static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
553{
554 struct scatterlist *sg;
555 u64 fifo_cmd, addr;
556 int count, i, rw;
557
558 count = dma_map_sg(host->dev, data->sg, data->sg_len,
559 get_dma_dir(data));
560 if (!count)
561 return 0;
562 if (count > 16)
563 goto error;
564
565 /* Enable the FIFO by clearing the CLR bit */
566 writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
567
568 for_each_sg(data->sg, sg, count, i) {
569 /* Program DMA address */
570 addr = sg_dma_address(sg);
571 if (addr & 7)
572 goto error;
573 writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host));
574
575 /*
576 * If we have scatter-gather support we also have an extra
577 * register for the DMA addr, so no need to check
578 * host->big_dma_addr here.
579 */
580 rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
581 fifo_cmd = FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_RW, rw);
582
583 /* enable interrupts on the last element */
584 fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_INTDIS,
585 (i + 1 == count) ? 0 : 1);
586
587#ifdef __LITTLE_ENDIAN
588 fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_ENDIAN, 1);
589#endif
590 fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_SIZE,
591 sg_dma_len(sg) / 8 - 1);
592 /*
593 * The write copies the address and the command to the FIFO
594 * and increments the FIFO's COUNT field.
595 */
596 writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host));
597 pr_debug("[%s] sg_dma_len: %u sg_elem: %d/%d\n",
598 (rw) ? "W" : "R", sg_dma_len(sg), i, count);
599 }
600
601 /*
602 * Unlike prepare_dma_single, we don't return the
603 * address here, as it would not make sense for scatter-gather.
604 * The dma fixup is only required on models that don't support
605 * scatter-gather, so that is not a problem.
606 */
607 return 1;
608
609error:
610 WARN_ON_ONCE(1);
611 dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
612 /* Disable FIFO */
613 writeq(MIO_EMM_DMA_FIFO_CFG_CLR, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
614 return 0;
615}
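
Each FIFO entry encodes its segment length in 64-bit words minus one, which is also why segment addresses must be 8-byte aligned. A standalone check of that encoding (illustrative values):

#include <assert.h>
#include <stdint.h>

/* encode a byte length into the FIFO SIZE field: 64-bit words - 1 */
static uint64_t fifo_size_field(uint64_t bytes)
{
	assert((bytes & 7) == 0);	/* hardware works in 8-byte units */
	return bytes / 8 - 1;
}

int main(void)
{
	assert(fifo_size_field(512) == 63);	/* one 512-byte block */
	assert(fifo_size_field(8) == 0);	/* minimum transfer */
	return 0;
}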
616
617static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data)
618{
619 if (host->use_sg && data->sg_len > 1)
620 return prepare_dma_sg(host, data);
621 else
622 return prepare_dma_single(host, data);
623}
624
625static u64 prepare_ext_dma(struct mmc_host *mmc, struct mmc_request *mrq)
626{
627 struct cvm_mmc_slot *slot = mmc_priv(mmc);
628 u64 emm_dma;
629
630 emm_dma = FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
631 FIELD_PREP(MIO_EMM_DMA_SECTOR,
632 mmc_card_is_blockaddr(mmc->card) ? 1 : 0) |
633 FIELD_PREP(MIO_EMM_DMA_RW,
634 (mrq->data->flags & MMC_DATA_WRITE) ? 1 : 0) |
635 FIELD_PREP(MIO_EMM_DMA_BLOCK_CNT, mrq->data->blocks) |
636 FIELD_PREP(MIO_EMM_DMA_CARD_ADDR, mrq->cmd->arg);
637 set_bus_id(&emm_dma, slot->bus_id);
638
639 if (mmc_card_mmc(mmc->card) || (mmc_card_sd(mmc->card) &&
640 (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)))
641 emm_dma |= FIELD_PREP(MIO_EMM_DMA_MULTI, 1);
642
643 pr_debug("[%s] blocks: %u multi: %d\n",
644 (emm_dma & MIO_EMM_DMA_RW) ? "W" : "R",
645 mrq->data->blocks, (emm_dma & MIO_EMM_DMA_MULTI) ? 1 : 0);
646 return emm_dma;
647}
648
649static void cvm_mmc_dma_request(struct mmc_host *mmc,
650 struct mmc_request *mrq)
651{
652 struct cvm_mmc_slot *slot = mmc_priv(mmc);
653 struct cvm_mmc_host *host = slot->host;
654 struct mmc_data *data;
655 u64 emm_dma, addr;
656
657 if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
658 !mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
659 dev_err(&mmc->card->dev,
660 "Error: cvm_mmc_dma_request no data\n");
661 goto error;
662 }
663
664 cvm_mmc_switch_to(slot);
665
666 data = mrq->data;
667 pr_debug("DMA request blocks: %d block_size: %d total_size: %d\n",
668 data->blocks, data->blksz, data->blocks * data->blksz);
669 if (data->timeout_ns)
670 set_wdog(slot, data->timeout_ns);
671
672 WARN_ON(host->current_req);
673 host->current_req = mrq;
674
675 emm_dma = prepare_ext_dma(mmc, mrq);
676 addr = prepare_dma(host, data);
677 if (!addr) {
678 dev_err(host->dev, "prepare_dma failed\n");
679 goto error;
680 }
681
682 host->dma_active = true;
683 host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
684 MIO_EMM_INT_DMA_ERR);
685
686 if (host->dmar_fixup)
687 host->dmar_fixup(host, mrq->cmd, data, addr);
688
689 /*
690 * If we have a valid SD card in the slot, we set the response
691 * bit mask to check for CRC errors and timeouts only.
692 * Otherwise, use the default power reset value.
693 */
694 if (mmc_card_sd(mmc->card))
695 writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host));
696 else
697 writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
698 writeq(emm_dma, host->base + MIO_EMM_DMA(host));
699 return;
700
701error:
702 mrq->cmd->error = -EINVAL;
703 if (mrq->done)
704 mrq->done(mrq);
705 host->release_bus(host);
706}
707
708static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
709{
710 sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
711 SG_MITER_ATOMIC | SG_MITER_TO_SG);
712}
713
714static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
715{
716 unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
717 struct sg_mapping_iter *smi = &host->smi;
718 unsigned int bytes_xfered;
719 int shift = 56;
720 u64 dat = 0;
721
722 /* Copy data to the xmit buffer before issuing the command. */
723 sg_miter_start(smi, mrq->data->sg, mrq->data->sg_len, SG_MITER_FROM_SG);
724
725 /* Auto inc from offset zero, dbuf zero */
726 writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host));
727
728 for (bytes_xfered = 0; bytes_xfered < data_len;) {
729 if (smi->consumed >= smi->length) {
730 if (!sg_miter_next(smi))
731 break;
732 smi->consumed = 0;
733 }
734
735 while (smi->consumed < smi->length && shift >= 0) {
736 dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift;
737 bytes_xfered++;
738 smi->consumed++;
739 shift -= 8;
740 }
741
742 if (shift < 0) {
743 writeq(dat, host->base + MIO_EMM_BUF_DAT(host));
744 shift = 56;
745 dat = 0;
746 }
747 }
748 sg_miter_stop(smi);
749}
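
The write path mirrors do_read(): bytes accumulate into a 64-bit word most-significant byte first and the word is flushed once shift wraps below zero. The packing in isolation (standalone sketch, not driver code):

#include <assert.h>
#include <stdint.h>

/* pack eight bytes into one FIFO word, most significant byte first */
static uint64_t pack_be64(const uint8_t *buf)
{
	uint64_t dat = 0;

	for (int shift = 56; shift >= 0; shift -= 8)
		dat |= (uint64_t)*buf++ << shift;
	return dat;
}

int main(void)
{
	const uint8_t buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	assert(pack_be64(buf) == 0x0102030405060708ULL);
	return 0;
}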
750
751static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
752{
753 struct cvm_mmc_slot *slot = mmc_priv(mmc);
754 struct cvm_mmc_host *host = slot->host;
755 struct mmc_command *cmd = mrq->cmd;
756 struct cvm_mmc_cr_mods mods;
757 u64 emm_cmd, rsp_sts;
758 int retries = 100;
759
760 /*
761 * Note about locking:
762 * All MMC devices share the same bus and controller. Allow only a
763 * single user of the bootbus/MMC bus at a time. The lock is acquired
764 * on all entry points from the MMC layer.
765 *
766 * For requests the lock is only released after the completion
767 * interrupt!
768 */
769 host->acquire_bus(host);
770
771 if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
772 cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
773 return cvm_mmc_dma_request(mmc, mrq);
774
775 cvm_mmc_switch_to(slot);
776
777 mods = cvm_mmc_get_cr_mods(cmd);
778
779 WARN_ON(host->current_req);
780 host->current_req = mrq;
781
782 if (cmd->data) {
783 if (cmd->data->flags & MMC_DATA_READ)
784 do_read_request(host, mrq);
785 else
786 do_write_request(host, mrq);
787
788 if (cmd->data->timeout_ns)
789 set_wdog(slot, cmd->data->timeout_ns);
790 } else
791 set_wdog(slot, 0);
792
793 host->dma_active = false;
794 host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);
795
796 emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
797 FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
798 FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
799 FIELD_PREP(MIO_EMM_CMD_IDX, cmd->opcode) |
800 FIELD_PREP(MIO_EMM_CMD_ARG, cmd->arg);
801 set_bus_id(&emm_cmd, slot->bus_id);
802 if (cmd->data && mmc_cmd_type(cmd) == MMC_CMD_ADTC)
803 emm_cmd |= FIELD_PREP(MIO_EMM_CMD_OFFSET,
804 64 - ((cmd->data->blocks * cmd->data->blksz) / 8));
805
806 writeq(0, host->base + MIO_EMM_STS_MASK(host));
807
808retry:
809 rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
810 if (rsp_sts & MIO_EMM_RSP_STS_DMA_VAL ||
811 rsp_sts & MIO_EMM_RSP_STS_CMD_VAL ||
812 rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL ||
813 rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) {
814 udelay(10);
815 if (--retries)
816 goto retry;
817 }
818 if (!retries)
819 dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts);
820 writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
821}
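
For ADTC commands the 512-byte hardware buffer is filled from the top, so CMD_OFFSET holds the number of unused leading 64-bit words: a full 512-byte transfer encodes as 0 and an 8-byte transfer as 63. A standalone check (illustrative values):

#include <assert.h>

/* unused leading 64-bit words in the 512-byte (64-word) data buffer */
static unsigned int cmd_offset(unsigned int blocks, unsigned int blksz)
{
	return 64 - (blocks * blksz) / 8;
}

int main(void)
{
	assert(cmd_offset(1, 512) == 0);	/* full buffer */
	assert(cmd_offset(1, 8) == 63);		/* minimal transfer */
	return 0;
}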
822
823static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
824{
825 struct cvm_mmc_slot *slot = mmc_priv(mmc);
826 struct cvm_mmc_host *host = slot->host;
827 int clk_period = 0, power_class = 10, bus_width = 0;
828 u64 clock, emm_switch;
829
830 host->acquire_bus(host);
831 cvm_mmc_switch_to(slot);
832
833 /* Set the power state */
834 switch (ios->power_mode) {
835 case MMC_POWER_ON:
836 break;
837
838 case MMC_POWER_OFF:
839 cvm_mmc_reset_bus(slot);
840 if (host->global_pwr_gpiod)
841 host->set_shared_power(host, 0);
842 else
843 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
844 break;
845
846 case MMC_POWER_UP:
847 if (host->global_pwr_gpiod)
848 host->set_shared_power(host, 1);
849 else
850 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
851 break;
852 }
853
854 /* Convert bus width to HW definition */
855 switch (ios->bus_width) {
856 case MMC_BUS_WIDTH_8:
857 bus_width = 2;
858 break;
859 case MMC_BUS_WIDTH_4:
860 bus_width = 1;
861 break;
862 case MMC_BUS_WIDTH_1:
863 bus_width = 0;
864 break;
865 }
866
867 /* DDR is available for 4/8 bit bus width */
868 if (ios->bus_width && ios->timing == MMC_TIMING_MMC_DDR52)
869 bus_width |= 4;
870
871 /* Change the clock frequency. */
872 clock = ios->clock;
873 if (clock > 52000000)
874 clock = 52000000;
875 slot->clock = clock;
876
877 if (clock)
878 clk_period = (host->sys_freq + clock - 1) / (2 * clock);
879
880 emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
881 (ios->timing == MMC_TIMING_MMC_HS)) |
882 FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) |
883 FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) |
884 FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
885 FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
886 set_bus_id(&emm_switch, slot->bus_id);
887
888 if (!switch_val_changed(slot, emm_switch))
889 goto out;
890
891 set_wdog(slot, 0);
892 do_switch(host, emm_switch);
893 slot->cached_switch = emm_switch;
894out:
895 host->release_bus(host);
896}
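
CLK_HI and CLK_LO each carry half the card-clock period in sys_freq cycles, rounded up so the card is never overclocked. With an 800 MHz sys_freq and a 52 MHz target, for instance, the half-period is 8 cycles and the resulting clock 50 MHz. A standalone check (the 800 MHz figure is illustrative, not a hardware claim):

#include <assert.h>
#include <stdint.h>

/* half-period in sys_freq cycles, rounded up (never overclock) */
static uint64_t half_period(uint64_t sys_freq, uint64_t clock)
{
	return (sys_freq + clock - 1) / (2 * clock);
}

int main(void)
{
	uint64_t p = half_period(800000000, 52000000);

	assert(p == 8);
	assert(800000000 / (2 * p) == 50000000);	/* <= 52 MHz */
	return 0;
}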
897
898static const struct mmc_host_ops cvm_mmc_ops = {
899 .request = cvm_mmc_request,
900 .set_ios = cvm_mmc_set_ios,
901 .get_ro = mmc_gpio_get_ro,
902 .get_cd = mmc_gpio_get_cd,
903};
904
905static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock)
906{
907 struct mmc_host *mmc = slot->mmc;
908
909 clock = min(clock, mmc->f_max);
910 clock = max(clock, mmc->f_min);
911 slot->clock = clock;
912}
913
914static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
915{
916 struct cvm_mmc_host *host = slot->host;
917 u64 emm_switch;
918
919 /* Enable this bus slot. */
920 host->emm_cfg |= (1ull << slot->bus_id);
921 writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));
922 udelay(10);
923
924 /* Program initial clock speed and power. */
925 cvm_mmc_set_clock(slot, slot->mmc->f_min);
926 emm_switch = FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, 10);
927 emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
928 (host->sys_freq / slot->clock) / 2);
929 emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
930 (host->sys_freq / slot->clock) / 2);
931
932 /* Make the changes take effect on this bus slot. */
933 set_bus_id(&emm_switch, slot->bus_id);
934 do_switch(host, emm_switch);
935
936 slot->cached_switch = emm_switch;
937
938 /*
939 * Set watchdog timeout value and default reset value
940 * for the mask register. Finally, set the CARD_RCA
941 * bit so that we can get the card address relative
942 * to the CMD register for CMD7 transactions.
943 */
944 set_wdog(slot, 0);
945 writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
946 writeq(1, host->base + MIO_EMM_RCA(host));
947 return 0;
948}
949
950static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
951{
952 u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0;
953 struct device_node *node = dev->of_node;
954 struct mmc_host *mmc = slot->mmc;
955 u64 clock_period;
956 int ret;
957
958 ret = of_property_read_u32(node, "reg", &id);
959 if (ret) {
960 dev_err(dev, "Missing or invalid reg property on %s\n",
961 of_node_full_name(node));
962 return ret;
963 }
964
965 if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
966 dev_err(dev, "Invalid reg property on %s\n",
967 of_node_full_name(node));
968 return -EINVAL;
969 }
970
971 mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
972 if (IS_ERR(mmc->supply.vmmc)) {
973 if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
974 return -EPROBE_DEFER;
975 /*
976 * Legacy Octeon firmware has no regulator entry, fall-back to
977 * a hard-coded voltage to get a sane OCR.
978 */
979 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
980 } else {
981 ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
982 if (ret > 0)
983 mmc->ocr_avail = ret;
984 }
985
986 /* Common MMC bindings */
987 ret = mmc_of_parse(mmc);
988 if (ret)
989 return ret;
990
991 /* Set bus width */
992 if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
993 of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
994 if (bus_width == 8)
995 mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
996 else if (bus_width == 4)
997 mmc->caps |= MMC_CAP_4_BIT_DATA;
998 }
999
1000 /* Set maximum and minimum frequency */
1001 if (!mmc->f_max)
1002 of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
1003 if (!mmc->f_max || mmc->f_max > 52000000)
1004 mmc->f_max = 52000000;
1005 mmc->f_min = 400000;
1006
1007 /* Sampling register settings, period in picoseconds */
1008 clock_period = 1000000000000ull / slot->host->sys_freq;
1009 of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
1010 of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
1011 slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
1012 slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;
1013
1014 return id;
1015}
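
The DT skew properties are given in picoseconds and rounded to the nearest controller clock period to obtain sample-tap counts; at 800 MHz the period is 1250 ps, so a 2500 ps skew yields 2 taps. A standalone check (numbers illustrative):

#include <assert.h>
#include <stdint.h>

/* convert a skew in picoseconds to sample-clock taps, rounded */
static uint64_t skew_to_taps(uint64_t skew_ps, uint64_t sys_freq)
{
	uint64_t period_ps = 1000000000000ULL / sys_freq;

	return (skew_ps + period_ps / 2) / period_ps;
}

int main(void)
{
	assert(skew_to_taps(2500, 800000000) == 2);
	assert(skew_to_taps(0, 800000000) == 0);
	return 0;
}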
1016
1017int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
1018{
1019 struct cvm_mmc_slot *slot;
1020 struct mmc_host *mmc;
1021 int ret, id;
1022
1023 mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
1024 if (!mmc)
1025 return -ENOMEM;
1026
1027 slot = mmc_priv(mmc);
1028 slot->mmc = mmc;
1029 slot->host = host;
1030
1031 ret = cvm_mmc_of_parse(dev, slot);
1032 if (ret < 0)
1033 goto error;
1034 id = ret;
1035
1036 /* Set up host parameters */
1037 mmc->ops = &cvm_mmc_ops;
1038
1039 /*
1040 * We only have a 3.3V supply, so we cannot support any
1041 * of the UHS modes. We do support the high-speed DDR
1042 * modes up to 52 MHz.
1043 */
1044 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
1045 MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
1046 MMC_CAP_3_3V_DDR;
1047
1048 if (host->use_sg)
1049 mmc->max_segs = 16;
1050 else
1051 mmc->max_segs = 1;
1052
1053 /* DMA size field can address up to 8 MB */
1054 mmc->max_seg_size = 8 * 1024 * 1024;
1055 mmc->max_req_size = mmc->max_seg_size;
1056 /* External DMA is in 512 byte blocks */
1057 mmc->max_blk_size = 512;
1058 /* DMA block count field is 15 bits */
1059 mmc->max_blk_count = 32767;
1060
1061 slot->clock = mmc->f_min;
1062 slot->bus_id = id;
1063 slot->cached_rca = 1;
1064
1065 host->acquire_bus(host);
1066 host->slot[id] = slot;
1067 cvm_mmc_switch_to(slot);
1068 cvm_mmc_init_lowlevel(slot);
1069 host->release_bus(host);
1070
1071 ret = mmc_add_host(mmc);
1072 if (ret) {
1073 dev_err(dev, "mmc_add_host() returned %d\n", ret);
1074 slot->host->slot[id] = NULL;
1075 goto error;
1076 }
1077 return 0;
1078
1079error:
1080 mmc_free_host(slot->mmc);
1081 return ret;
1082}
1083
1084int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot)
1085{
1086 mmc_remove_host(slot->mmc);
1087 slot->host->slot[slot->bus_id] = NULL;
1088 mmc_free_host(slot->mmc);
1089 return 0;
1090}
diff --git a/drivers/mmc/host/cavium.h b/drivers/mmc/host/cavium.h
new file mode 100644
index 000000000000..f3eea5eaa678
--- /dev/null
+++ b/drivers/mmc/host/cavium.h
@@ -0,0 +1,215 @@
1/*
2 * Driver for MMC and SSD cards for Cavium OCTEON and ThunderX SOCs.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2012-2017 Cavium Inc.
9 */
10
11#ifndef _CAVIUM_MMC_H_
12#define _CAVIUM_MMC_H_
13
14#include <linux/bitops.h>
15#include <linux/clk.h>
16#include <linux/gpio/consumer.h>
17#include <linux/io.h>
18#include <linux/mmc/host.h>
19#include <linux/of.h>
20#include <linux/scatterlist.h>
21#include <linux/semaphore.h>
22
23#define CAVIUM_MAX_MMC 4
24
25/* DMA register addresses */
26#define MIO_EMM_DMA_FIFO_CFG(x) (0x00 + x->reg_off_dma)
27#define MIO_EMM_DMA_FIFO_ADR(x) (0x10 + x->reg_off_dma)
28#define MIO_EMM_DMA_FIFO_CMD(x) (0x18 + x->reg_off_dma)
29#define MIO_EMM_DMA_CFG(x) (0x20 + x->reg_off_dma)
30#define MIO_EMM_DMA_ADR(x) (0x28 + x->reg_off_dma)
31#define MIO_EMM_DMA_INT(x) (0x30 + x->reg_off_dma)
32#define MIO_EMM_DMA_INT_W1S(x) (0x38 + x->reg_off_dma)
33#define MIO_EMM_DMA_INT_ENA_W1S(x) (0x40 + x->reg_off_dma)
34#define MIO_EMM_DMA_INT_ENA_W1C(x) (0x48 + x->reg_off_dma)
35
36/* register addresses */
37#define MIO_EMM_CFG(x) (0x00 + x->reg_off)
38#define MIO_EMM_SWITCH(x) (0x48 + x->reg_off)
39#define MIO_EMM_DMA(x) (0x50 + x->reg_off)
40#define MIO_EMM_CMD(x) (0x58 + x->reg_off)
41#define MIO_EMM_RSP_STS(x) (0x60 + x->reg_off)
42#define MIO_EMM_RSP_LO(x) (0x68 + x->reg_off)
43#define MIO_EMM_RSP_HI(x) (0x70 + x->reg_off)
44#define MIO_EMM_INT(x) (0x78 + x->reg_off)
45#define MIO_EMM_INT_EN(x) (0x80 + x->reg_off)
46#define MIO_EMM_WDOG(x) (0x88 + x->reg_off)
47#define MIO_EMM_SAMPLE(x) (0x90 + x->reg_off)
48#define MIO_EMM_STS_MASK(x) (0x98 + x->reg_off)
49#define MIO_EMM_RCA(x) (0xa0 + x->reg_off)
50#define MIO_EMM_INT_EN_SET(x) (0xb0 + x->reg_off)
51#define MIO_EMM_INT_EN_CLR(x) (0xb8 + x->reg_off)
52#define MIO_EMM_BUF_IDX(x) (0xe0 + x->reg_off)
53#define MIO_EMM_BUF_DAT(x) (0xe8 + x->reg_off)
54
55struct cvm_mmc_host {
56 struct device *dev;
57 void __iomem *base;
58 void __iomem *dma_base;
59 int reg_off;
60 int reg_off_dma;
61 u64 emm_cfg;
62 u64 n_minus_one; /* OCTEON II workaround location */
63 int last_slot;
64 struct clk *clk;
65 int sys_freq;
66
67 struct mmc_request *current_req;
68 struct sg_mapping_iter smi;
69 bool dma_active;
70 bool use_sg;
71
72 bool has_ciu3;
73 bool big_dma_addr;
74 bool need_irq_handler_lock;
75 spinlock_t irq_handler_lock;
76 struct semaphore mmc_serializer;
77
78 struct gpio_desc *global_pwr_gpiod;
79 atomic_t shared_power_users;
80
81 struct cvm_mmc_slot *slot[CAVIUM_MAX_MMC];
82 struct platform_device *slot_pdev[CAVIUM_MAX_MMC];
83
84 void (*set_shared_power)(struct cvm_mmc_host *, int);
85 void (*acquire_bus)(struct cvm_mmc_host *);
86 void (*release_bus)(struct cvm_mmc_host *);
87 void (*int_enable)(struct cvm_mmc_host *, u64);
88 /* required on some MIPS models */
89 void (*dmar_fixup)(struct cvm_mmc_host *, struct mmc_command *,
90 struct mmc_data *, u64);
91 void (*dmar_fixup_done)(struct cvm_mmc_host *);
92};
93
94struct cvm_mmc_slot {
95 struct mmc_host *mmc; /* slot-level mmc_core object */
96 struct cvm_mmc_host *host; /* common hw for all slots */
97
98 u64 clock;
99
100 u64 cached_switch;
101 u64 cached_rca;
102
103 unsigned int cmd_cnt; /* sample delay */
104 unsigned int dat_cnt; /* sample delay */
105
106 int bus_id;
107};
108
109struct cvm_mmc_cr_type {
110 u8 ctype;
111 u8 rtype;
112};
113
114struct cvm_mmc_cr_mods {
115 u8 ctype_xor;
116 u8 rtype_xor;
117};
118
119/* Bitfield definitions */
120#define MIO_EMM_DMA_FIFO_CFG_CLR BIT_ULL(16)
121#define MIO_EMM_DMA_FIFO_CFG_INT_LVL GENMASK_ULL(12, 8)
122#define MIO_EMM_DMA_FIFO_CFG_COUNT GENMASK_ULL(4, 0)
123
124#define MIO_EMM_DMA_FIFO_CMD_RW BIT_ULL(62)
125#define MIO_EMM_DMA_FIFO_CMD_INTDIS BIT_ULL(60)
126#define MIO_EMM_DMA_FIFO_CMD_SWAP32 BIT_ULL(59)
127#define MIO_EMM_DMA_FIFO_CMD_SWAP16 BIT_ULL(58)
128#define MIO_EMM_DMA_FIFO_CMD_SWAP8 BIT_ULL(57)
129#define MIO_EMM_DMA_FIFO_CMD_ENDIAN BIT_ULL(56)
130#define MIO_EMM_DMA_FIFO_CMD_SIZE GENMASK_ULL(55, 36)
131
132#define MIO_EMM_CMD_SKIP_BUSY BIT_ULL(62)
133#define MIO_EMM_CMD_BUS_ID GENMASK_ULL(61, 60)
134#define MIO_EMM_CMD_VAL BIT_ULL(59)
135#define MIO_EMM_CMD_DBUF BIT_ULL(55)
136#define MIO_EMM_CMD_OFFSET GENMASK_ULL(54, 49)
137#define MIO_EMM_CMD_CTYPE_XOR GENMASK_ULL(42, 41)
138#define MIO_EMM_CMD_RTYPE_XOR GENMASK_ULL(40, 38)
139#define MIO_EMM_CMD_IDX GENMASK_ULL(37, 32)
140#define MIO_EMM_CMD_ARG GENMASK_ULL(31, 0)
141
142#define MIO_EMM_DMA_SKIP_BUSY BIT_ULL(62)
143#define MIO_EMM_DMA_BUS_ID GENMASK_ULL(61, 60)
144#define MIO_EMM_DMA_VAL BIT_ULL(59)
145#define MIO_EMM_DMA_SECTOR BIT_ULL(58)
146#define MIO_EMM_DMA_DAT_NULL BIT_ULL(57)
147#define MIO_EMM_DMA_THRES GENMASK_ULL(56, 51)
148#define MIO_EMM_DMA_REL_WR BIT_ULL(50)
149#define MIO_EMM_DMA_RW BIT_ULL(49)
150#define MIO_EMM_DMA_MULTI BIT_ULL(48)
151#define MIO_EMM_DMA_BLOCK_CNT GENMASK_ULL(47, 32)
152#define MIO_EMM_DMA_CARD_ADDR GENMASK_ULL(31, 0)
153
154#define MIO_EMM_DMA_CFG_EN BIT_ULL(63)
155#define MIO_EMM_DMA_CFG_RW BIT_ULL(62)
156#define MIO_EMM_DMA_CFG_CLR BIT_ULL(61)
157#define MIO_EMM_DMA_CFG_SWAP32 BIT_ULL(59)
158#define MIO_EMM_DMA_CFG_SWAP16 BIT_ULL(58)
159#define MIO_EMM_DMA_CFG_SWAP8 BIT_ULL(57)
160#define MIO_EMM_DMA_CFG_ENDIAN BIT_ULL(56)
161#define MIO_EMM_DMA_CFG_SIZE GENMASK_ULL(55, 36)
162#define MIO_EMM_DMA_CFG_ADR GENMASK_ULL(35, 0)
163
164#define MIO_EMM_INT_SWITCH_ERR BIT_ULL(6)
165#define MIO_EMM_INT_SWITCH_DONE BIT_ULL(5)
166#define MIO_EMM_INT_DMA_ERR BIT_ULL(4)
167#define MIO_EMM_INT_CMD_ERR BIT_ULL(3)
168#define MIO_EMM_INT_DMA_DONE BIT_ULL(2)
169#define MIO_EMM_INT_CMD_DONE BIT_ULL(1)
170#define MIO_EMM_INT_BUF_DONE BIT_ULL(0)
171
172#define MIO_EMM_RSP_STS_BUS_ID GENMASK_ULL(61, 60)
173#define MIO_EMM_RSP_STS_CMD_VAL BIT_ULL(59)
174#define MIO_EMM_RSP_STS_SWITCH_VAL BIT_ULL(58)
175#define MIO_EMM_RSP_STS_DMA_VAL BIT_ULL(57)
176#define MIO_EMM_RSP_STS_DMA_PEND BIT_ULL(56)
177#define MIO_EMM_RSP_STS_DBUF_ERR BIT_ULL(28)
178#define MIO_EMM_RSP_STS_DBUF BIT_ULL(23)
179#define MIO_EMM_RSP_STS_BLK_TIMEOUT BIT_ULL(22)
180#define MIO_EMM_RSP_STS_BLK_CRC_ERR BIT_ULL(21)
181#define MIO_EMM_RSP_STS_RSP_BUSYBIT BIT_ULL(20)
182#define MIO_EMM_RSP_STS_STP_TIMEOUT BIT_ULL(19)
183#define MIO_EMM_RSP_STS_STP_CRC_ERR BIT_ULL(18)
184#define MIO_EMM_RSP_STS_STP_BAD_STS BIT_ULL(17)
185#define MIO_EMM_RSP_STS_STP_VAL BIT_ULL(16)
186#define MIO_EMM_RSP_STS_RSP_TIMEOUT BIT_ULL(15)
187#define MIO_EMM_RSP_STS_RSP_CRC_ERR BIT_ULL(14)
188#define MIO_EMM_RSP_STS_RSP_BAD_STS BIT_ULL(13)
189#define MIO_EMM_RSP_STS_RSP_VAL BIT_ULL(12)
190#define MIO_EMM_RSP_STS_RSP_TYPE GENMASK_ULL(11, 9)
191#define MIO_EMM_RSP_STS_CMD_TYPE GENMASK_ULL(8, 7)
192#define MIO_EMM_RSP_STS_CMD_IDX GENMASK_ULL(6, 1)
193#define MIO_EMM_RSP_STS_CMD_DONE BIT_ULL(0)
194
195#define MIO_EMM_SAMPLE_CMD_CNT GENMASK_ULL(25, 16)
196#define MIO_EMM_SAMPLE_DAT_CNT GENMASK_ULL(9, 0)
197
198#define MIO_EMM_SWITCH_BUS_ID GENMASK_ULL(61, 60)
199#define MIO_EMM_SWITCH_EXE BIT_ULL(59)
200#define MIO_EMM_SWITCH_ERR0 BIT_ULL(58)
201#define MIO_EMM_SWITCH_ERR1 BIT_ULL(57)
202#define MIO_EMM_SWITCH_ERR2 BIT_ULL(56)
203#define MIO_EMM_SWITCH_HS_TIMING BIT_ULL(48)
204#define MIO_EMM_SWITCH_BUS_WIDTH GENMASK_ULL(42, 40)
205#define MIO_EMM_SWITCH_POWER_CLASS GENMASK_ULL(35, 32)
206#define MIO_EMM_SWITCH_CLK_HI GENMASK_ULL(31, 16)
207#define MIO_EMM_SWITCH_CLK_LO GENMASK_ULL(15, 0)
208
209/* Prototypes */
210irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id);
211int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host);
212int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot);
213extern const char *cvm_mmc_irq_names[];
214
215#endif
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 1e2600da105f..621ce47e0e4a 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -478,18 +478,14 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
478 int ret = 0; 478 int ret = 0;
479 479
480 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 480 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
481 ((data->flags & MMC_DATA_WRITE) 481 mmc_get_dma_dir(data));
482 ? DMA_TO_DEVICE
483 : DMA_FROM_DEVICE));
484 482
485 /* no individual DMA segment should need a partial FIFO */ 483 /* no individual DMA segment should need a partial FIFO */
486 for (i = 0; i < host->sg_len; i++) { 484 for (i = 0; i < host->sg_len; i++) {
487 if (sg_dma_len(data->sg + i) & mask) { 485 if (sg_dma_len(data->sg + i) & mask) {
488 dma_unmap_sg(mmc_dev(host->mmc), 486 dma_unmap_sg(mmc_dev(host->mmc),
489 data->sg, data->sg_len, 487 data->sg, data->sg_len,
490 (data->flags & MMC_DATA_WRITE) 488 mmc_get_dma_dir(data));
491 ? DMA_TO_DEVICE
492 : DMA_FROM_DEVICE);
493 return -1; 489 return -1;
494 } 490 }
495 } 491 }
@@ -802,9 +798,7 @@ mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
802 davinci_abort_dma(host); 798 davinci_abort_dma(host);
803 799
804 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 800 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
805 (data->flags & MMC_DATA_WRITE) 801 mmc_get_dma_dir(data));
806 ? DMA_TO_DEVICE
807 : DMA_FROM_DEVICE);
808 host->do_dma = false; 802 host->do_dma = false;
809 } 803 }
810 host->data_dir = DAVINCI_MMC_DATADIR_NONE; 804 host->data_dir = DAVINCI_MMC_DATADIR_NONE;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 8718432751c5..e45129f48174 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -19,6 +19,7 @@
19#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/iopoll.h>
22#include <linux/ioport.h> 23#include <linux/ioport.h>
23#include <linux/module.h> 24#include <linux/module.h>
24#include <linux/platform_device.h> 25#include <linux/platform_device.h>
@@ -65,6 +66,8 @@
65 66
66struct idmac_desc_64addr { 67struct idmac_desc_64addr {
67 u32 des0; /* Control Descriptor */ 68 u32 des0; /* Control Descriptor */
69#define IDMAC_OWN_CLR64(x) \
70 !((x) & cpu_to_le32(IDMAC_DES0_OWN))
68 71
69 u32 des1; /* Reserved */ 72 u32 des1; /* Reserved */
70 73
@@ -104,11 +107,6 @@ struct idmac_desc {
104/* Each descriptor can transfer up to 4KB of data in chained mode */ 107/* Each descriptor can transfer up to 4KB of data in chained mode */
105#define DW_MCI_DESC_DATA_LENGTH 0x1000 108#define DW_MCI_DESC_DATA_LENGTH 0x1000
106 109
107static bool dw_mci_reset(struct dw_mci *host);
108static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
109static int dw_mci_card_busy(struct mmc_host *mmc);
110static int dw_mci_get_cd(struct mmc_host *mmc);
111
112#if defined(CONFIG_DEBUG_FS) 110#if defined(CONFIG_DEBUG_FS)
113static int dw_mci_req_show(struct seq_file *s, void *v) 111static int dw_mci_req_show(struct seq_file *s, void *v)
114{ 112{
@@ -232,7 +230,66 @@ err:
232} 230}
233#endif /* defined(CONFIG_DEBUG_FS) */ 231#endif /* defined(CONFIG_DEBUG_FS) */
234 232
235static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg); 233static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
234{
235 u32 ctrl;
236
237 ctrl = mci_readl(host, CTRL);
238 ctrl |= reset;
239 mci_writel(host, CTRL, ctrl);
240
241 /* wait till resets clear */
242 if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
243 !(ctrl & reset),
244 1, 500 * USEC_PER_MSEC)) {
245 dev_err(host->dev,
246 "Timeout resetting block (ctrl reset %#x)\n",
247 ctrl & reset);
248 return false;
249 }
250
251 return true;
252}
253
254static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
255{
256 u32 status;
257
258 /*
259 * Databook says that before issuing a new data transfer command
260 * we need to check to see if the card is busy. Data transfer commands
261 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
262 *
263 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
264 * expected.
265 */
266 if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
267 !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
268 if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
269 status,
270 !(status & SDMMC_STATUS_BUSY),
271 10, 500 * USEC_PER_MSEC))
272 dev_err(host->dev, "Busy; trying anyway\n");
273 }
274}
275
276static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
277{
278 struct dw_mci *host = slot->host;
279 unsigned int cmd_status = 0;
280
281 mci_writel(host, CMDARG, arg);
282 wmb(); /* drain writebuffer */
283 dw_mci_wait_while_busy(host, cmd);
284 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
285
286 if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
287 !(cmd_status & SDMMC_CMD_START),
288 1, 500 * USEC_PER_MSEC))
289 dev_err(&slot->mmc->class_dev,
290 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
291 cmd, arg, cmd_status);
292}
236 293
237static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) 294static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
238{ 295{
@@ -341,31 +398,6 @@ static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
341 return cmdr; 398 return cmdr;
342} 399}
343 400
344static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
345{
346 unsigned long timeout = jiffies + msecs_to_jiffies(500);
347
348 /*
349 * Databook says that before issuing a new data transfer command
350 * we need to check to see if the card is busy. Data transfer commands
351 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
352 *
353 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
354 * expected.
355 */
356 if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
357 !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
358 while (mci_readl(host, STATUS) & SDMMC_STATUS_BUSY) {
359 if (time_after(jiffies, timeout)) {
360 /* Command will fail; we'll pass error then */
361 dev_err(host->dev, "Busy; trying anyway\n");
362 break;
363 }
364 udelay(10);
365 }
366 }
367}
368
369static void dw_mci_start_command(struct dw_mci *host, 401static void dw_mci_start_command(struct dw_mci *host,
370 struct mmc_command *cmd, u32 cmd_flags) 402 struct mmc_command *cmd, u32 cmd_flags)
371{ 403{
@@ -400,14 +432,6 @@ static void dw_mci_stop_dma(struct dw_mci *host)
400 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 432 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
401} 433}
402 434
403static int dw_mci_get_dma_dir(struct mmc_data *data)
404{
405 if (data->flags & MMC_DATA_WRITE)
406 return DMA_TO_DEVICE;
407 else
408 return DMA_FROM_DEVICE;
409}
410
411static void dw_mci_dma_cleanup(struct dw_mci *host) 435static void dw_mci_dma_cleanup(struct dw_mci *host)
412{ 436{
413 struct mmc_data *data = host->data; 437 struct mmc_data *data = host->data;
@@ -416,7 +440,7 @@ static void dw_mci_dma_cleanup(struct dw_mci *host)
416 dma_unmap_sg(host->dev, 440 dma_unmap_sg(host->dev,
417 data->sg, 441 data->sg,
418 data->sg_len, 442 data->sg_len,
419 dw_mci_get_dma_dir(data)); 443 mmc_get_dma_dir(data));
420 data->host_cookie = COOKIE_UNMAPPED; 444 data->host_cookie = COOKIE_UNMAPPED;
421 } 445 }
422} 446}
@@ -555,7 +579,7 @@ static inline int dw_mci_prepare_desc64(struct dw_mci *host,
555{ 579{
556 unsigned int desc_len; 580 unsigned int desc_len;
557 struct idmac_desc_64addr *desc_first, *desc_last, *desc; 581 struct idmac_desc_64addr *desc_first, *desc_last, *desc;
558 unsigned long timeout; 582 u32 val;
559 int i; 583 int i;
560 584
561 desc_first = desc_last = desc = host->sg_cpu; 585 desc_first = desc_last = desc = host->sg_cpu;
@@ -577,12 +601,10 @@ static inline int dw_mci_prepare_desc64(struct dw_mci *host,
577 * isn't still owned by IDMAC as IDMAC's write 601 * isn't still owned by IDMAC as IDMAC's write
578 * ops and CPU's read ops are asynchronous. 602 * ops and CPU's read ops are asynchronous.
579 */ 603 */
580 timeout = jiffies + msecs_to_jiffies(100); 604 if (readl_poll_timeout_atomic(&desc->des0, val,
581 while (readl(&desc->des0) & IDMAC_DES0_OWN) { 605 !(val & IDMAC_DES0_OWN),
582 if (time_after(jiffies, timeout)) 606 10, 100 * USEC_PER_MSEC))
583 goto err_own_bit; 607 goto err_own_bit;
584 udelay(10);
585 }
586 608
587 /* 609 /*
588 * Set the OWN bit and disable interrupts 610 * Set the OWN bit and disable interrupts
@@ -629,7 +651,7 @@ static inline int dw_mci_prepare_desc32(struct dw_mci *host,
629{ 651{
630 unsigned int desc_len; 652 unsigned int desc_len;
631 struct idmac_desc *desc_first, *desc_last, *desc; 653 struct idmac_desc *desc_first, *desc_last, *desc;
632 unsigned long timeout; 654 u32 val;
633 int i; 655 int i;
634 656
635 desc_first = desc_last = desc = host->sg_cpu; 657 desc_first = desc_last = desc = host->sg_cpu;
@@ -651,13 +673,11 @@ static inline int dw_mci_prepare_desc32(struct dw_mci *host,
651 * isn't still owned by IDMAC as IDMAC's write 673 * isn't still owned by IDMAC as IDMAC's write
652 * ops and CPU's read ops are asynchronous. 674 * ops and CPU's read ops are asynchronous.
653 */ 675 */
654 timeout = jiffies + msecs_to_jiffies(100); 676 if (readl_poll_timeout_atomic(&desc->des0, val,
655 while (readl(&desc->des0) & 677 IDMAC_OWN_CLR64(val),
656 cpu_to_le32(IDMAC_DES0_OWN)) { 678 10,
657 if (time_after(jiffies, timeout)) 679 100 * USEC_PER_MSEC))
658 goto err_own_bit; 680 goto err_own_bit;
659 udelay(10);
660 }
661 681
662 /* 682 /*
663 * Set the OWN bit and disable interrupts 683 * Set the OWN bit and disable interrupts
@@ -876,7 +896,7 @@ static int dw_mci_pre_dma_transfer(struct dw_mci *host,
876 sg_len = dma_map_sg(host->dev, 896 sg_len = dma_map_sg(host->dev,
877 data->sg, 897 data->sg,
878 data->sg_len, 898 data->sg_len,
879 dw_mci_get_dma_dir(data)); 899 mmc_get_dma_dir(data));
880 if (sg_len == 0) 900 if (sg_len == 0)
881 return -EINVAL; 901 return -EINVAL;
882 902
@@ -916,10 +936,51 @@ static void dw_mci_post_req(struct mmc_host *mmc,
916 dma_unmap_sg(slot->host->dev, 936 dma_unmap_sg(slot->host->dev,
917 data->sg, 937 data->sg,
918 data->sg_len, 938 data->sg_len,
919 dw_mci_get_dma_dir(data)); 939 mmc_get_dma_dir(data));
920 data->host_cookie = COOKIE_UNMAPPED; 940 data->host_cookie = COOKIE_UNMAPPED;
921} 941}
922 942
943static int dw_mci_get_cd(struct mmc_host *mmc)
944{
945 int present;
946 struct dw_mci_slot *slot = mmc_priv(mmc);
947 struct dw_mci *host = slot->host;
948 int gpio_cd = mmc_gpio_get_cd(mmc);
949
950 /* Use platform get_cd function, else try onboard card detect */
951 if (((mmc->caps & MMC_CAP_NEEDS_POLL)
952 || !mmc_card_is_removable(mmc))) {
953 present = 1;
954
955 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
956 if (mmc->caps & MMC_CAP_NEEDS_POLL) {
957 dev_info(&mmc->class_dev,
958 "card is polling.\n");
959 } else {
960 dev_info(&mmc->class_dev,
961 "card is non-removable.\n");
962 }
963 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
964 }
965
966 return present;
967 } else if (gpio_cd >= 0)
968 present = gpio_cd;
969 else
970 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
971 == 0 ? 1 : 0;
972
973 spin_lock_bh(&host->lock);
974 if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
975 dev_dbg(&mmc->class_dev, "card is present\n");
976 else if (!present &&
977 !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
978 dev_dbg(&mmc->class_dev, "card is not present\n");
979 spin_unlock_bh(&host->lock);
980
981 return present;
982}
983
923static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data) 984static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
924{ 985{
925 unsigned int blksz = data->blksz; 986 unsigned int blksz = data->blksz;
@@ -1133,27 +1194,6 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
1133 } 1194 }
1134} 1195}
1135 1196
1136static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
1137{
1138 struct dw_mci *host = slot->host;
1139 unsigned long timeout = jiffies + msecs_to_jiffies(500);
1140 unsigned int cmd_status = 0;
1141
1142 mci_writel(host, CMDARG, arg);
1143 wmb(); /* drain writebuffer */
1144 dw_mci_wait_while_busy(host, cmd);
1145 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
1146
1147 while (time_before(jiffies, timeout)) {
1148 cmd_status = mci_readl(host, CMD);
1149 if (!(cmd_status & SDMMC_CMD_START))
1150 return;
1151 }
1152 dev_err(&slot->mmc->class_dev,
1153 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1154 cmd, arg, cmd_status);
1155}
1156
1157static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) 1197static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1158{ 1198{
1159 struct dw_mci *host = slot->host; 1199 struct dw_mci *host = slot->host;
@@ -1534,47 +1574,6 @@ static int dw_mci_get_ro(struct mmc_host *mmc)
1534 return read_only; 1574 return read_only;
1535} 1575}
1536 1576
1537static int dw_mci_get_cd(struct mmc_host *mmc)
1538{
1539 int present;
1540 struct dw_mci_slot *slot = mmc_priv(mmc);
1541 struct dw_mci *host = slot->host;
1542 int gpio_cd = mmc_gpio_get_cd(mmc);
1543
1544 /* Use platform get_cd function, else try onboard card detect */
1545 if (((mmc->caps & MMC_CAP_NEEDS_POLL)
1546 || !mmc_card_is_removable(mmc))) {
1547 present = 1;
1548
1549 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1550 if (mmc->caps & MMC_CAP_NEEDS_POLL) {
1551 dev_info(&mmc->class_dev,
1552 "card is polling.\n");
1553 } else {
1554 dev_info(&mmc->class_dev,
1555 "card is non-removable.\n");
1556 }
1557 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1558 }
1559
1560 return present;
1561 } else if (gpio_cd >= 0)
1562 present = gpio_cd;
1563 else
1564 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1565 == 0 ? 1 : 0;
1566
1567 spin_lock_bh(&host->lock);
1568 if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
1569 dev_dbg(&mmc->class_dev, "card is present\n");
1570 else if (!present &&
1571 !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
1572 dev_dbg(&mmc->class_dev, "card is not present\n");
1573 spin_unlock_bh(&host->lock);
1574
1575 return present;
1576}
1577
1578static void dw_mci_hw_reset(struct mmc_host *mmc) 1577static void dw_mci_hw_reset(struct mmc_host *mmc)
1579{ 1578{
1580 struct dw_mci_slot *slot = mmc_priv(mmc); 1579 struct dw_mci_slot *slot = mmc_priv(mmc);
@@ -1688,6 +1687,73 @@ static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
1688 return 0; 1687 return 0;
1689} 1688}
1690 1689
1690static bool dw_mci_reset(struct dw_mci *host)
1691{
1692 u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
1693 bool ret = false;
1694 u32 status = 0;
1695
1696 /*
1697 * Resetting generates a block interrupt, hence setting
1698 * the scatter-gather pointer to NULL.
1699 */
1700 if (host->sg) {
1701 sg_miter_stop(&host->sg_miter);
1702 host->sg = NULL;
1703 }
1704
1705 if (host->use_dma)
1706 flags |= SDMMC_CTRL_DMA_RESET;
1707
1708 if (dw_mci_ctrl_reset(host, flags)) {
1709 /*
1710 * In all cases we clear the RAWINTS
1711 * register to clear any interrupts.
1712 */
1713 mci_writel(host, RINTSTS, 0xFFFFFFFF);
1714
1715 if (!host->use_dma) {
1716 ret = true;
1717 goto ciu_out;
1718 }
1719
1720 /* Wait for dma_req to be cleared */
1721 if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
1722 status,
1723 !(status & SDMMC_STATUS_DMA_REQ),
1724 1, 500 * USEC_PER_MSEC)) {
1725 dev_err(host->dev,
1726 "%s: Timeout waiting for dma_req to be cleared\n",
1727 __func__);
1728 goto ciu_out;
1729 }
1730
1731 /* when using DMA next we reset the fifo again */
1732 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
1733 goto ciu_out;
1734 } else {
1735 /* if the controller reset bit did clear, then set clock regs */
1736 if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
1737 dev_err(host->dev,
1738 "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
1739 __func__);
1740 goto ciu_out;
1741 }
1742 }
1743
1744 if (host->use_dma == TRANS_MODE_IDMAC)
1745 /* It is also recommended that we reset and reprogram idmac */
1746 dw_mci_idmac_reset(host);
1747
1748 ret = true;
1749
1750ciu_out:
1751 /* After a CTRL reset we need to have CIU set clock registers */
1752 mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);
1753
1754 return ret;
1755}
1756
1691static const struct mmc_host_ops dw_mci_ops = { 1757static const struct mmc_host_ops dw_mci_ops = {
1692 .request = dw_mci_request, 1758 .request = dw_mci_request,
1693 .pre_req = dw_mci_pre_req, 1759 .pre_req = dw_mci_pre_req,
@@ -2830,99 +2896,6 @@ no_dma:
2830 host->use_dma = TRANS_MODE_PIO; 2896 host->use_dma = TRANS_MODE_PIO;
2831} 2897}
2832 2898
2833static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
2834{
2835 unsigned long timeout = jiffies + msecs_to_jiffies(500);
2836 u32 ctrl;
2837
2838 ctrl = mci_readl(host, CTRL);
2839 ctrl |= reset;
2840 mci_writel(host, CTRL, ctrl);
2841
2842 /* wait till resets clear */
2843 do {
2844 ctrl = mci_readl(host, CTRL);
2845 if (!(ctrl & reset))
2846 return true;
2847 } while (time_before(jiffies, timeout));
2848
2849 dev_err(host->dev,
2850 "Timeout resetting block (ctrl reset %#x)\n",
2851 ctrl & reset);
2852
2853 return false;
2854}
2855
2856static bool dw_mci_reset(struct dw_mci *host)
2857{
2858 u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
2859 bool ret = false;
2860
2861 /*
2862 * Reseting generates a block interrupt, hence setting
2863 * the scatter-gather pointer to NULL.
2864 */
2865 if (host->sg) {
2866 sg_miter_stop(&host->sg_miter);
2867 host->sg = NULL;
2868 }
2869
2870 if (host->use_dma)
2871 flags |= SDMMC_CTRL_DMA_RESET;
2872
2873 if (dw_mci_ctrl_reset(host, flags)) {
2874 /*
2875 * In all cases we clear the RAWINTS register to clear any
2876 * interrupts.
2877 */
2878 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2879
2880 /* if using dma we wait for dma_req to clear */
2881 if (host->use_dma) {
2882 unsigned long timeout = jiffies + msecs_to_jiffies(500);
2883 u32 status;
2884
2885 do {
2886 status = mci_readl(host, STATUS);
2887 if (!(status & SDMMC_STATUS_DMA_REQ))
2888 break;
2889 cpu_relax();
2890 } while (time_before(jiffies, timeout));
2891
2892 if (status & SDMMC_STATUS_DMA_REQ) {
2893 dev_err(host->dev,
2894 "%s: Timeout waiting for dma_req to clear during reset\n",
2895 __func__);
2896 goto ciu_out;
2897 }
2898
2899 /* when using DMA next we reset the fifo again */
2900 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
2901 goto ciu_out;
2902 }
2903 } else {
2904 /* if the controller reset bit did clear, then set clock regs */
2905 if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
2906 dev_err(host->dev,
2907 "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
2908 __func__);
2909 goto ciu_out;
2910 }
2911 }
2912
2913 if (host->use_dma == TRANS_MODE_IDMAC)
2914 /* It is also recommended that we reset and reprogram idmac */
2915 dw_mci_idmac_reset(host);
2916
2917 ret = true;
2918
2919ciu_out:
2920 /* After a CTRL reset we need to have CIU set clock registers */
2921 mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);
2922
2923 return ret;
2924}
2925
2926static void dw_mci_cmd11_timer(unsigned long arg) 2899static void dw_mci_cmd11_timer(unsigned long arg)
2927{ 2900{
2928 struct dw_mci *host = (struct dw_mci *)arg; 2901 struct dw_mci *host = (struct dw_mci *)arg;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 819ad32964fc..57e254aac48d 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -200,11 +200,6 @@ free_master_write:
200 return -ENODEV; 200 return -ENODEV;
201} 201}
202 202
203static inline int jz4740_mmc_get_dma_dir(struct mmc_data *data)
204{
205 return (data->flags & MMC_DATA_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
206}
207
208static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host, 203static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
209 struct mmc_data *data) 204 struct mmc_data *data)
210{ 205{
@@ -215,7 +210,7 @@ static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
215 struct mmc_data *data) 210 struct mmc_data *data)
216{ 211{
217 struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data); 212 struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
218 enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data); 213 enum dma_data_direction dir = mmc_get_dma_dir(data);
219 214
220 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); 215 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
221} 216}
@@ -227,7 +222,7 @@ static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
227 struct dma_chan *chan) 222 struct dma_chan *chan)
228{ 223{
229 struct jz4740_mmc_host_next *next_data = &host->next_data; 224 struct jz4740_mmc_host_next *next_data = &host->next_data;
230 enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data); 225 enum dma_data_direction dir = mmc_get_dma_dir(data);
231 int sg_len; 226 int sg_len;
232 227
233 if (!next && data->host_cookie && 228 if (!next && data->host_cookie &&
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 5a959783304b..1842ed341af1 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -36,23 +36,21 @@
 #include <linux/clk-provider.h>
 #include <linux/regulator/consumer.h>
 #include <linux/interrupt.h>
+#include <linux/bitfield.h>
 
 #define DRIVER_NAME "meson-gx-mmc"
 
 #define SD_EMMC_CLOCK 0x0
-#define CLK_DIV_SHIFT 0
-#define CLK_DIV_WIDTH 6
-#define CLK_DIV_MASK 0x3f
+#define CLK_DIV_MASK GENMASK(5, 0)
 #define CLK_DIV_MAX 63
-#define CLK_SRC_SHIFT 6
-#define CLK_SRC_WIDTH 2
-#define CLK_SRC_MASK 0x3
+#define CLK_SRC_MASK GENMASK(7, 6)
 #define CLK_SRC_XTAL 0 /* external crystal */
 #define CLK_SRC_XTAL_RATE 24000000
 #define CLK_SRC_PLL 1 /* FCLK_DIV2 */
 #define CLK_SRC_PLL_RATE 1000000000
-#define CLK_PHASE_SHIFT 8
-#define CLK_PHASE_MASK 0x3
+#define CLK_CORE_PHASE_MASK GENMASK(9, 8)
+#define CLK_TX_PHASE_MASK GENMASK(11, 10)
+#define CLK_RX_PHASE_MASK GENMASK(13, 12)
 #define CLK_PHASE_0 0
 #define CLK_PHASE_90 1
 #define CLK_PHASE_180 2
@@ -65,22 +63,17 @@
 #define SD_EMMC_START 0x40
 #define START_DESC_INIT BIT(0)
 #define START_DESC_BUSY BIT(1)
-#define START_DESC_ADDR_SHIFT 2
-#define START_DESC_ADDR_MASK (~0x3)
+#define START_DESC_ADDR_MASK GENMASK(31, 2)
 
 #define SD_EMMC_CFG 0x44
-#define CFG_BUS_WIDTH_SHIFT 0
-#define CFG_BUS_WIDTH_MASK 0x3
+#define CFG_BUS_WIDTH_MASK GENMASK(1, 0)
 #define CFG_BUS_WIDTH_1 0x0
 #define CFG_BUS_WIDTH_4 0x1
 #define CFG_BUS_WIDTH_8 0x2
 #define CFG_DDR BIT(2)
-#define CFG_BLK_LEN_SHIFT 4
-#define CFG_BLK_LEN_MASK 0xf
-#define CFG_RESP_TIMEOUT_SHIFT 8
-#define CFG_RESP_TIMEOUT_MASK 0xf
-#define CFG_RC_CC_SHIFT 12
-#define CFG_RC_CC_MASK 0xf
+#define CFG_BLK_LEN_MASK GENMASK(7, 4)
+#define CFG_RESP_TIMEOUT_MASK GENMASK(11, 8)
+#define CFG_RC_CC_MASK GENMASK(15, 12)
 #define CFG_STOP_CLOCK BIT(22)
 #define CFG_CLK_ALWAYS_ON BIT(18)
 #define CFG_CHK_DS BIT(20)
@@ -90,9 +83,8 @@
 #define STATUS_BUSY BIT(31)
 
 #define SD_EMMC_IRQ_EN 0x4c
-#define IRQ_EN_MASK 0x3fff
-#define IRQ_RXD_ERR_SHIFT 0
-#define IRQ_RXD_ERR_MASK 0xff
+#define IRQ_EN_MASK GENMASK(13, 0)
+#define IRQ_RXD_ERR_MASK GENMASK(7, 0)
 #define IRQ_TXD_ERR BIT(8)
 #define IRQ_DESC_ERR BIT(9)
 #define IRQ_RESP_ERR BIT(10)
@@ -116,23 +108,39 @@
 
 #define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */
 #define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */
+#define SD_EMMC_CMD_TIMEOUT 1024 /* in ms */
+#define SD_EMMC_CMD_TIMEOUT_DATA 4096 /* in ms */
 #define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */
+#define SD_EMMC_DESC_BUF_LEN PAGE_SIZE
+
+#define SD_EMMC_PRE_REQ_DONE BIT(0)
+#define SD_EMMC_DESC_CHAIN_MODE BIT(1)
+
 #define MUX_CLK_NUM_PARENTS 2
 
+struct meson_tuning_params {
+	u8 core_phase;
+	u8 tx_phase;
+	u8 rx_phase;
+};
+
+struct sd_emmc_desc {
+	u32 cmd_cfg;
+	u32 cmd_arg;
+	u32 cmd_data;
+	u32 cmd_resp;
+};
+
 struct meson_host {
	struct device *dev;
	struct mmc_host *mmc;
-	struct mmc_request *mrq;
	struct mmc_command *cmd;
 
	spinlock_t lock;
	void __iomem *regs;
-	int irq;
-	u32 ocr_mask;
	struct clk *core_clk;
	struct clk_mux mux;
	struct clk *mux_clk;
-	struct clk *mux_parent[MUX_CLK_NUM_PARENTS];
	unsigned long current_clock;
 
	struct clk_divider cfg_div;
@@ -141,23 +149,18 @@ struct meson_host {
	unsigned int bounce_buf_size;
	void *bounce_buf;
	dma_addr_t bounce_dma_addr;
+	struct sd_emmc_desc *descs;
+	dma_addr_t descs_dma_addr;
 
+	struct meson_tuning_params tp;
	bool vqmmc_enabled;
 };
 
-struct sd_emmc_desc {
-	u32 cmd_cfg;
-	u32 cmd_arg;
-	u32 cmd_data;
-	u32 cmd_resp;
-};
-#define CMD_CFG_LENGTH_SHIFT 0
-#define CMD_CFG_LENGTH_MASK 0x1ff
+#define CMD_CFG_LENGTH_MASK GENMASK(8, 0)
 #define CMD_CFG_BLOCK_MODE BIT(9)
 #define CMD_CFG_R1B BIT(10)
 #define CMD_CFG_END_OF_CHAIN BIT(11)
-#define CMD_CFG_TIMEOUT_SHIFT 12
-#define CMD_CFG_TIMEOUT_MASK 0xf
+#define CMD_CFG_TIMEOUT_MASK GENMASK(15, 12)
 #define CMD_CFG_NO_RESP BIT(16)
 #define CMD_CFG_NO_CMD BIT(17)
 #define CMD_CFG_DATA_IO BIT(18)
@@ -166,17 +169,99 @@ struct sd_emmc_desc {
 #define CMD_CFG_RESP_128 BIT(21)
 #define CMD_CFG_RESP_NUM BIT(22)
 #define CMD_CFG_DATA_NUM BIT(23)
-#define CMD_CFG_CMD_INDEX_SHIFT 24
-#define CMD_CFG_CMD_INDEX_MASK 0x3f
+#define CMD_CFG_CMD_INDEX_MASK GENMASK(29, 24)
 #define CMD_CFG_ERROR BIT(30)
 #define CMD_CFG_OWNER BIT(31)
 
-#define CMD_DATA_MASK (~0x3)
+#define CMD_DATA_MASK GENMASK(31, 2)
 #define CMD_DATA_BIG_ENDIAN BIT(1)
 #define CMD_DATA_SRAM BIT(0)
-#define CMD_RESP_MASK (~0x1)
+#define CMD_RESP_MASK GENMASK(31, 1)
 #define CMD_RESP_SRAM BIT(0)
 
+static unsigned int meson_mmc_get_timeout_msecs(struct mmc_data *data)
+{
+	unsigned int timeout = data->timeout_ns / NSEC_PER_MSEC;
+
+	if (!timeout)
+		return SD_EMMC_CMD_TIMEOUT_DATA;
+
+	timeout = roundup_pow_of_two(timeout);
+
+	return min(timeout, 32768U); /* max. 2^15 ms */
+}
+
+static struct mmc_command *meson_mmc_get_next_command(struct mmc_command *cmd)
+{
+	if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error)
+		return cmd->mrq->cmd;
+	else if (mmc_op_multi(cmd->opcode) &&
+		 (!cmd->mrq->sbc || cmd->error || cmd->data->error))
+		return cmd->mrq->stop;
+	else
+		return NULL;
+}
+
+static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
+					struct mmc_request *mrq)
+{
+	struct mmc_data *data = mrq->data;
+	struct scatterlist *sg;
+	int i;
+	bool use_desc_chain_mode = true;
+
+	for_each_sg(data->sg, sg, data->sg_len, i)
+		/* check for 8 byte alignment */
+		if (sg->offset & 7) {
+			WARN_ONCE(1, "unaligned scatterlist buffer\n");
+			use_desc_chain_mode = false;
+			break;
+		}
+
+	if (use_desc_chain_mode)
+		data->host_cookie |= SD_EMMC_DESC_CHAIN_MODE;
+}
+
+static inline bool meson_mmc_desc_chain_mode(const struct mmc_data *data)
+{
+	return data->host_cookie & SD_EMMC_DESC_CHAIN_MODE;
+}
+
+static inline bool meson_mmc_bounce_buf_read(const struct mmc_data *data)
+{
+	return data && data->flags & MMC_DATA_READ &&
+	       !meson_mmc_desc_chain_mode(data);
+}
+
+static void meson_mmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+	struct mmc_data *data = mrq->data;
+
+	if (!data)
+		return;
+
+	meson_mmc_get_transfer_mode(mmc, mrq);
+	data->host_cookie |= SD_EMMC_PRE_REQ_DONE;
+
+	if (!meson_mmc_desc_chain_mode(data))
+		return;
+
+	data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
+				    mmc_get_dma_dir(data));
+	if (!data->sg_count)
+		dev_err(mmc_dev(mmc), "dma_map_sg failed");
+}
+
+static void meson_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
+			       int err)
+{
+	struct mmc_data *data = mrq->data;
+
+	if (data && meson_mmc_desc_chain_mode(data) && data->sg_count)
+		dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
+			     mmc_get_dma_dir(data));
+}
+
 static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate)
 {
	struct mmc_host *mmc = host->mmc;
@@ -244,26 +329,23 @@ static int meson_mmc_clk_init(struct meson_host *host)
	char clk_name[32];
	int i, ret = 0;
	const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
-	unsigned int mux_parent_count = 0;
	const char *clk_div_parents[1];
	u32 clk_reg, cfg;
 
	/* get the mux parents */
	for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
+		struct clk *clk;
		char name[16];
 
		snprintf(name, sizeof(name), "clkin%d", i);
-		host->mux_parent[i] = devm_clk_get(host->dev, name);
-		if (IS_ERR(host->mux_parent[i])) {
-			ret = PTR_ERR(host->mux_parent[i]);
-			if (PTR_ERR(host->mux_parent[i]) != -EPROBE_DEFER)
+		clk = devm_clk_get(host->dev, name);
+		if (IS_ERR(clk)) {
+			if (clk != ERR_PTR(-EPROBE_DEFER))
				dev_err(host->dev, "Missing clock %s\n", name);
-			host->mux_parent[i] = NULL;
-			return ret;
+			return PTR_ERR(clk);
		}
 
-		mux_parent_names[i] = __clk_get_name(host->mux_parent[i]);
-		mux_parent_count++;
+		mux_parent_names[i] = __clk_get_name(clk);
	}
 
	/* create the mux */
@@ -272,10 +354,9 @@ static int meson_mmc_clk_init(struct meson_host *host)
	init.ops = &clk_mux_ops;
	init.flags = 0;
	init.parent_names = mux_parent_names;
-	init.num_parents = mux_parent_count;
-
+	init.num_parents = MUX_CLK_NUM_PARENTS;
	host->mux.reg = host->regs + SD_EMMC_CLOCK;
-	host->mux.shift = CLK_SRC_SHIFT;
+	host->mux.shift = __bf_shf(CLK_SRC_MASK);
	host->mux.mask = CLK_SRC_MASK;
	host->mux.flags = 0;
	host->mux.table = NULL;
@@ -287,7 +368,7 @@ static int meson_mmc_clk_init(struct meson_host *host)
 
	/* create the divider */
	snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev));
-	init.name = devm_kstrdup(host->dev, clk_name, GFP_KERNEL);
+	init.name = clk_name;
	init.ops = &clk_divider_ops;
	init.flags = CLK_SET_RATE_PARENT;
	clk_div_parents[0] = __clk_get_name(host->mux_clk);
@@ -295,8 +376,8 @@ static int meson_mmc_clk_init(struct meson_host *host)
	init.num_parents = ARRAY_SIZE(clk_div_parents);
 
	host->cfg_div.reg = host->regs + SD_EMMC_CLOCK;
-	host->cfg_div.shift = CLK_DIV_SHIFT;
-	host->cfg_div.width = CLK_DIV_WIDTH;
+	host->cfg_div.shift = __bf_shf(CLK_DIV_MASK);
+	host->cfg_div.width = __builtin_popcountl(CLK_DIV_MASK);
	host->cfg_div.hw.init = &init;
	host->cfg_div.flags = CLK_DIVIDER_ONE_BASED |
		CLK_DIVIDER_ROUND_CLOSEST | CLK_DIVIDER_ALLOW_ZERO;
@@ -307,9 +388,11 @@ static int meson_mmc_clk_init(struct meson_host *host)
 
	/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
	clk_reg = 0;
-	clk_reg |= CLK_PHASE_180 << CLK_PHASE_SHIFT;
-	clk_reg |= CLK_SRC_XTAL << CLK_SRC_SHIFT;
-	clk_reg |= CLK_DIV_MAX << CLK_DIV_SHIFT;
+	clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, host->tp.core_phase);
+	clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, host->tp.tx_phase);
+	clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, host->tp.rx_phase);
+	clk_reg |= FIELD_PREP(CLK_SRC_MASK, CLK_SRC_XTAL);
+	clk_reg |= FIELD_PREP(CLK_DIV_MASK, CLK_DIV_MAX);
	clk_reg &= ~CLK_ALWAYS_ON;
	writel(clk_reg, host->regs + SD_EMMC_CLOCK);
 
@@ -327,12 +410,37 @@ static int meson_mmc_clk_init(struct meson_host *host)
	host->mmc->f_min = clk_round_rate(host->cfg_div_clk, 400000);
 
	ret = meson_mmc_clk_set(host, host->mmc->f_min);
-	if (!ret)
+	if (ret)
		clk_disable_unprepare(host->cfg_div_clk);
 
	return ret;
 }
 
+static void meson_mmc_set_tuning_params(struct mmc_host *mmc)
+{
+	struct meson_host *host = mmc_priv(mmc);
+	u32 regval;
+
+	/* stop clock */
+	regval = readl(host->regs + SD_EMMC_CFG);
+	regval |= CFG_STOP_CLOCK;
+	writel(regval, host->regs + SD_EMMC_CFG);
+
+	regval = readl(host->regs + SD_EMMC_CLOCK);
+	regval &= ~CLK_CORE_PHASE_MASK;
+	regval |= FIELD_PREP(CLK_CORE_PHASE_MASK, host->tp.core_phase);
+	regval &= ~CLK_TX_PHASE_MASK;
+	regval |= FIELD_PREP(CLK_TX_PHASE_MASK, host->tp.tx_phase);
+	regval &= ~CLK_RX_PHASE_MASK;
+	regval |= FIELD_PREP(CLK_RX_PHASE_MASK, host->tp.rx_phase);
+	writel(regval, host->regs + SD_EMMC_CLOCK);
+
+	/* start clock */
+	regval = readl(host->regs + SD_EMMC_CFG);
+	regval &= ~CFG_STOP_CLOCK;
+	writel(regval, host->regs + SD_EMMC_CFG);
+}
+
 static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 {
	struct meson_host *host = mmc_priv(mmc);
@@ -397,17 +505,8 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
	val = readl(host->regs + SD_EMMC_CFG);
	orig = val;
 
-	val &= ~(CFG_BUS_WIDTH_MASK << CFG_BUS_WIDTH_SHIFT);
-	val |= bus_width << CFG_BUS_WIDTH_SHIFT;
-
-	val &= ~(CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
-	val |= ilog2(SD_EMMC_CFG_BLK_SIZE) << CFG_BLK_LEN_SHIFT;
-
-	val &= ~(CFG_RESP_TIMEOUT_MASK << CFG_RESP_TIMEOUT_SHIFT);
-	val |= ilog2(SD_EMMC_CFG_RESP_TIMEOUT) << CFG_RESP_TIMEOUT_SHIFT;
-
-	val &= ~(CFG_RC_CC_MASK << CFG_RC_CC_SHIFT);
-	val |= ilog2(SD_EMMC_CFG_CMD_GAP) << CFG_RC_CC_SHIFT;
+	val &= ~CFG_BUS_WIDTH_MASK;
+	val |= FIELD_PREP(CFG_BUS_WIDTH_MASK, bus_width);
 
	val &= ~CFG_DDR;
	if (ios->timing == MMC_TIMING_UHS_DDR50 ||
@@ -419,149 +518,189 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
	if (ios->timing == MMC_TIMING_MMC_HS400)
		val |= CFG_CHK_DS;
 
-	writel(val, host->regs + SD_EMMC_CFG);
-
-	if (val != orig)
+	if (val != orig) {
+		writel(val, host->regs + SD_EMMC_CFG);
		dev_dbg(host->dev, "%s: SD_EMMC_CFG: 0x%08x -> 0x%08x\n",
			__func__, orig, val);
+	}
 }
 
-static int meson_mmc_request_done(struct mmc_host *mmc, struct mmc_request *mrq)
+static void meson_mmc_request_done(struct mmc_host *mmc,
+				   struct mmc_request *mrq)
 {
	struct meson_host *host = mmc_priv(mmc);
 
-	WARN_ON(host->mrq != mrq);
-
-	host->mrq = NULL;
	host->cmd = NULL;
	mmc_request_done(host->mmc, mrq);
-
-	return 0;
 }
 
-static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
+static void meson_mmc_set_blksz(struct mmc_host *mmc, unsigned int blksz)
 {
	struct meson_host *host = mmc_priv(mmc);
-	struct sd_emmc_desc *desc, desc_tmp;
-	u32 cfg;
-	u8 blk_len, cmd_cfg_timeout;
-	unsigned int xfer_bytes = 0;
+	u32 cfg, blksz_old;
 
-	/* Setup descriptors */
-	dma_rmb();
-	desc = &desc_tmp;
-	memset(desc, 0, sizeof(struct sd_emmc_desc));
+	cfg = readl(host->regs + SD_EMMC_CFG);
+	blksz_old = FIELD_GET(CFG_BLK_LEN_MASK, cfg);
+
+	if (!is_power_of_2(blksz))
+		dev_err(host->dev, "blksz %u is not a power of 2\n", blksz);
+
+	blksz = ilog2(blksz);
+
+	/* check if block-size matches, if not update */
+	if (blksz == blksz_old)
+		return;
 
-	desc->cmd_cfg |= (cmd->opcode & CMD_CFG_CMD_INDEX_MASK) <<
-			 CMD_CFG_CMD_INDEX_SHIFT;
-	desc->cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */
-	desc->cmd_arg = cmd->arg;
+	dev_dbg(host->dev, "%s: update blk_len %d -> %d\n", __func__,
		blksz_old, blksz);
 
-	/* Response */
+	cfg &= ~CFG_BLK_LEN_MASK;
+	cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, blksz);
+	writel(cfg, host->regs + SD_EMMC_CFG);
+}
+
+static void meson_mmc_set_response_bits(struct mmc_command *cmd, u32 *cmd_cfg)
+{
	if (cmd->flags & MMC_RSP_PRESENT) {
-		desc->cmd_cfg &= ~CMD_CFG_NO_RESP;
		if (cmd->flags & MMC_RSP_136)
-			desc->cmd_cfg |= CMD_CFG_RESP_128;
-		desc->cmd_cfg |= CMD_CFG_RESP_NUM;
-		desc->cmd_resp = 0;
+			*cmd_cfg |= CMD_CFG_RESP_128;
+		*cmd_cfg |= CMD_CFG_RESP_NUM;
 
		if (!(cmd->flags & MMC_RSP_CRC))
-			desc->cmd_cfg |= CMD_CFG_RESP_NOCRC;
+			*cmd_cfg |= CMD_CFG_RESP_NOCRC;
 
		if (cmd->flags & MMC_RSP_BUSY)
-			desc->cmd_cfg |= CMD_CFG_R1B;
+			*cmd_cfg |= CMD_CFG_R1B;
	} else {
-		desc->cmd_cfg |= CMD_CFG_NO_RESP;
+		*cmd_cfg |= CMD_CFG_NO_RESP;
	}
+}
+
+static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
+{
+	struct meson_host *host = mmc_priv(mmc);
+	struct sd_emmc_desc *desc = host->descs;
+	struct mmc_data *data = host->cmd->data;
+	struct scatterlist *sg;
+	u32 start;
+	int i;
+
+	if (data->flags & MMC_DATA_WRITE)
+		cmd_cfg |= CMD_CFG_DATA_WR;
+
+	if (data->blocks > 1) {
+		cmd_cfg |= CMD_CFG_BLOCK_MODE;
+		meson_mmc_set_blksz(mmc, data->blksz);
+	}
+
+	for_each_sg(data->sg, sg, data->sg_count, i) {
+		unsigned int len = sg_dma_len(sg);
+
+		if (data->blocks > 1)
+			len /= data->blksz;
+
+		desc[i].cmd_cfg = cmd_cfg;
+		desc[i].cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, len);
+		if (i > 0)
+			desc[i].cmd_cfg |= CMD_CFG_NO_CMD;
+		desc[i].cmd_arg = host->cmd->arg;
+		desc[i].cmd_resp = 0;
+		desc[i].cmd_data = sg_dma_address(sg);
+	}
+	desc[data->sg_count - 1].cmd_cfg |= CMD_CFG_END_OF_CHAIN;
+
+	dma_wmb(); /* ensure descriptor is written before kicked */
+	start = host->descs_dma_addr | START_DESC_BUSY;
+	writel(start, host->regs + SD_EMMC_START);
+}
+
+static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
+{
+	struct meson_host *host = mmc_priv(mmc);
+	struct mmc_data *data = cmd->data;
+	u32 cmd_cfg = 0, cmd_data = 0;
+	unsigned int xfer_bytes = 0;
+
+	/* Setup descriptors */
+	dma_rmb();
+
+	host->cmd = cmd;
+
+	cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
+	cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */
+
+	meson_mmc_set_response_bits(cmd, &cmd_cfg);
 
	/* data? */
-	if (cmd->data) {
-		desc->cmd_cfg |= CMD_CFG_DATA_IO;
-		if (cmd->data->blocks > 1) {
-			desc->cmd_cfg |= CMD_CFG_BLOCK_MODE;
-			desc->cmd_cfg |=
-				(cmd->data->blocks & CMD_CFG_LENGTH_MASK) <<
-				CMD_CFG_LENGTH_SHIFT;
-
-			/* check if block-size matches, if not update */
-			cfg = readl(host->regs + SD_EMMC_CFG);
-			blk_len = cfg & (CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
-			blk_len >>= CFG_BLK_LEN_SHIFT;
-			if (blk_len != ilog2(cmd->data->blksz)) {
-				dev_dbg(host->dev, "%s: update blk_len %d -> %d\n",
-					__func__, blk_len,
-					ilog2(cmd->data->blksz));
-				blk_len = ilog2(cmd->data->blksz);
-				cfg &= ~(CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
-				cfg |= blk_len << CFG_BLK_LEN_SHIFT;
-				writel(cfg, host->regs + SD_EMMC_CFG);
-			}
+	if (data) {
+		data->bytes_xfered = 0;
+		cmd_cfg |= CMD_CFG_DATA_IO;
+		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
+				      ilog2(meson_mmc_get_timeout_msecs(data)));
+
+		if (meson_mmc_desc_chain_mode(data)) {
+			meson_mmc_desc_chain_transfer(mmc, cmd_cfg);
+			return;
+		}
+
+		if (data->blocks > 1) {
+			cmd_cfg |= CMD_CFG_BLOCK_MODE;
+			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK,
+					      data->blocks);
+			meson_mmc_set_blksz(mmc, data->blksz);
		} else {
-			desc->cmd_cfg &= ~CMD_CFG_BLOCK_MODE;
-			desc->cmd_cfg |=
-				(cmd->data->blksz & CMD_CFG_LENGTH_MASK) <<
-				CMD_CFG_LENGTH_SHIFT;
+			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, data->blksz);
		}
 
-		cmd->data->bytes_xfered = 0;
-		xfer_bytes = cmd->data->blksz * cmd->data->blocks;
-		if (cmd->data->flags & MMC_DATA_WRITE) {
-			desc->cmd_cfg |= CMD_CFG_DATA_WR;
+		xfer_bytes = data->blksz * data->blocks;
+		if (data->flags & MMC_DATA_WRITE) {
+			cmd_cfg |= CMD_CFG_DATA_WR;
			WARN_ON(xfer_bytes > host->bounce_buf_size);
-			sg_copy_to_buffer(cmd->data->sg, cmd->data->sg_len,
+			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buf, xfer_bytes);
-			cmd->data->bytes_xfered = xfer_bytes;
			dma_wmb();
-		} else {
-			desc->cmd_cfg &= ~CMD_CFG_DATA_WR;
-		}
-
-		if (xfer_bytes > 0) {
-			desc->cmd_cfg &= ~CMD_CFG_DATA_NUM;
-			desc->cmd_data = host->bounce_dma_addr & CMD_DATA_MASK;
-		} else {
-			/* write data to data_addr */
-			desc->cmd_cfg |= CMD_CFG_DATA_NUM;
-			desc->cmd_data = 0;
		}
 
-		cmd_cfg_timeout = 12;
+		cmd_data = host->bounce_dma_addr & CMD_DATA_MASK;
	} else {
-		desc->cmd_cfg &= ~CMD_CFG_DATA_IO;
-		cmd_cfg_timeout = 10;
+		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
+				      ilog2(SD_EMMC_CMD_TIMEOUT));
	}
-	desc->cmd_cfg |= (cmd_cfg_timeout & CMD_CFG_TIMEOUT_MASK) <<
-			 CMD_CFG_TIMEOUT_SHIFT;
-
-	host->cmd = cmd;
 
	/* Last descriptor */
-	desc->cmd_cfg |= CMD_CFG_END_OF_CHAIN;
-	writel(desc->cmd_cfg, host->regs + SD_EMMC_CMD_CFG);
-	writel(desc->cmd_data, host->regs + SD_EMMC_CMD_DAT);
-	writel(desc->cmd_resp, host->regs + SD_EMMC_CMD_RSP);
+	cmd_cfg |= CMD_CFG_END_OF_CHAIN;
+	writel(cmd_cfg, host->regs + SD_EMMC_CMD_CFG);
+	writel(cmd_data, host->regs + SD_EMMC_CMD_DAT);
+	writel(0, host->regs + SD_EMMC_CMD_RSP);
	wmb(); /* ensure descriptor is written before kicked */
-	writel(desc->cmd_arg, host->regs + SD_EMMC_CMD_ARG);
+	writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG);
 }
 
 static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
	struct meson_host *host = mmc_priv(mmc);
+	bool needs_pre_post_req = mrq->data &&
+			!(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);
 
-	WARN_ON(host->mrq != NULL);
+	if (needs_pre_post_req) {
+		meson_mmc_get_transfer_mode(mmc, mrq);
+		if (!meson_mmc_desc_chain_mode(mrq->data))
+			needs_pre_post_req = false;
+	}
+
+	if (needs_pre_post_req)
+		meson_mmc_pre_req(mmc, mrq);
 
	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);
 
-	host->mrq = mrq;
+	meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd);
 
-	if (mrq->sbc)
-		meson_mmc_start_cmd(mmc, mrq->sbc);
-	else
-		meson_mmc_start_cmd(mmc, mrq->cmd);
+	if (needs_pre_post_req)
+		meson_mmc_post_req(mmc, mrq, 0);
 }
 
-static int meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
+static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
 {
	struct meson_host *host = mmc_priv(mmc);
 
@@ -573,15 +712,13 @@ static int meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
	} else if (cmd->flags & MMC_RSP_PRESENT) {
		cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP);
	}
-
-	return 0;
 }
 
 static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
 {
	struct meson_host *host = dev_id;
-	struct mmc_request *mrq;
	struct mmc_command *cmd;
+	struct mmc_data *data;
	u32 irq_en, status, raw_status;
	irqreturn_t ret = IRQ_HANDLED;
 
@@ -590,14 +727,11 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
 
	cmd = host->cmd;
 
-	mrq = host->mrq;
-
-	if (WARN_ON(!mrq))
-		return IRQ_NONE;
-
	if (WARN_ON(!cmd))
		return IRQ_NONE;
 
+	data = cmd->data;
+
	spin_lock(&host->lock);
	irq_en = readl(host->regs + SD_EMMC_IRQ_EN);
	raw_status = readl(host->regs + SD_EMMC_STATUS);
@@ -610,6 +744,8 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
		goto out;
	}
 
+	meson_mmc_read_resp(host->mmc, cmd);
+
	cmd->error = 0;
	if (status & IRQ_RXD_ERR_MASK) {
		dev_dbg(host->dev, "Unhandled IRQ: RXD error\n");
@@ -636,12 +772,16 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
	if (status & IRQ_SDIO)
		dev_dbg(host->dev, "Unhandled IRQ: SDIO.\n");
 
-	if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS))
-		ret = IRQ_WAKE_THREAD;
-	else {
+	if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) {
+		if (data && !cmd->error)
+			data->bytes_xfered = data->blksz * data->blocks;
+		if (meson_mmc_bounce_buf_read(data) ||
+		    meson_mmc_get_next_command(cmd))
+			ret = IRQ_WAKE_THREAD;
+	} else {
		dev_warn(host->dev, "Unknown IRQ! status=0x%04x: MMC CMD%u arg=0x%08x flags=0x%08x stop=%d\n",
			 status, cmd->opcode, cmd->arg,
-			 cmd->flags, mrq->stop ? 1 : 0);
+			 cmd->flags, cmd->mrq->stop ? 1 : 0);
		if (cmd->data) {
			struct mmc_data *data = cmd->data;
 
@@ -656,10 +796,8 @@ out:
	/* ack all (enabled) interrupts */
	writel(status, host->regs + SD_EMMC_STATUS);
 
-	if (ret == IRQ_HANDLED) {
-		meson_mmc_read_resp(host->mmc, cmd);
+	if (ret == IRQ_HANDLED)
		meson_mmc_request_done(host->mmc, cmd->mrq);
-	}
 
	spin_unlock(&host->lock);
	return ret;
@@ -668,35 +806,53 @@ out:
 static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
 {
	struct meson_host *host = dev_id;
-	struct mmc_request *mrq = host->mrq;
-	struct mmc_command *cmd = host->cmd;
+	struct mmc_command *next_cmd, *cmd = host->cmd;
	struct mmc_data *data;
	unsigned int xfer_bytes;
 
-	if (WARN_ON(!mrq))
-		return IRQ_NONE;
-
	if (WARN_ON(!cmd))
		return IRQ_NONE;
 
	data = cmd->data;
-	if (data && data->flags & MMC_DATA_READ) {
+	if (meson_mmc_bounce_buf_read(data)) {
		xfer_bytes = data->blksz * data->blocks;
		WARN_ON(xfer_bytes > host->bounce_buf_size);
		sg_copy_from_buffer(data->sg, data->sg_len,
				    host->bounce_buf, xfer_bytes);
-		data->bytes_xfered = xfer_bytes;
	}
 
-	meson_mmc_read_resp(host->mmc, cmd);
-	if (!data || !data->stop || mrq->sbc)
-		meson_mmc_request_done(host->mmc, mrq);
+	next_cmd = meson_mmc_get_next_command(cmd);
+	if (next_cmd)
+		meson_mmc_start_cmd(host->mmc, next_cmd);
	else
-		meson_mmc_start_cmd(host->mmc, data->stop);
+		meson_mmc_request_done(host->mmc, cmd->mrq);
 
	return IRQ_HANDLED;
 }
 
+static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+	struct meson_host *host = mmc_priv(mmc);
+	struct meson_tuning_params tp_old = host->tp;
+	int ret = -EINVAL, i, cmd_error;
+
+	dev_info(mmc_dev(mmc), "(re)tuning...\n");
+
+	for (i = CLK_PHASE_0; i <= CLK_PHASE_270; i++) {
+		host->tp.rx_phase = i;
+		/* exclude the active parameter set if retuning */
+		if (!memcmp(&tp_old, &host->tp, sizeof(tp_old)) &&
+		    mmc->doing_retune)
+			continue;
+		meson_mmc_set_tuning_params(mmc);
+		ret = mmc_send_tuning(mmc, opcode, &cmd_error);
+		if (!ret)
+			break;
+	}
+
+	return ret;
+}
+
 /*
  * NOTE: we only need this until the GPIO/pinctrl driver can handle
  * interrupts. For now, the MMC core will use this for polling.
@@ -711,10 +867,25 @@ static int meson_mmc_get_cd(struct mmc_host *mmc)
	return status;
 }
 
+static void meson_mmc_cfg_init(struct meson_host *host)
+{
+	u32 cfg = 0;
+
+	cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK,
+			  ilog2(SD_EMMC_CFG_RESP_TIMEOUT));
+	cfg |= FIELD_PREP(CFG_RC_CC_MASK, ilog2(SD_EMMC_CFG_CMD_GAP));
+	cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, ilog2(SD_EMMC_CFG_BLK_SIZE));
+
+	writel(cfg, host->regs + SD_EMMC_CFG);
+}
+
 static const struct mmc_host_ops meson_mmc_ops = {
	.request = meson_mmc_request,
	.set_ios = meson_mmc_set_ios,
	.get_cd = meson_mmc_get_cd,
+	.pre_req = meson_mmc_pre_req,
+	.post_req = meson_mmc_post_req,
+	.execute_tuning = meson_mmc_execute_tuning,
 };
 
 static int meson_mmc_probe(struct platform_device *pdev)
@@ -722,7 +893,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
	struct resource *res;
	struct meson_host *host;
	struct mmc_host *mmc;
-	int ret;
+	int ret, irq;
 
	mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
	if (!mmc)
@@ -754,8 +925,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
		goto free_host;
	}
 
-	host->irq = platform_get_irq(pdev, 0);
-	if (host->irq == 0) {
+	irq = platform_get_irq(pdev, 0);
+	if (!irq) {
		dev_err(&pdev->dev, "failed to get interrupt resource.\n");
		ret = -EINVAL;
		goto free_host;
@@ -771,9 +942,13 @@ static int meson_mmc_probe(struct platform_device *pdev)
	if (ret)
		goto free_host;
 
+	host->tp.core_phase = CLK_PHASE_180;
+	host->tp.tx_phase = CLK_PHASE_0;
+	host->tp.rx_phase = CLK_PHASE_0;
+
	ret = meson_mmc_clk_init(host);
	if (ret)
-		goto free_host;
+		goto err_core_clk;
 
	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);
@@ -783,14 +958,20 @@ static int meson_mmc_probe(struct platform_device *pdev)
	writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);
	writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN);
 
-	ret = devm_request_threaded_irq(&pdev->dev, host->irq,
-					meson_mmc_irq, meson_mmc_irq_thread,
-					IRQF_SHARED, DRIVER_NAME, host);
+	/* set config to sane default */
+	meson_mmc_cfg_init(host);
+
+	ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq,
+					meson_mmc_irq_thread, IRQF_SHARED,
+					NULL, host);
	if (ret)
-		goto free_host;
+		goto err_div_clk;
 
+	mmc->caps |= MMC_CAP_CMD23;
	mmc->max_blk_count = CMD_CFG_LENGTH_MASK;
	mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
+	mmc->max_segs = SD_EMMC_DESC_BUF_LEN / sizeof(struct sd_emmc_desc);
+	mmc->max_seg_size = mmc->max_req_size;
 
	/* data bounce buffer */
	host->bounce_buf_size = mmc->max_req_size;
@@ -800,7 +981,15 @@ static int meson_mmc_probe(struct platform_device *pdev)
	if (host->bounce_buf == NULL) {
		dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n");
		ret = -ENOMEM;
-		goto free_host;
+		goto err_div_clk;
+	}
+
+	host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
+					 &host->descs_dma_addr, GFP_KERNEL);
+	if (!host->descs) {
+		dev_err(host->dev, "Allocating descriptor DMA buffer failed\n");
+		ret = -ENOMEM;
+		goto err_bounce_buf;
	}
 
	mmc->ops = &meson_mmc_ops;
@@ -808,9 +997,14 @@ static int meson_mmc_probe(struct platform_device *pdev)
 
	return 0;
 
-free_host:
+err_bounce_buf:
+	dma_free_coherent(host->dev, host->bounce_buf_size,
+			  host->bounce_buf, host->bounce_dma_addr);
+err_div_clk:
	clk_disable_unprepare(host->cfg_div_clk);
+err_core_clk:
	clk_disable_unprepare(host->core_clk);
+free_host:
	mmc_free_host(mmc);
	return ret;
 }
@@ -819,9 +1013,13 @@ static int meson_mmc_remove(struct platform_device *pdev)
 {
	struct meson_host *host = dev_get_drvdata(&pdev->dev);
 
+	mmc_remove_host(host->mmc);
+
	/* disable interrupts */
	writel(0, host->regs + SD_EMMC_IRQ_EN);
 
+	dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
+			  host->descs, host->descs_dma_addr);
	dma_free_coherent(host->dev, host->bounce_buf_size,
			  host->bounce_buf, host->bounce_dma_addr);
 
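
A recurring transformation in the meson-gx diff above is the replacement of SHIFT/WIDTH/MASK define triples with a single GENMASK() mask plus FIELD_PREP()/FIELD_GET() from linux/bitfield.h, which derive the shift from the mask at compile time. A minimal sketch of the pattern (CLK_DIV_MASK is taken from the driver; the before/after lines are illustrative, not a verbatim hunk):

	#include <linux/bits.h>		/* GENMASK() */
	#include <linux/bitfield.h>	/* FIELD_PREP(), FIELD_GET(), __bf_shf() */

	#define CLK_DIV_MASK	GENMASK(5, 0)	/* bits 5..0 of SD_EMMC_CLOCK */

	/* before: position and width tracked by hand in separate #defines */
	reg = (reg & ~(0x3f << 0)) | ((div & 0x3f) << 0);

	/* after: the mask alone encodes both position and width */
	reg &= ~CLK_DIV_MASK;
	reg |= FIELD_PREP(CLK_DIV_MASK, div);	/* shift div into bits 5..0 */
	div = FIELD_GET(CLK_DIV_MASK, reg);	/* and extract it back out */

Where an API still wants a raw shift or width, the diff computes them from the mask too: __bf_shf(CLK_DIV_MASK) gives the shift and __builtin_popcountl(CLK_DIV_MASK) the width, as in the clk_mux/clk_divider setup above.
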
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index e77d79c8cd9f..476e53d30128 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -888,10 +888,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
	u32 clock_rate;
	unsigned long timeout;
 
-	if (data->flags & MMC_DATA_READ)
-		direction = DMA_FROM_DEVICE;
-	else
-		direction = DMA_TO_DEVICE;
+	direction = mmc_get_dma_dir(data);
	mmc_spi_setup_data_message(host, multiple, direction);
	t = &host->t;
 
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 0c6420bb2f00..d1ca2f489054 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -516,17 +516,14 @@ static void mmci_dma_data_error(struct mmci_host *host)
 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 {
	struct dma_chan *chan;
-	enum dma_data_direction dir;
 
-	if (data->flags & MMC_DATA_READ) {
-		dir = DMA_FROM_DEVICE;
+	if (data->flags & MMC_DATA_READ)
		chan = host->dma_rx_channel;
-	} else {
-		dir = DMA_TO_DEVICE;
+	else
		chan = host->dma_tx_channel;
-	}
 
-	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
+		     mmc_get_dma_dir(data));
 }
 
 static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
@@ -589,17 +586,14 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
-	enum dma_data_direction buffer_dirn;
	int nr_sg;
	unsigned long flags = DMA_CTRL_ACK;
 
	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
-		buffer_dirn = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
-		buffer_dirn = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}
 
@@ -612,7 +606,8 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
		return -EINVAL;
 
	device = chan->device;
-	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
+	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
+			   mmc_get_dma_dir(data));
	if (nr_sg == 0)
		return -EINVAL;
 
@@ -631,7 +626,8 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
	return 0;
 
 unmap_exit:
-	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
+	dma_unmap_sg(device->dev, data->sg, data->sg_len,
+		     mmc_get_dma_dir(data));
	return -ENOMEM;
 }
 
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index bbad309679cf..d4dc55ac7dea 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -256,7 +256,7 @@ static void moxart_dma_complete(void *param)
 
 static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
 {
-	u32 len, dir_data, dir_slave;
+	u32 len, dir_slave;
	long dma_time;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *dma_chan;
@@ -266,16 +266,14 @@ static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
 
	if (data->flags & MMC_DATA_WRITE) {
		dma_chan = host->dma_chan_tx;
-		dir_data = DMA_TO_DEVICE;
		dir_slave = DMA_MEM_TO_DEV;
	} else {
		dma_chan = host->dma_chan_rx;
-		dir_data = DMA_FROM_DEVICE;
		dir_slave = DMA_DEV_TO_MEM;
	}
 
	len = dma_map_sg(dma_chan->device->dev, data->sg,
-			 data->sg_len, dir_data);
+			 data->sg_len, mmc_get_dma_dir(data));
 
	if (len > 0) {
		desc = dmaengine_prep_slave_sg(dma_chan, data->sg,
@@ -301,7 +299,7 @@ static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
 
	dma_unmap_sg(dma_chan->device->dev,
		     data->sg, data->sg_len,
-		     dir_data);
+		     mmc_get_dma_dir(data));
 }
 
 
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index b235d8da0602..5c1e178fc5f9 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -76,6 +76,7 @@
76#define MSDC_PATCH_BIT1 0xb4 76#define MSDC_PATCH_BIT1 0xb4
77#define MSDC_PAD_TUNE 0xec 77#define MSDC_PAD_TUNE 0xec
78#define PAD_DS_TUNE 0x188 78#define PAD_DS_TUNE 0x188
79#define PAD_CMD_TUNE 0x18c
79#define EMMC50_CFG0 0x208 80#define EMMC50_CFG0 0x208
80 81
81/*--------------------------------------------------------------------------*/ 82/*--------------------------------------------------------------------------*/
@@ -211,13 +212,18 @@
211#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */ 212#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
212#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */ 213#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */
213 214
215#define MSDC_PAD_TUNE_DATWRDLY (0x1f << 0) /* RW */
214#define MSDC_PAD_TUNE_DATRRDLY (0x1f << 8) /* RW */ 216#define MSDC_PAD_TUNE_DATRRDLY (0x1f << 8) /* RW */
215#define MSDC_PAD_TUNE_CMDRDLY (0x1f << 16) /* RW */ 217#define MSDC_PAD_TUNE_CMDRDLY (0x1f << 16) /* RW */
218#define MSDC_PAD_TUNE_CMDRRDLY (0x1f << 22) /* RW */
219#define MSDC_PAD_TUNE_CLKTDLY (0x1f << 27) /* RW */
216 220
217#define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */ 221#define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */
218#define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */ 222#define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */
219#define PAD_DS_TUNE_DLY3 (0x1f << 12) /* RW */ 223#define PAD_DS_TUNE_DLY3 (0x1f << 12) /* RW */
220 224
225#define PAD_CMD_TUNE_RX_DLY3 (0x1f << 1) /* RW */
226
221#define EMMC50_CFG_PADCMD_LATCHCK (0x1 << 0) /* RW */ 227#define EMMC50_CFG_PADCMD_LATCHCK (0x1 << 0) /* RW */
222#define EMMC50_CFG_CRCSTS_EDGE (0x1 << 3) /* RW */ 228#define EMMC50_CFG_CRCSTS_EDGE (0x1 << 3) /* RW */
223#define EMMC50_CFG_CFCSTS_SEL (0x1 << 4) /* RW */ 229#define EMMC50_CFG_CFCSTS_SEL (0x1 << 4) /* RW */
@@ -285,12 +291,14 @@ struct msdc_save_para {
285 u32 patch_bit0; 291 u32 patch_bit0;
286 u32 patch_bit1; 292 u32 patch_bit1;
287 u32 pad_ds_tune; 293 u32 pad_ds_tune;
294 u32 pad_cmd_tune;
288 u32 emmc50_cfg0; 295 u32 emmc50_cfg0;
289}; 296};
290 297
291struct msdc_tune_para { 298struct msdc_tune_para {
292 u32 iocon; 299 u32 iocon;
293 u32 pad_tune; 300 u32 pad_tune;
301 u32 pad_cmd_tune;
294}; 302};
295 303
296struct msdc_delay_phase { 304struct msdc_delay_phase {
@@ -332,6 +340,10 @@ struct msdc_host {
332 unsigned char timing; 340 unsigned char timing;
333 bool vqmmc_enabled; 341 bool vqmmc_enabled;
334 u32 hs400_ds_delay; 342 u32 hs400_ds_delay;
343 u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */
344 u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */
345 bool hs400_cmd_resp_sel_rising;
346 /* cmd response sample selection for HS400 */
335 bool hs400_mode; /* current eMMC will run at hs400 mode */ 347 bool hs400_mode; /* current eMMC will run at hs400 mode */
336 struct msdc_save_para save_para; /* used when gate HCLK */ 348 struct msdc_save_para save_para; /* used when gate HCLK */
337 struct msdc_tune_para def_tune_para; /* default tune setting */ 349 struct msdc_tune_para def_tune_para; /* default tune setting */
@@ -462,11 +474,9 @@ static void msdc_prepare_data(struct msdc_host *host, struct mmc_request *mrq)
462 struct mmc_data *data = mrq->data; 474 struct mmc_data *data = mrq->data;
463 475
464 if (!(data->host_cookie & MSDC_PREPARE_FLAG)) { 476 if (!(data->host_cookie & MSDC_PREPARE_FLAG)) {
465 bool read = (data->flags & MMC_DATA_READ) != 0;
466
467 data->host_cookie |= MSDC_PREPARE_FLAG; 477 data->host_cookie |= MSDC_PREPARE_FLAG;
468 data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len, 478 data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len,
469 read ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 479 mmc_get_dma_dir(data));
470 } 480 }
471} 481}
472 482
@@ -478,10 +488,8 @@ static void msdc_unprepare_data(struct msdc_host *host, struct mmc_request *mrq)
478 return; 488 return;
479 489
480 if (data->host_cookie & MSDC_PREPARE_FLAG) { 490 if (data->host_cookie & MSDC_PREPARE_FLAG) {
481 bool read = (data->flags & MMC_DATA_READ) != 0;
482
483 dma_unmap_sg(host->dev, data->sg, data->sg_len, 491 dma_unmap_sg(host->dev, data->sg, data->sg_len,
484 read ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 492 mmc_get_dma_dir(data));
485 data->host_cookie &= ~MSDC_PREPARE_FLAG; 493 data->host_cookie &= ~MSDC_PREPARE_FLAG;
486 } 494 }
487} 495}
@@ -601,8 +609,14 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
601 } else { 609 } else {
602 writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON); 610 writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON);
603 writel(host->saved_tune_para.pad_tune, host->base + MSDC_PAD_TUNE); 611 writel(host->saved_tune_para.pad_tune, host->base + MSDC_PAD_TUNE);
612 writel(host->saved_tune_para.pad_cmd_tune,
613 host->base + PAD_CMD_TUNE);
604 } 614 }
605 615
616 if (timing == MMC_TIMING_MMC_HS400)
617 sdr_set_field(host->base + PAD_CMD_TUNE,
618 MSDC_PAD_TUNE_CMDRRDLY,
619 host->hs400_cmd_int_delay);
606 dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->sclk, timing); 620 dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->sclk, timing);
607} 621}
608 622
@@ -1303,7 +1317,7 @@ static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay)
1303 len_final = len; 1317 len_final = len;
1304 } 1318 }
1305 start += len ? len : 1; 1319 start += len ? len : 1;
1306 if (len >= 8 && start_final < 4) 1320 if (len >= 12 && start_final < 4)
1307 break; 1321 break;
1308 } 1322 }
1309 1323
@@ -1326,36 +1340,67 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
1326 struct msdc_host *host = mmc_priv(mmc); 1340 struct msdc_host *host = mmc_priv(mmc);
1327 u32 rise_delay = 0, fall_delay = 0; 1341 u32 rise_delay = 0, fall_delay = 0;
1328 struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; 1342 struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
1343 struct msdc_delay_phase internal_delay_phase;
1329 u8 final_delay, final_maxlen; 1344 u8 final_delay, final_maxlen;
1345 u32 internal_delay = 0;
1330 int cmd_err; 1346 int cmd_err;
1331 int i; 1347 int i, j;
1348
1349 if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
1350 mmc->ios.timing == MMC_TIMING_UHS_SDR104)
1351 sdr_set_field(host->base + MSDC_PAD_TUNE,
1352 MSDC_PAD_TUNE_CMDRRDLY,
1353 host->hs200_cmd_int_delay);
1332 1354
1333 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 1355 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
1334 for (i = 0 ; i < PAD_DELAY_MAX; i++) { 1356 for (i = 0 ; i < PAD_DELAY_MAX; i++) {
1335 sdr_set_field(host->base + MSDC_PAD_TUNE, 1357 sdr_set_field(host->base + MSDC_PAD_TUNE,
1336 MSDC_PAD_TUNE_CMDRDLY, i); 1358 MSDC_PAD_TUNE_CMDRDLY, i);
1337 mmc_send_tuning(mmc, opcode, &cmd_err); 1359 /*
1338 if (!cmd_err) 1360 * Using the same parameters, it may sometimes pass the test,
1339 rise_delay |= (1 << i); 1361 * but sometimes it may fail. To make sure the parameters are
1362 * more stable, we test each set of parameters 3 times.
1363 */
1364 for (j = 0; j < 3; j++) {
1365 mmc_send_tuning(mmc, opcode, &cmd_err);
1366 if (!cmd_err) {
1367 rise_delay |= (1 << i);
1368 } else {
1369 rise_delay &= ~(1 << i);
1370 break;
1371 }
1372 }
1340 } 1373 }
1341 final_rise_delay = get_best_delay(host, rise_delay); 1374 final_rise_delay = get_best_delay(host, rise_delay);
1342 /* if rising edge has enough margin, then do not scan falling edge */ 1375 /* if rising edge has enough margin, then do not scan falling edge */
1343 if (final_rise_delay.maxlen >= 10 || 1376 if (final_rise_delay.maxlen >= 12 && final_rise_delay.start < 4)
1344 (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
1345 goto skip_fall; 1377 goto skip_fall;
1346 1378
1347 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 1379 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
1348 for (i = 0; i < PAD_DELAY_MAX; i++) { 1380 for (i = 0; i < PAD_DELAY_MAX; i++) {
1349 sdr_set_field(host->base + MSDC_PAD_TUNE, 1381 sdr_set_field(host->base + MSDC_PAD_TUNE,
1350 MSDC_PAD_TUNE_CMDRDLY, i); 1382 MSDC_PAD_TUNE_CMDRDLY, i);
1351 mmc_send_tuning(mmc, opcode, &cmd_err); 1383 /*
1352 if (!cmd_err) 1384 * Using the same parameters, it may sometimes pass the test,
1353 fall_delay |= (1 << i); 1385 * but sometimes it may fail. To make sure the parameters are
1386 * more stable, we test each set of parameters 3 times.
1387 */
1388 for (j = 0; j < 3; j++) {
1389 mmc_send_tuning(mmc, opcode, &cmd_err);
1390 if (!cmd_err) {
1391 fall_delay |= (1 << i);
1392 } else {
1393 fall_delay &= ~(1 << i);
1394 break;
1395 }
1396 }
1354 } 1397 }
1355 final_fall_delay = get_best_delay(host, fall_delay); 1398 final_fall_delay = get_best_delay(host, fall_delay);
1356 1399
1357skip_fall: 1400skip_fall:
1358 final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen); 1401 final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
1402 if (final_fall_delay.maxlen >= 12 && final_fall_delay.start < 4)
1403 final_maxlen = final_fall_delay.maxlen;
1359 if (final_maxlen == final_rise_delay.maxlen) { 1404 if (final_maxlen == final_rise_delay.maxlen) {
1360 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 1405 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
1361 sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY, 1406 sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
@@ -1367,7 +1412,71 @@ skip_fall:
1367 final_fall_delay.final_phase); 1412 final_fall_delay.final_phase);
1368 final_delay = final_fall_delay.final_phase; 1413 final_delay = final_fall_delay.final_phase;
1369 } 1414 }
1415 if (host->hs200_cmd_int_delay)
1416 goto skip_internal;
1417
1418 for (i = 0; i < PAD_DELAY_MAX; i++) {
1419 sdr_set_field(host->base + MSDC_PAD_TUNE,
1420 MSDC_PAD_TUNE_CMDRRDLY, i);
1421 mmc_send_tuning(mmc, opcode, &cmd_err);
1422 if (!cmd_err)
1423 internal_delay |= (1 << i);
1424 }
1425 dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay);
1426 internal_delay_phase = get_best_delay(host, internal_delay);
1427 sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY,
1428 internal_delay_phase.final_phase);
1429skip_internal:
1430 dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay);
1431 return final_delay == 0xff ? -EIO : 0;
1432}
1433
1434static int hs400_tune_response(struct mmc_host *mmc, u32 opcode)
1435{
1436 struct msdc_host *host = mmc_priv(mmc);
1437 u32 cmd_delay = 0;
1438 struct msdc_delay_phase final_cmd_delay = { 0,};
1439 u8 final_delay;
1440 int cmd_err;
1441 int i, j;
1442
1443 /* select EMMC50 PAD CMD tune */
1444 sdr_set_bits(host->base + PAD_CMD_TUNE, BIT(0));
1445
1446 if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
1447 mmc->ios.timing == MMC_TIMING_UHS_SDR104)
1448 sdr_set_field(host->base + MSDC_PAD_TUNE,
1449 MSDC_PAD_TUNE_CMDRRDLY,
1450 host->hs200_cmd_int_delay);
1451
1452 if (host->hs400_cmd_resp_sel_rising)
1453 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
1454 else
1455 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
1456 for (i = 0 ; i < PAD_DELAY_MAX; i++) {
1457 sdr_set_field(host->base + PAD_CMD_TUNE,
1458 PAD_CMD_TUNE_RX_DLY3, i);
1459 /*
1460 * Using the same parameters, it may sometimes pass the test,
1461 * but sometimes it may fail. To make sure the parameters are
1462 * more stable, we test each set of parameters 3 times.
1463 */
1464 for (j = 0; j < 3; j++) {
1465 mmc_send_tuning(mmc, opcode, &cmd_err);
1466 if (!cmd_err) {
1467 cmd_delay |= (1 << i);
1468 } else {
1469 cmd_delay &= ~(1 << i);
1470 break;
1471 }
1472 }
1473 }
1474 final_cmd_delay = get_best_delay(host, cmd_delay);
1475 sdr_set_field(host->base + PAD_CMD_TUNE, PAD_CMD_TUNE_RX_DLY3,
1476 final_cmd_delay.final_phase);
1477 final_delay = final_cmd_delay.final_phase;
1370 1478
1479 dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay);
1371 return final_delay == 0xff ? -EIO : 0; 1480 return final_delay == 0xff ? -EIO : 0;
1372} 1481}
1373 1482
@@ -1390,7 +1499,7 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
1390 } 1499 }
1391 final_rise_delay = get_best_delay(host, rise_delay); 1500 final_rise_delay = get_best_delay(host, rise_delay);
1392 /* if rising edge has enough margin, then do not scan falling edge */ 1501 /* if rising edge has enough margin, then do not scan falling edge */
1393 if (final_rise_delay.maxlen >= 10 || 1502 if (final_rise_delay.maxlen >= 12 ||
1394 (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4)) 1503 (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
1395 goto skip_fall; 1504 goto skip_fall;
1396 1505
@@ -1423,6 +1532,7 @@ skip_fall:
1423 final_delay = final_fall_delay.final_phase; 1532 final_delay = final_fall_delay.final_phase;
1424 } 1533 }
1425 1534
1535 dev_dbg(host->dev, "Final data pad delay: %x\n", final_delay);
1426 return final_delay == 0xff ? -EIO : 0; 1536 return final_delay == 0xff ? -EIO : 0;
1427} 1537}
1428 1538
@@ -1431,7 +1541,10 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
1431 struct msdc_host *host = mmc_priv(mmc); 1541 struct msdc_host *host = mmc_priv(mmc);
1432 int ret; 1542 int ret;
1433 1543
1434 ret = msdc_tune_response(mmc, opcode); 1544 if (host->hs400_mode)
1545 ret = hs400_tune_response(mmc, opcode);
1546 else
1547 ret = msdc_tune_response(mmc, opcode);
1435 if (ret == -EIO) { 1548 if (ret == -EIO) {
1436 dev_err(host->dev, "Tune response fail!\n"); 1549 dev_err(host->dev, "Tune response fail!\n");
1437 return ret; 1550 return ret;
@@ -1444,6 +1557,7 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
1444 1557
1445 host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON); 1558 host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
1446 host->saved_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE); 1559 host->saved_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
1560 host->saved_tune_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
1447 return ret; 1561 return ret;
1448} 1562}
1449 1563
@@ -1478,6 +1592,25 @@ static struct mmc_host_ops mt_msdc_ops = {
1478 .hw_reset = msdc_hw_reset, 1592 .hw_reset = msdc_hw_reset,
1479}; 1593};
1480 1594
1595static void msdc_of_property_parse(struct platform_device *pdev,
1596 struct msdc_host *host)
1597{
1598 of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay",
1599 &host->hs400_ds_delay);
1600
1601 of_property_read_u32(pdev->dev.of_node, "mediatek,hs200-cmd-int-delay",
1602 &host->hs200_cmd_int_delay);
1603
1604 of_property_read_u32(pdev->dev.of_node, "mediatek,hs400-cmd-int-delay",
1605 &host->hs400_cmd_int_delay);
1606
1607 if (of_property_read_bool(pdev->dev.of_node,
1608 "mediatek,hs400-cmd-resp-sel-rising"))
1609 host->hs400_cmd_resp_sel_rising = true;
1610 else
1611 host->hs400_cmd_resp_sel_rising = false;
1612}
1613
1481static int msdc_drv_probe(struct platform_device *pdev) 1614static int msdc_drv_probe(struct platform_device *pdev)
1482{ 1615{
1483 struct mmc_host *mmc; 1616 struct mmc_host *mmc;
@@ -1549,10 +1682,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
1549 goto host_free; 1682 goto host_free;
1550 } 1683 }
1551 1684
1552 if (!of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay", 1685 msdc_of_property_parse(pdev, host);
1553 &host->hs400_ds_delay))
1554 dev_dbg(&pdev->dev, "hs400-ds-delay: %x\n",
1555 host->hs400_ds_delay);
1556 1686
1557 host->dev = &pdev->dev; 1687 host->dev = &pdev->dev;
1558 host->mmc = mmc; 1688 host->mmc = mmc;
@@ -1664,6 +1794,7 @@ static void msdc_save_reg(struct msdc_host *host)
1664 host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT); 1794 host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT);
1665 host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1); 1795 host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1);
1666 host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE); 1796 host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE);
1797 host->save_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
1667 host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0); 1798 host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0);
1668} 1799}
1669 1800
@@ -1676,6 +1807,7 @@ static void msdc_restore_reg(struct msdc_host *host)
1676 writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT); 1807 writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT);
1677 writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1); 1808 writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1);
1678 writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE); 1809 writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE);
1810 writel(host->save_para.pad_cmd_tune, host->base + PAD_CMD_TUNE);
1679 writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0); 1811 writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0);
1680} 1812}
1681 1813
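
A side note on msdc_of_property_parse() above: of_property_read_bool()
already returns a bool, so the trailing if/else could be collapsed to a
single assignment (a sketch, assuming hs400_cmd_resp_sel_rising stays a
bool field):

	host->hs400_cmd_resp_sel_rising =
		of_property_read_bool(pdev->dev.of_node,
				      "mediatek,hs400-cmd-resp-sel-rising");
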
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 42296e55b9de..58d74b8d6c79 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -125,10 +125,10 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
125 return 1; 125 return 1;
126 } else { 126 } else {
127 dma_addr_t phys_addr; 127 dma_addr_t phys_addr;
128 int dma_dir = (data->flags & MMC_DATA_READ) ? 128
129 DMA_FROM_DEVICE : DMA_TO_DEVICE; 129 host->sg_frags = dma_map_sg(mmc_dev(host->mmc),
130 host->sg_frags = dma_map_sg(mmc_dev(host->mmc), data->sg, 130 data->sg, data->sg_len,
131 data->sg_len, dma_dir); 131 mmc_get_dma_dir(data));
132 phys_addr = sg_dma_address(data->sg); 132 phys_addr = sg_dma_address(data->sg);
133 mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff); 133 mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff);
134 mvsd_write(MVSD_SYS_ADDR_HI, (u32)phys_addr >> 16); 134 mvsd_write(MVSD_SYS_ADDR_HI, (u32)phys_addr >> 16);
@@ -294,8 +294,7 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
294 host->pio_size = 0; 294 host->pio_size = 0;
295 } else { 295 } else {
296 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags, 296 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
297 (data->flags & MMC_DATA_READ) ? 297 mmc_get_dma_dir(data));
298 DMA_FROM_DEVICE : DMA_TO_DEVICE);
299 } 298 }
300 299
301 if (err_status & MVSD_ERR_DATA_TIMEOUT) 300 if (err_status & MVSD_ERR_DATA_TIMEOUT)
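
Several conversions in this pull, starting with mvsdio above and continuing
through omap_hsmmc and s3cmci below, replace open-coded DMA direction
selection with the new mmc_get_dma_dir() helper. For reference, the helper
is a trivial inline in include/linux/mmc/host.h, essentially:

	static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data)
	{
		/* writes map memory-to-device, everything else device-to-memory */
		return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	}

This removes one copy of the same ternary from every host driver.
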
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index a58bd653ed8b..8c39dccacf39 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -935,15 +935,6 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
935 OMAP_HSMMC_WRITE(host->base, CMD, cmdreg); 935 OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
936} 936}
937 937
938static int
939omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
940{
941 if (data->flags & MMC_DATA_WRITE)
942 return DMA_TO_DEVICE;
943 else
944 return DMA_FROM_DEVICE;
945}
946
947static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host, 938static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
948 struct mmc_data *data) 939 struct mmc_data *data)
949{ 940{
@@ -1055,7 +1046,7 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
1055 dmaengine_terminate_all(chan); 1046 dmaengine_terminate_all(chan);
1056 dma_unmap_sg(chan->device->dev, 1047 dma_unmap_sg(chan->device->dev,
1057 host->data->sg, host->data->sg_len, 1048 host->data->sg, host->data->sg_len,
1058 omap_hsmmc_get_dma_dir(host, host->data)); 1049 mmc_get_dma_dir(host->data));
1059 1050
1060 host->data->host_cookie = 0; 1051 host->data->host_cookie = 0;
1061 } 1052 }
@@ -1350,7 +1341,7 @@ static void omap_hsmmc_dma_callback(void *param)
1350 if (!data->host_cookie) 1341 if (!data->host_cookie)
1351 dma_unmap_sg(chan->device->dev, 1342 dma_unmap_sg(chan->device->dev,
1352 data->sg, data->sg_len, 1343 data->sg, data->sg_len,
1353 omap_hsmmc_get_dma_dir(host, data)); 1344 mmc_get_dma_dir(data));
1354 1345
1355 req_in_progress = host->req_in_progress; 1346 req_in_progress = host->req_in_progress;
1356 host->dma_ch = -1; 1347 host->dma_ch = -1;
@@ -1383,7 +1374,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
1383 /* Check if next job is already prepared */ 1374 /* Check if next job is already prepared */
1384 if (next || data->host_cookie != host->next_data.cookie) { 1375 if (next || data->host_cookie != host->next_data.cookie) {
1385 dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len, 1376 dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
1386 omap_hsmmc_get_dma_dir(host, data)); 1377 mmc_get_dma_dir(data));
1387 1378
1388 } else { 1379 } else {
1389 dma_len = host->next_data.dma_len; 1380 dma_len = host->next_data.dma_len;
@@ -1569,7 +1560,7 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
1569 struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data); 1560 struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);
1570 1561
1571 dma_unmap_sg(c->device->dev, data->sg, data->sg_len, 1562 dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
1572 omap_hsmmc_get_dma_dir(host, data)); 1563 mmc_get_dma_dir(data));
1573 data->host_cookie = 0; 1564 data->host_cookie = 0;
1574 } 1565 }
1575} 1566}
@@ -1770,8 +1761,8 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
1770 */ 1761 */
1771 if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) { 1762 if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) {
1772 struct pinctrl *p = devm_pinctrl_get(host->dev); 1763 struct pinctrl *p = devm_pinctrl_get(host->dev);
1773 if (!p) { 1764 if (IS_ERR(p)) {
1774 ret = -ENODEV; 1765 ret = PTR_ERR(p);
1775 goto err_free_irq; 1766 goto err_free_irq;
1776 } 1767 }
1777 if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) { 1768 if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) {
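
The pinctrl fix above is worth calling out: with pinctrl enabled,
devm_pinctrl_get() reports failure as an ERR_PTR-encoded error, never as
NULL, so the old "if (!p)" check could not fire. The usual consumer idiom
is (a minimal sketch, dev being the struct device in scope):

	struct pinctrl *p = devm_pinctrl_get(dev);

	if (IS_ERR(p))
		return PTR_ERR(p);	/* propagate the encoded errno */
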
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 7a173f8c455b..8896bf533dc7 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -24,6 +24,10 @@
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/irq.h> 25#include <linux/irq.h>
26#include <linux/io.h> 26#include <linux/io.h>
27#include <linux/of.h>
28#include <linux/of_device.h>
29#include <linux/of_gpio.h>
30#include <linux/mmc/slot-gpio.h>
27 31
28#include <plat/gpio-cfg.h> 32#include <plat/gpio-cfg.h>
29#include <mach/dma.h> 33#include <mach/dma.h>
@@ -807,21 +811,6 @@ irq_out:
807 811
808} 812}
809 813
810/*
811 * ISR for the CardDetect Pin
812*/
813
814static irqreturn_t s3cmci_irq_cd(int irq, void *dev_id)
815{
816 struct s3cmci_host *host = (struct s3cmci_host *)dev_id;
817
818 dbg(host, dbg_irq, "card detect\n");
819
820 mmc_detect_change(host->mmc, msecs_to_jiffies(500));
821
822 return IRQ_HANDLED;
823}
824
825static void s3cmci_dma_done_callback(void *arg) 814static void s3cmci_dma_done_callback(void *arg)
826{ 815{
827 struct s3cmci_host *host = arg; 816 struct s3cmci_host *host = arg;
@@ -1104,7 +1093,7 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
1104 conf.direction = DMA_MEM_TO_DEV; 1093 conf.direction = DMA_MEM_TO_DEV;
1105 1094
1106 dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 1095 dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
1107 rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 1096 mmc_get_dma_dir(data));
1108 1097
1109 dmaengine_slave_config(host->dma, &conf); 1098 dmaengine_slave_config(host->dma, &conf);
1110 desc = dmaengine_prep_slave_sg(host->dma, data->sg, data->sg_len, 1099 desc = dmaengine_prep_slave_sg(host->dma, data->sg, data->sg_len,
@@ -1121,7 +1110,7 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
1121 1110
1122unmap_exit: 1111unmap_exit:
1123 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 1112 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
1124 rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 1113 mmc_get_dma_dir(data));
1125 return -ENOMEM; 1114 return -ENOMEM;
1126} 1115}
1127 1116
@@ -1177,19 +1166,6 @@ static void s3cmci_send_request(struct mmc_host *mmc)
1177 s3cmci_enable_irq(host, true); 1166 s3cmci_enable_irq(host, true);
1178} 1167}
1179 1168
1180static int s3cmci_card_present(struct mmc_host *mmc)
1181{
1182 struct s3cmci_host *host = mmc_priv(mmc);
1183 struct s3c24xx_mci_pdata *pdata = host->pdata;
1184 int ret;
1185
1186 if (pdata->no_detect)
1187 return -ENOSYS;
1188
1189 ret = gpio_get_value(pdata->gpio_detect) ? 0 : 1;
1190 return ret ^ pdata->detect_invert;
1191}
1192
1193static void s3cmci_request(struct mmc_host *mmc, struct mmc_request *mrq) 1169static void s3cmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1194{ 1170{
1195 struct s3cmci_host *host = mmc_priv(mmc); 1171 struct s3cmci_host *host = mmc_priv(mmc);
@@ -1198,7 +1174,7 @@ static void s3cmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1198 host->cmd_is_stop = 0; 1174 host->cmd_is_stop = 0;
1199 host->mrq = mrq; 1175 host->mrq = mrq;
1200 1176
1201 if (s3cmci_card_present(mmc) == 0) { 1177 if (mmc_gpio_get_cd(mmc) == 0) {
1202 dbg(host, dbg_err, "%s: no medium present\n", __func__); 1178 dbg(host, dbg_err, "%s: no medium present\n", __func__);
1203 host->mrq->cmd->error = -ENOMEDIUM; 1179 host->mrq->cmd->error = -ENOMEDIUM;
1204 mmc_request_done(mmc, mrq); 1180 mmc_request_done(mmc, mrq);
@@ -1242,8 +1218,9 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1242 case MMC_POWER_ON: 1218 case MMC_POWER_ON:
1243 case MMC_POWER_UP: 1219 case MMC_POWER_UP:
1244 /* Configure GPE5...GPE10 pins in SD mode */ 1220 /* Configure GPE5...GPE10 pins in SD mode */
1245 s3c_gpio_cfgall_range(S3C2410_GPE(5), 6, S3C_GPIO_SFN(2), 1221 if (!host->pdev->dev.of_node)
1246 S3C_GPIO_PULL_NONE); 1222 s3c_gpio_cfgall_range(S3C2410_GPE(5), 6, S3C_GPIO_SFN(2),
1223 S3C_GPIO_PULL_NONE);
1247 1224
1248 if (host->pdata->set_power) 1225 if (host->pdata->set_power)
1249 host->pdata->set_power(ios->power_mode, ios->vdd); 1226 host->pdata->set_power(ios->power_mode, ios->vdd);
@@ -1255,7 +1232,8 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1255 1232
1256 case MMC_POWER_OFF: 1233 case MMC_POWER_OFF:
1257 default: 1234 default:
1258 gpio_direction_output(S3C2410_GPE(5), 0); 1235 if (!host->pdev->dev.of_node)
1236 gpio_direction_output(S3C2410_GPE(5), 0);
1259 1237
1260 if (host->is2440) 1238 if (host->is2440)
1261 mci_con |= S3C2440_SDICON_SDRESET; 1239 mci_con |= S3C2440_SDICON_SDRESET;
@@ -1295,21 +1273,6 @@ static void s3cmci_reset(struct s3cmci_host *host)
1295 writel(con, host->base + S3C2410_SDICON); 1273 writel(con, host->base + S3C2410_SDICON);
1296} 1274}
1297 1275
1298static int s3cmci_get_ro(struct mmc_host *mmc)
1299{
1300 struct s3cmci_host *host = mmc_priv(mmc);
1301 struct s3c24xx_mci_pdata *pdata = host->pdata;
1302 int ret;
1303
1304 if (pdata->no_wprotect)
1305 return 0;
1306
1307 ret = gpio_get_value(pdata->gpio_wprotect) ? 1 : 0;
1308 ret ^= pdata->wprotect_invert;
1309
1310 return ret;
1311}
1312
1313static void s3cmci_enable_sdio_irq(struct mmc_host *mmc, int enable) 1276static void s3cmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1314{ 1277{
1315 struct s3cmci_host *host = mmc_priv(mmc); 1278 struct s3cmci_host *host = mmc_priv(mmc);
@@ -1353,8 +1316,8 @@ static void s3cmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1353static struct mmc_host_ops s3cmci_ops = { 1316static struct mmc_host_ops s3cmci_ops = {
1354 .request = s3cmci_request, 1317 .request = s3cmci_request,
1355 .set_ios = s3cmci_set_ios, 1318 .set_ios = s3cmci_set_ios,
1356 .get_ro = s3cmci_get_ro, 1319 .get_ro = mmc_gpio_get_ro,
1357 .get_cd = s3cmci_card_present, 1320 .get_cd = mmc_gpio_get_cd,
1358 .enable_sdio_irq = s3cmci_enable_sdio_irq, 1321 .enable_sdio_irq = s3cmci_enable_sdio_irq,
1359}; 1322};
1360 1323
@@ -1545,21 +1508,14 @@ static inline void s3cmci_debugfs_remove(struct s3cmci_host *host) { }
1545 1508
1546#endif /* CONFIG_DEBUG_FS */ 1509#endif /* CONFIG_DEBUG_FS */
1547 1510
1548static int s3cmci_probe(struct platform_device *pdev) 1511static int s3cmci_probe_pdata(struct s3cmci_host *host)
1549{ 1512{
1550 struct s3cmci_host *host; 1513 struct platform_device *pdev = host->pdev;
1551 struct mmc_host *mmc; 1514 struct mmc_host *mmc = host->mmc;
1552 int ret; 1515 struct s3c24xx_mci_pdata *pdata;
1553 int is2440; 1516 int i, ret;
1554 int i;
1555 1517
1556 is2440 = platform_get_device_id(pdev)->driver_data; 1518 host->is2440 = platform_get_device_id(pdev)->driver_data;
1557
1558 mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev);
1559 if (!mmc) {
1560 ret = -ENOMEM;
1561 goto probe_out;
1562 }
1563 1519
1564 for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++) { 1520 for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++) {
1565 ret = gpio_request(i, dev_name(&pdev->dev)); 1521 ret = gpio_request(i, dev_name(&pdev->dev));
@@ -1569,25 +1525,101 @@ static int s3cmci_probe(struct platform_device *pdev)
1569 for (i--; i >= S3C2410_GPE(5); i--) 1525 for (i--; i >= S3C2410_GPE(5); i--)
1570 gpio_free(i); 1526 gpio_free(i);
1571 1527
1572 goto probe_free_host; 1528 return ret;
1529 }
1530 }
1531
1532 if (!pdev->dev.platform_data)
1533 pdev->dev.platform_data = &s3cmci_def_pdata;
1534
1535 pdata = pdev->dev.platform_data;
1536
1537 if (pdata->no_wprotect)
1538 mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
1539
1540 if (pdata->no_detect)
1541 mmc->caps |= MMC_CAP_NEEDS_POLL;
1542
1543 if (pdata->wprotect_invert)
1544 mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
1545
1546 if (pdata->detect_invert)
1547 mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
1548
1549 if (gpio_is_valid(pdata->gpio_detect)) {
1550 ret = mmc_gpio_request_cd(mmc, pdata->gpio_detect, 0);
1551 if (ret) {
1552 dev_err(&pdev->dev, "error requesting GPIO for CD %d\n",
1553 ret);
1554 return ret;
1573 } 1555 }
1574 } 1556 }
1575 1557
1558 if (gpio_is_valid(pdata->gpio_wprotect)) {
1559 ret = mmc_gpio_request_ro(mmc, pdata->gpio_wprotect);
1560 if (ret) {
1561 dev_err(&pdev->dev, "error requesting GPIO for WP %d\n",
1562 ret);
1563 return ret;
1564 }
1565 }
1566
1567 return 0;
1568}
1569
1570static int s3cmci_probe_dt(struct s3cmci_host *host)
1571{
1572 struct platform_device *pdev = host->pdev;
1573 struct s3c24xx_mci_pdata *pdata;
1574 struct mmc_host *mmc = host->mmc;
1575 int ret;
1576
1577 host->is2440 = (int) of_device_get_match_data(&pdev->dev);
1578
1579 ret = mmc_of_parse(mmc);
1580 if (ret)
1581 return ret;
1582
1583 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1584 if (!pdata)
1585 return -ENOMEM;
1586
1587 pdev->dev.platform_data = pdata;
1588
1589 return 0;
1590}
1591
1592static int s3cmci_probe(struct platform_device *pdev)
1593{
1594 struct s3cmci_host *host;
1595 struct mmc_host *mmc;
1596 int ret;
1597 int i;
1598
1599 mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev);
1600 if (!mmc) {
1601 ret = -ENOMEM;
1602 goto probe_out;
1603 }
1604
1576 host = mmc_priv(mmc); 1605 host = mmc_priv(mmc);
1577 host->mmc = mmc; 1606 host->mmc = mmc;
1578 host->pdev = pdev; 1607 host->pdev = pdev;
1579 host->is2440 = is2440; 1608
1609 if (pdev->dev.of_node)
1610 ret = s3cmci_probe_dt(host);
1611 else
1612 ret = s3cmci_probe_pdata(host);
1613
1614 if (ret)
1615 goto probe_free_host;
1580 1616
1581 host->pdata = pdev->dev.platform_data; 1617 host->pdata = pdev->dev.platform_data;
1582 if (!host->pdata) {
1583 pdev->dev.platform_data = &s3cmci_def_pdata;
1584 host->pdata = &s3cmci_def_pdata;
1585 }
1586 1618
1587 spin_lock_init(&host->complete_lock); 1619 spin_lock_init(&host->complete_lock);
1588 tasklet_init(&host->pio_tasklet, pio_tasklet, (unsigned long) host); 1620 tasklet_init(&host->pio_tasklet, pio_tasklet, (unsigned long) host);
1589 1621
1590 if (is2440) { 1622 if (host->is2440) {
1591 host->sdiimsk = S3C2440_SDIIMSK; 1623 host->sdiimsk = S3C2440_SDIIMSK;
1592 host->sdidata = S3C2440_SDIDATA; 1624 host->sdidata = S3C2440_SDIDATA;
1593 host->clk_div = 1; 1625 host->clk_div = 1;
@@ -1645,43 +1677,6 @@ static int s3cmci_probe(struct platform_device *pdev)
1645 disable_irq(host->irq); 1677 disable_irq(host->irq);
1646 host->irq_state = false; 1678 host->irq_state = false;
1647 1679
1648 if (!host->pdata->no_detect) {
1649 ret = gpio_request(host->pdata->gpio_detect, "s3cmci detect");
1650 if (ret) {
1651 dev_err(&pdev->dev, "failed to get detect gpio\n");
1652 goto probe_free_irq;
1653 }
1654
1655 host->irq_cd = gpio_to_irq(host->pdata->gpio_detect);
1656
1657 if (host->irq_cd >= 0) {
1658 if (request_irq(host->irq_cd, s3cmci_irq_cd,
1659 IRQF_TRIGGER_RISING |
1660 IRQF_TRIGGER_FALLING,
1661 DRIVER_NAME, host)) {
1662 dev_err(&pdev->dev,
1663 "can't get card detect irq.\n");
1664 ret = -ENOENT;
1665 goto probe_free_gpio_cd;
1666 }
1667 } else {
1668 dev_warn(&pdev->dev,
1669 "host detect has no irq available\n");
1670 gpio_direction_input(host->pdata->gpio_detect);
1671 }
1672 } else
1673 host->irq_cd = -1;
1674
1675 if (!host->pdata->no_wprotect) {
1676 ret = gpio_request(host->pdata->gpio_wprotect, "s3cmci wp");
1677 if (ret) {
1678 dev_err(&pdev->dev, "failed to get writeprotect\n");
1679 goto probe_free_irq_cd;
1680 }
1681
1682 gpio_direction_input(host->pdata->gpio_wprotect);
1683 }
1684
1685 /* Depending on the dma state, get a DMA channel to use. */ 1680 /* Depending on the dma state, get a DMA channel to use. */
1686 1681
1687 if (s3cmci_host_usedma(host)) { 1682 if (s3cmci_host_usedma(host)) {
@@ -1689,7 +1684,7 @@ static int s3cmci_probe(struct platform_device *pdev)
1689 ret = PTR_ERR_OR_ZERO(host->dma); 1684 ret = PTR_ERR_OR_ZERO(host->dma);
1690 if (ret) { 1685 if (ret) {
1691 dev_err(&pdev->dev, "cannot get DMA channel.\n"); 1686 dev_err(&pdev->dev, "cannot get DMA channel.\n");
1692 goto probe_free_gpio_wp; 1687 goto probe_free_irq;
1693 } 1688 }
1694 } 1689 }
1695 1690
@@ -1768,18 +1763,6 @@ static int s3cmci_probe(struct platform_device *pdev)
1768 if (s3cmci_host_usedma(host)) 1763 if (s3cmci_host_usedma(host))
1769 dma_release_channel(host->dma); 1764 dma_release_channel(host->dma);
1770 1765
1771 probe_free_gpio_wp:
1772 if (!host->pdata->no_wprotect)
1773 gpio_free(host->pdata->gpio_wprotect);
1774
1775 probe_free_gpio_cd:
1776 if (!host->pdata->no_detect)
1777 gpio_free(host->pdata->gpio_detect);
1778
1779 probe_free_irq_cd:
1780 if (host->irq_cd >= 0)
1781 free_irq(host->irq_cd, host);
1782
1783 probe_free_irq: 1766 probe_free_irq:
1784 free_irq(host->irq, host); 1767 free_irq(host->irq, host);
1785 1768
@@ -1790,8 +1773,9 @@ static int s3cmci_probe(struct platform_device *pdev)
1790 release_mem_region(host->mem->start, resource_size(host->mem)); 1773 release_mem_region(host->mem->start, resource_size(host->mem));
1791 1774
1792 probe_free_gpio: 1775 probe_free_gpio:
1793 for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++) 1776 if (!pdev->dev.of_node)
1794 gpio_free(i); 1777 for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++)
1778 gpio_free(i);
1795 1779
1796 probe_free_host: 1780 probe_free_host:
1797 mmc_free_host(mmc); 1781 mmc_free_host(mmc);
@@ -1818,7 +1802,6 @@ static int s3cmci_remove(struct platform_device *pdev)
1818{ 1802{
1819 struct mmc_host *mmc = platform_get_drvdata(pdev); 1803 struct mmc_host *mmc = platform_get_drvdata(pdev);
1820 struct s3cmci_host *host = mmc_priv(mmc); 1804 struct s3cmci_host *host = mmc_priv(mmc);
1821 struct s3c24xx_mci_pdata *pd = host->pdata;
1822 int i; 1805 int i;
1823 1806
1824 s3cmci_shutdown(pdev); 1807 s3cmci_shutdown(pdev);
@@ -1832,15 +1815,9 @@ static int s3cmci_remove(struct platform_device *pdev)
1832 1815
1833 free_irq(host->irq, host); 1816 free_irq(host->irq, host);
1834 1817
1835 if (!pd->no_wprotect) 1818 if (!pdev->dev.of_node)
1836 gpio_free(pd->gpio_wprotect); 1819 for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++)
1837 1820 gpio_free(i);
1838 if (!pd->no_detect)
1839 gpio_free(pd->gpio_detect);
1840
1841 for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++)
1842 gpio_free(i);
1843
1844 1821
1845 iounmap(host->base); 1822 iounmap(host->base);
1846 release_mem_region(host->mem->start, resource_size(host->mem)); 1823 release_mem_region(host->mem->start, resource_size(host->mem));
@@ -1849,6 +1826,23 @@ static int s3cmci_remove(struct platform_device *pdev)
1849 return 0; 1826 return 0;
1850} 1827}
1851 1828
1829static const struct of_device_id s3cmci_dt_match[] = {
1830 {
1831 .compatible = "samsung,s3c2410-sdi",
1832 .data = (void *)0,
1833 },
1834 {
1835 .compatible = "samsung,s3c2412-sdi",
1836 .data = (void *)1,
1837 },
1838 {
1839 .compatible = "samsung,s3c2440-sdi",
1840 .data = (void *)1,
1841 },
1842 { /* sentinel */ },
1843};
1844MODULE_DEVICE_TABLE(of, s3cmci_dt_match);
1845
1852static const struct platform_device_id s3cmci_driver_ids[] = { 1846static const struct platform_device_id s3cmci_driver_ids[] = {
1853 { 1847 {
1854 .name = "s3c2410-sdi", 1848 .name = "s3c2410-sdi",
@@ -1868,6 +1862,7 @@ MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
1868static struct platform_driver s3cmci_driver = { 1862static struct platform_driver s3cmci_driver = {
1869 .driver = { 1863 .driver = {
1870 .name = "s3c-sdi", 1864 .name = "s3c-sdi",
1865 .of_match_table = s3cmci_dt_match,
1871 }, 1866 },
1872 .id_table = s3cmci_driver_ids, 1867 .id_table = s3cmci_driver_ids,
1873 .probe = s3cmci_probe, 1868 .probe = s3cmci_probe,
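
The s3cmci rework above drops the driver's private card-detect ISR and
get_ro/get_cd callbacks in favour of the generic slot-gpio helpers. A
minimal consumer sketch of that API (helper names from
include/linux/mmc/slot-gpio.h; example_request_cd_wp is hypothetical):

	#include <linux/mmc/slot-gpio.h>

	static int example_request_cd_wp(struct mmc_host *mmc,
					 unsigned int cd_gpio,
					 unsigned int wp_gpio)
	{
		int ret;

		ret = mmc_gpio_request_cd(mmc, cd_gpio, 0); /* 0: no debounce */
		if (ret)
			return ret;

		ret = mmc_gpio_request_ro(mmc, wp_gpio);
		if (ret)
			return ret;

		/* the core now serves ->get_cd/->get_ro through
		 * mmc_gpio_get_cd()/mmc_gpio_get_ro() */
		return 0;
	}
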
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 9dcb7048e3b1..c6a9a1bfaa22 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -263,10 +263,8 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
263 263
264 /* Platform specific code during sd probe slot goes here */ 264 /* Platform specific code during sd probe slot goes here */
265 265
266 if (hid && !strcmp(hid, "80865ACA")) { 266 if (hid && !strcmp(hid, "80865ACA"))
267 host->mmc_host_ops.get_cd = bxt_get_cd; 267 host->mmc_host_ops.get_cd = bxt_get_cd;
268 host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
269 }
270 268
271 return 0; 269 return 0;
272} 270}
@@ -302,7 +300,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
302 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 300 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
303 .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON | 301 .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
304 SDHCI_QUIRK2_STOP_WITH_TC, 302 SDHCI_QUIRK2_STOP_WITH_TC,
305 .caps = MMC_CAP_WAIT_WHILE_BUSY, 303 .caps = MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM,
306 .probe_slot = sdhci_acpi_sd_probe_slot, 304 .probe_slot = sdhci_acpi_sd_probe_slot,
307}; 305};
308 306
@@ -524,8 +522,12 @@ static int sdhci_acpi_remove(struct platform_device *pdev)
524static int sdhci_acpi_suspend(struct device *dev) 522static int sdhci_acpi_suspend(struct device *dev)
525{ 523{
526 struct sdhci_acpi_host *c = dev_get_drvdata(dev); 524 struct sdhci_acpi_host *c = dev_get_drvdata(dev);
525 struct sdhci_host *host = c->host;
526
527 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
528 mmc_retune_needed(host->mmc);
527 529
528 return sdhci_suspend_host(c->host); 530 return sdhci_suspend_host(host);
529} 531}
530 532
531static int sdhci_acpi_resume(struct device *dev) 533static int sdhci_acpi_resume(struct device *dev)
@@ -544,8 +546,12 @@ static int sdhci_acpi_resume(struct device *dev)
544static int sdhci_acpi_runtime_suspend(struct device *dev) 546static int sdhci_acpi_runtime_suspend(struct device *dev)
545{ 547{
546 struct sdhci_acpi_host *c = dev_get_drvdata(dev); 548 struct sdhci_acpi_host *c = dev_get_drvdata(dev);
549 struct sdhci_host *host = c->host;
550
551 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
552 mmc_retune_needed(host->mmc);
547 553
548 return sdhci_runtime_suspend_host(c->host); 554 return sdhci_runtime_suspend_host(host);
549} 555}
550 556
551static int sdhci_acpi_runtime_resume(struct device *dev) 557static int sdhci_acpi_runtime_resume(struct device *dev)
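
The suspend hooks above introduce a pattern repeated throughout this pull
(sdhci-brcmstb, sdhci-esdhc-imx, sdhci-of-arasan, sdhci-of-at91,
sdhci-of-esdhc and sdhci-pci below): unless the controller re-tunes in
hardware (SDHCI tuning mode 3), the card is flagged for re-tuning before
the host suspends, since the tuned sampling point may be stale after
resume. The idiom, in isolation:

	/* mode 3 re-tunes in hardware; otherwise force a software re-tune */
	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
		mmc_retune_needed(host->mmc);
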
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index 159f6f64c68e..242c5dc7a81e 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -29,6 +29,9 @@ static int sdhci_brcmstb_suspend(struct device *dev)
29 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 29 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
30 int res; 30 int res;
31 31
32 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
33 mmc_retune_needed(host->mmc);
34
32 res = sdhci_suspend_host(host); 35 res = sdhci_suspend_host(host);
33 if (res) 36 if (res)
34 return res; 37 return res;
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index 316cfec3f005..19d5698244b5 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -18,6 +18,7 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/mmc/host.h> 19#include <linux/mmc/host.h>
20#include <linux/mmc/mmc.h> 20#include <linux/mmc/mmc.h>
21#include <linux/of.h>
21 22
22#include "sdhci-pltfm.h" 23#include "sdhci-pltfm.h"
23 24
@@ -40,6 +41,7 @@
40#define SDHCI_CDNS_HRS06_MODE_MMC_DDR 0x3 41#define SDHCI_CDNS_HRS06_MODE_MMC_DDR 0x3
41#define SDHCI_CDNS_HRS06_MODE_MMC_HS200 0x4 42#define SDHCI_CDNS_HRS06_MODE_MMC_HS200 0x4
42#define SDHCI_CDNS_HRS06_MODE_MMC_HS400 0x5 43#define SDHCI_CDNS_HRS06_MODE_MMC_HS400 0x5
44#define SDHCI_CDNS_HRS06_MODE_MMC_HS400ES 0x6
43 45
44/* SRS - Slot Register Set (SDHCI-compatible) */ 46/* SRS - Slot Register Set (SDHCI-compatible) */
45#define SDHCI_CDNS_SRS_BASE 0x200 47#define SDHCI_CDNS_SRS_BASE 0x200
@@ -54,6 +56,9 @@
54#define SDHCI_CDNS_PHY_DLY_EMMC_LEGACY 0x06 56#define SDHCI_CDNS_PHY_DLY_EMMC_LEGACY 0x06
55#define SDHCI_CDNS_PHY_DLY_EMMC_SDR 0x07 57#define SDHCI_CDNS_PHY_DLY_EMMC_SDR 0x07
56#define SDHCI_CDNS_PHY_DLY_EMMC_DDR 0x08 58#define SDHCI_CDNS_PHY_DLY_EMMC_DDR 0x08
59#define SDHCI_CDNS_PHY_DLY_SDCLK 0x0b
60#define SDHCI_CDNS_PHY_DLY_HSMMC 0x0c
61#define SDHCI_CDNS_PHY_DLY_STROBE 0x0d
57 62
58/* 63/*
59 * The tuned val register is 6 bit-wide, but not the whole of the range is 64 * The tuned val register is 6 bit-wide, but not the whole of the range is
@@ -64,13 +69,34 @@
64 69
65struct sdhci_cdns_priv { 70struct sdhci_cdns_priv {
66 void __iomem *hrs_addr; 71 void __iomem *hrs_addr;
72 bool enhanced_strobe;
67}; 73};
68 74
69static void sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv, 75struct sdhci_cdns_phy_cfg {
70 u8 addr, u8 data) 76 const char *property;
77 u8 addr;
78};
79
80static const struct sdhci_cdns_phy_cfg sdhci_cdns_phy_cfgs[] = {
81 { "cdns,phy-input-delay-sd-highspeed", SDHCI_CDNS_PHY_DLY_SD_HS, },
82 { "cdns,phy-input-delay-legacy", SDHCI_CDNS_PHY_DLY_SD_DEFAULT, },
83 { "cdns,phy-input-delay-sd-uhs-sdr12", SDHCI_CDNS_PHY_DLY_UHS_SDR12, },
84 { "cdns,phy-input-delay-sd-uhs-sdr25", SDHCI_CDNS_PHY_DLY_UHS_SDR25, },
85 { "cdns,phy-input-delay-sd-uhs-sdr50", SDHCI_CDNS_PHY_DLY_UHS_SDR50, },
86 { "cdns,phy-input-delay-sd-uhs-ddr50", SDHCI_CDNS_PHY_DLY_UHS_DDR50, },
87 { "cdns,phy-input-delay-mmc-highspeed", SDHCI_CDNS_PHY_DLY_EMMC_SDR, },
88 { "cdns,phy-input-delay-mmc-ddr", SDHCI_CDNS_PHY_DLY_EMMC_DDR, },
89 { "cdns,phy-dll-delay-sdclk", SDHCI_CDNS_PHY_DLY_SDCLK, },
90 { "cdns,phy-dll-delay-sdclk-hsmmc", SDHCI_CDNS_PHY_DLY_HSMMC, },
91 { "cdns,phy-dll-delay-strobe", SDHCI_CDNS_PHY_DLY_STROBE, },
92};
93
94static int sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv,
95 u8 addr, u8 data)
71{ 96{
72 void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS04; 97 void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS04;
73 u32 tmp; 98 u32 tmp;
99 int ret;
74 100
75 tmp = (data << SDHCI_CDNS_HRS04_WDATA_SHIFT) | 101 tmp = (data << SDHCI_CDNS_HRS04_WDATA_SHIFT) |
76 (addr << SDHCI_CDNS_HRS04_ADDR_SHIFT); 102 (addr << SDHCI_CDNS_HRS04_ADDR_SHIFT);
@@ -79,17 +105,36 @@ static void sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv,
79 tmp |= SDHCI_CDNS_HRS04_WR; 105 tmp |= SDHCI_CDNS_HRS04_WR;
80 writel(tmp, reg); 106 writel(tmp, reg);
81 107
108 ret = readl_poll_timeout(reg, tmp, tmp & SDHCI_CDNS_HRS04_ACK, 0, 10);
109 if (ret)
110 return ret;
111
82 tmp &= ~SDHCI_CDNS_HRS04_WR; 112 tmp &= ~SDHCI_CDNS_HRS04_WR;
83 writel(tmp, reg); 113 writel(tmp, reg);
114
115 return 0;
84} 116}
85 117
86static void sdhci_cdns_phy_init(struct sdhci_cdns_priv *priv) 118static int sdhci_cdns_phy_init(struct device_node *np,
119 struct sdhci_cdns_priv *priv)
87{ 120{
88 sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_SD_HS, 4); 121 u32 val;
89 sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_SD_DEFAULT, 4); 122 int ret, i;
90 sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_EMMC_LEGACY, 9); 123
91 sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_EMMC_SDR, 2); 124 for (i = 0; i < ARRAY_SIZE(sdhci_cdns_phy_cfgs); i++) {
92 sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_EMMC_DDR, 3); 125 ret = of_property_read_u32(np, sdhci_cdns_phy_cfgs[i].property,
126 &val);
127 if (ret)
128 continue;
129
130 ret = sdhci_cdns_write_phy_reg(priv,
131 sdhci_cdns_phy_cfgs[i].addr,
132 val);
133 if (ret)
134 return ret;
135 }
136
137 return 0;
93} 138}
94 139
95static inline void *sdhci_cdns_priv(struct sdhci_host *host) 140static inline void *sdhci_cdns_priv(struct sdhci_host *host)
@@ -103,16 +148,35 @@ static unsigned int sdhci_cdns_get_timeout_clock(struct sdhci_host *host)
103{ 148{
104 /* 149 /*
105 * Cadence's spec says the Timeout Clock Frequency is the same as the 150 * Cadence's spec says the Timeout Clock Frequency is the same as the
106 * Base Clock Frequency. Divide it by 1000 to return a value in kHz. 151 * Base Clock Frequency.
107 */ 152 */
108 return host->max_clk / 1000; 153 return host->max_clk;
154}
155
156static void sdhci_cdns_set_emmc_mode(struct sdhci_cdns_priv *priv, u32 mode)
157{
158 u32 tmp;
159
160 /* The speed mode for eMMC is selected by HRS06 register */
161 tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06);
162 tmp &= ~SDHCI_CDNS_HRS06_MODE_MASK;
163 tmp |= mode;
164 writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS06);
165}
166
167static u32 sdhci_cdns_get_emmc_mode(struct sdhci_cdns_priv *priv)
168{
169 u32 tmp;
170
171 tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06);
172 return tmp & SDHCI_CDNS_HRS06_MODE_MASK;
109} 173}
110 174
111static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host, 175static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
112 unsigned int timing) 176 unsigned int timing)
113{ 177{
114 struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host); 178 struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
115 u32 mode, tmp; 179 u32 mode;
116 180
117 switch (timing) { 181 switch (timing) {
118 case MMC_TIMING_MMC_HS: 182 case MMC_TIMING_MMC_HS:
@@ -125,18 +189,17 @@ static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
125 mode = SDHCI_CDNS_HRS06_MODE_MMC_HS200; 189 mode = SDHCI_CDNS_HRS06_MODE_MMC_HS200;
126 break; 190 break;
127 case MMC_TIMING_MMC_HS400: 191 case MMC_TIMING_MMC_HS400:
128 mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400; 192 if (priv->enhanced_strobe)
193 mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400ES;
194 else
195 mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400;
129 break; 196 break;
130 default: 197 default:
131 mode = SDHCI_CDNS_HRS06_MODE_SD; 198 mode = SDHCI_CDNS_HRS06_MODE_SD;
132 break; 199 break;
133 } 200 }
134 201
135 /* The speed mode for eMMC is selected by HRS06 register */ 202 sdhci_cdns_set_emmc_mode(priv, mode);
136 tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06);
137 tmp &= ~SDHCI_CDNS_HRS06_MODE_MASK;
138 tmp |= mode;
139 writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS06);
140 203
141 /* For SD, fall back to the default handler */ 204 /* For SD, fall back to the default handler */
142 if (mode == SDHCI_CDNS_HRS06_MODE_SD) 205 if (mode == SDHCI_CDNS_HRS06_MODE_SD)
@@ -213,6 +276,26 @@ static int sdhci_cdns_execute_tuning(struct mmc_host *mmc, u32 opcode)
213 return sdhci_cdns_set_tune_val(host, end_of_streak - max_streak / 2); 276 return sdhci_cdns_set_tune_val(host, end_of_streak - max_streak / 2);
214} 277}
215 278
279static void sdhci_cdns_hs400_enhanced_strobe(struct mmc_host *mmc,
280 struct mmc_ios *ios)
281{
282 struct sdhci_host *host = mmc_priv(mmc);
283 struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
284 u32 mode;
285
286 priv->enhanced_strobe = ios->enhanced_strobe;
287
288 mode = sdhci_cdns_get_emmc_mode(priv);
289
290 if (mode == SDHCI_CDNS_HRS06_MODE_MMC_HS400 && ios->enhanced_strobe)
291 sdhci_cdns_set_emmc_mode(priv,
292 SDHCI_CDNS_HRS06_MODE_MMC_HS400ES);
293
294 if (mode == SDHCI_CDNS_HRS06_MODE_MMC_HS400ES && !ios->enhanced_strobe)
295 sdhci_cdns_set_emmc_mode(priv,
296 SDHCI_CDNS_HRS06_MODE_MMC_HS400);
297}
298
216static int sdhci_cdns_probe(struct platform_device *pdev) 299static int sdhci_cdns_probe(struct platform_device *pdev)
217{ 300{
218 struct sdhci_host *host; 301 struct sdhci_host *host;
@@ -220,8 +303,9 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
220 struct sdhci_cdns_priv *priv; 303 struct sdhci_cdns_priv *priv;
221 struct clk *clk; 304 struct clk *clk;
222 int ret; 305 int ret;
306 struct device *dev = &pdev->dev;
223 307
224 clk = devm_clk_get(&pdev->dev, NULL); 308 clk = devm_clk_get(dev, NULL);
225 if (IS_ERR(clk)) 309 if (IS_ERR(clk))
226 return PTR_ERR(clk); 310 return PTR_ERR(clk);
227 311
@@ -240,14 +324,21 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
240 324
241 priv = sdhci_cdns_priv(host); 325 priv = sdhci_cdns_priv(host);
242 priv->hrs_addr = host->ioaddr; 326 priv->hrs_addr = host->ioaddr;
327 priv->enhanced_strobe = false;
243 host->ioaddr += SDHCI_CDNS_SRS_BASE; 328 host->ioaddr += SDHCI_CDNS_SRS_BASE;
244 host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning; 329 host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning;
330 host->mmc_host_ops.hs400_enhanced_strobe =
331 sdhci_cdns_hs400_enhanced_strobe;
332
333 sdhci_get_of_property(pdev);
245 334
246 ret = mmc_of_parse(host->mmc); 335 ret = mmc_of_parse(host->mmc);
247 if (ret) 336 if (ret)
248 goto free; 337 goto free;
249 338
250 sdhci_cdns_phy_init(priv); 339 ret = sdhci_cdns_phy_init(dev->of_node, priv);
340 if (ret)
341 goto free;
251 342
252 ret = sdhci_add_host(host); 343 ret = sdhci_add_host(host);
253 if (ret) 344 if (ret)
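
The PHY write above now waits for the HRS04 acknowledge bit with
readl_poll_timeout() from <linux/iopoll.h>, instead of assuming the write
completes instantly. The macro re-reads the register into its second
argument until the condition holds or the timeout (in microseconds)
expires, returning 0 on success or -ETIMEDOUT. Mirroring the hunk:

	#include <linux/iopoll.h>

	u32 tmp;
	int ret;

	/* poll for ACK, no sleep between reads, give up after 10 us */
	ret = readl_poll_timeout(reg, tmp, tmp & SDHCI_CDNS_HRS04_ACK, 0, 10);
	if (ret)
		return ret;	/* -ETIMEDOUT */
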
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 445fc47dc3e7..23d8b8a73ae9 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -889,6 +889,28 @@ static void esdhc_set_strobe_dll(struct sdhci_host *host)
889 } 889 }
890} 890}
891 891
892static void esdhc_reset_tuning(struct sdhci_host *host)
893{
894 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
895 struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
896 u32 ctrl;
897
898 /* Reset the tuning circuit */
899 if (esdhc_is_usdhc(imx_data)) {
900 if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
901 ctrl = readl(host->ioaddr + ESDHC_MIX_CTRL);
902 ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
903 ctrl &= ~ESDHC_MIX_CTRL_FBCLK_SEL;
904 writel(ctrl, host->ioaddr + ESDHC_MIX_CTRL);
905 writel(0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
906 } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
907 ctrl = readl(host->ioaddr + SDHCI_ACMD12_ERR);
908 ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
909 writel(ctrl, host->ioaddr + SDHCI_ACMD12_ERR);
910 }
911 }
912}
913
892static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing) 914static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
893{ 915{
894 u32 m; 916 u32 m;
@@ -932,6 +954,10 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
932 host->ops->set_clock(host, host->clock); 954 host->ops->set_clock(host, host->clock);
933 esdhc_set_strobe_dll(host); 955 esdhc_set_strobe_dll(host);
934 break; 956 break;
957 case MMC_TIMING_LEGACY:
958 default:
959 esdhc_reset_tuning(host);
960 break;
935 } 961 }
936 962
937 esdhc_change_pinstate(host, timing); 963 esdhc_change_pinstate(host, timing);
@@ -1323,6 +1349,9 @@ static int sdhci_esdhc_suspend(struct device *dev)
1323{ 1349{
1324 struct sdhci_host *host = dev_get_drvdata(dev); 1350 struct sdhci_host *host = dev_get_drvdata(dev);
1325 1351
1352 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
1353 mmc_retune_needed(host->mmc);
1354
1326 return sdhci_suspend_host(host); 1355 return sdhci_suspend_host(host);
1327} 1356}
1328 1357
@@ -1347,6 +1376,9 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev)
1347 1376
1348 ret = sdhci_runtime_suspend_host(host); 1377 ret = sdhci_runtime_suspend_host(host);
1349 1378
1379 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
1380 mmc_retune_needed(host->mmc);
1381
1350 if (!sdhci_sdio_irq_enabled(host)) { 1382 if (!sdhci_sdio_irq_enabled(host)) {
1351 clk_disable_unprepare(imx_data->clk_per); 1383 clk_disable_unprepare(imx_data->clk_per);
1352 clk_disable_unprepare(imx_data->clk_ipg); 1384 clk_disable_unprepare(imx_data->clk_ipg);
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index ece8b37e51dd..c4bbd7485987 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -37,6 +37,7 @@
37 37
38/* Protocol Control Register */ 38/* Protocol Control Register */
39#define ESDHC_PROCTL 0x28 39#define ESDHC_PROCTL 0x28
40#define ESDHC_VOLT_SEL 0x00000400
40#define ESDHC_CTRL_4BITBUS (0x1 << 1) 41#define ESDHC_CTRL_4BITBUS (0x1 << 1)
41#define ESDHC_CTRL_8BITBUS (0x2 << 1) 42#define ESDHC_CTRL_8BITBUS (0x2 << 1)
42#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1) 43#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1)
@@ -52,8 +53,14 @@
52#define ESDHC_CLOCK_HCKEN 0x00000002 53#define ESDHC_CLOCK_HCKEN 0x00000002
53#define ESDHC_CLOCK_IPGEN 0x00000001 54#define ESDHC_CLOCK_IPGEN 0x00000001
54 55
56/* Tuning Block Control Register */
57#define ESDHC_TBCTL 0x120
58#define ESDHC_TB_EN 0x00000004
59
55/* Control Register for DMA transfer */ 60/* Control Register for DMA transfer */
56#define ESDHC_DMA_SYSCTL 0x40c 61#define ESDHC_DMA_SYSCTL 0x40c
62#define ESDHC_PERIPHERAL_CLK_SEL 0x00080000
63#define ESDHC_FLUSH_ASYNC_FIFO 0x00040000
57#define ESDHC_DMA_SNOOP 0x00000040 64#define ESDHC_DMA_SNOOP 0x00000040
58 65
59#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */ 66#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 10cdc84d5113..9d601dc0d646 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -991,12 +991,8 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
991 mmc_hostname(host->mmc), host->clock, uhs, ctrl_2); 991 mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
992 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 992 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
993 993
994 spin_unlock_irq(&host->lock);
995
996 if (mmc->ios.timing == MMC_TIMING_MMC_HS400) 994 if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
997 sdhci_msm_hs400(host, &mmc->ios); 995 sdhci_msm_hs400(host, &mmc->ios);
998
999 spin_lock_irq(&host->lock);
1000} 996}
1001 997
1002static void sdhci_msm_voltage_switch(struct sdhci_host *host) 998static void sdhci_msm_voltage_switch(struct sdhci_host *host)
@@ -1089,13 +1085,9 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
1089 goto out; 1085 goto out;
1090 } 1086 }
1091 1087
1092 spin_unlock_irq(&host->lock);
1093
1094 sdhci_msm_hc_select_mode(host); 1088 sdhci_msm_hc_select_mode(host);
1095 1089
1096 msm_set_clock_rate_for_bus_mode(host, clock); 1090 msm_set_clock_rate_for_bus_mode(host, clock);
1097
1098 spin_lock_irq(&host->lock);
1099out: 1091out:
1100 __sdhci_msm_set_clock(host, clock); 1092 __sdhci_msm_set_clock(host, clock);
1101} 1093}
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 1cfd7f900339..ea6b36c88ae7 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -157,21 +157,6 @@ static int sdhci_arasan_syscon_write(struct sdhci_host *host,
157 return ret; 157 return ret;
158} 158}
159 159
160static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
161{
162 unsigned long freq;
163 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
164
165 /* SDHCI timeout clock is in kHz */
166 freq = DIV_ROUND_UP(clk_get_rate(pltfm_host->clk), 1000);
167
168 /* or in MHz */
169 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
170 freq = DIV_ROUND_UP(freq, 1000);
171
172 return freq;
173}
174
175static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock) 160static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
176{ 161{
177 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 162 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -194,9 +179,7 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
194 * through low speeds without power cycling. 179 * through low speeds without power cycling.
195 */ 180 */
196 sdhci_set_clock(host, host->max_clk); 181 sdhci_set_clock(host, host->max_clk);
197 spin_unlock_irq(&host->lock);
198 phy_power_on(sdhci_arasan->phy); 182 phy_power_on(sdhci_arasan->phy);
199 spin_lock_irq(&host->lock);
200 sdhci_arasan->is_phy_on = true; 183 sdhci_arasan->is_phy_on = true;
201 184
202 /* 185 /*
@@ -215,18 +198,14 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
215 } 198 }
216 199
217 if (ctrl_phy && sdhci_arasan->is_phy_on) { 200 if (ctrl_phy && sdhci_arasan->is_phy_on) {
218 spin_unlock_irq(&host->lock);
219 phy_power_off(sdhci_arasan->phy); 201 phy_power_off(sdhci_arasan->phy);
220 spin_lock_irq(&host->lock);
221 sdhci_arasan->is_phy_on = false; 202 sdhci_arasan->is_phy_on = false;
222 } 203 }
223 204
224 sdhci_set_clock(host, clock); 205 sdhci_set_clock(host, clock);
225 206
226 if (ctrl_phy) { 207 if (ctrl_phy) {
227 spin_unlock_irq(&host->lock);
228 phy_power_on(sdhci_arasan->phy); 208 phy_power_on(sdhci_arasan->phy);
229 spin_lock_irq(&host->lock);
230 sdhci_arasan->is_phy_on = true; 209 sdhci_arasan->is_phy_on = true;
231 } 210 }
232} 211}
@@ -286,7 +265,7 @@ static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
286static struct sdhci_ops sdhci_arasan_ops = { 265static struct sdhci_ops sdhci_arasan_ops = {
287 .set_clock = sdhci_arasan_set_clock, 266 .set_clock = sdhci_arasan_set_clock,
288 .get_max_clock = sdhci_pltfm_clk_get_max_clock, 267 .get_max_clock = sdhci_pltfm_clk_get_max_clock,
289 .get_timeout_clock = sdhci_arasan_get_timeout_clock, 268 .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
290 .set_bus_width = sdhci_set_bus_width, 269 .set_bus_width = sdhci_set_bus_width,
291 .reset = sdhci_arasan_reset, 270 .reset = sdhci_arasan_reset,
292 .set_uhs_signaling = sdhci_set_uhs_signaling, 271 .set_uhs_signaling = sdhci_set_uhs_signaling,
@@ -315,6 +294,9 @@ static int sdhci_arasan_suspend(struct device *dev)
315 struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host); 294 struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
316 int ret; 295 int ret;
317 296
297 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
298 mmc_retune_needed(host->mmc);
299
318 ret = sdhci_suspend_host(host); 300 ret = sdhci_suspend_host(host);
319 if (ret) 301 if (ret)
320 return ret; 302 return ret;
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index d5430ed02a67..7611fd679f1a 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -98,9 +98,7 @@ static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode,
98 if (!IS_ERR(host->mmc->supply.vmmc)) { 98 if (!IS_ERR(host->mmc->supply.vmmc)) {
99 struct mmc_host *mmc = host->mmc; 99 struct mmc_host *mmc = host->mmc;
100 100
101 spin_unlock_irq(&host->lock);
102 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 101 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
103 spin_lock_irq(&host->lock);
104 } 102 }
105 sdhci_set_power_noreg(host, mode, vdd); 103 sdhci_set_power_noreg(host, mode, vdd);
106} 104}
@@ -140,6 +138,9 @@ static int sdhci_at91_runtime_suspend(struct device *dev)
140 138
141 ret = sdhci_runtime_suspend_host(host); 139 ret = sdhci_runtime_suspend_host(host);
142 140
141 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
142 mmc_retune_needed(host->mmc);
143
143 clk_disable_unprepare(priv->gck); 144 clk_disable_unprepare(priv->gck);
144 clk_disable_unprepare(priv->hclock); 145 clk_disable_unprepare(priv->hclock);
145 clk_disable_unprepare(priv->mainck); 146 clk_disable_unprepare(priv->mainck);
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index d3aa67142839..44b016baa585 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -16,9 +16,12 @@
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/of.h> 18#include <linux/of.h>
19#include <linux/of_address.h>
19#include <linux/delay.h> 20#include <linux/delay.h>
20#include <linux/module.h> 21#include <linux/module.h>
21#include <linux/sys_soc.h> 22#include <linux/sys_soc.h>
23#include <linux/clk.h>
24#include <linux/ktime.h>
22#include <linux/mmc/host.h> 25#include <linux/mmc/host.h>
23#include "sdhci-pltfm.h" 26#include "sdhci-pltfm.h"
24#include "sdhci-esdhc.h" 27#include "sdhci-esdhc.h"
@@ -30,6 +33,7 @@ struct sdhci_esdhc {
30 u8 vendor_ver; 33 u8 vendor_ver;
31 u8 spec_ver; 34 u8 spec_ver;
32 bool quirk_incorrect_hostver; 35 bool quirk_incorrect_hostver;
36 unsigned int peripheral_clock;
33}; 37};
34 38
35/** 39/**
@@ -414,15 +418,25 @@ static int esdhc_of_enable_dma(struct sdhci_host *host)
414static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host) 418static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
415{ 419{
416 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 420 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
421 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
417 422
418 return pltfm_host->clock; 423 if (esdhc->peripheral_clock)
424 return esdhc->peripheral_clock;
425 else
426 return pltfm_host->clock;
419} 427}
420 428
421static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) 429static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
422{ 430{
423 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 431 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
432 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
433 unsigned int clock;
424 434
425 return pltfm_host->clock / 256 / 16; 435 if (esdhc->peripheral_clock)
436 clock = esdhc->peripheral_clock;
437 else
438 clock = pltfm_host->clock;
439 return clock / 256 / 16;
426} 440}
427 441
428static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) 442static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
@@ -431,7 +445,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
431 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); 445 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
432 int pre_div = 1; 446 int pre_div = 1;
433 int div = 1; 447 int div = 1;
434 u32 timeout; 448 ktime_t timeout;
435 u32 temp; 449 u32 temp;
436 450
437 host->mmc->actual_clock = 0; 451 host->mmc->actual_clock = 0;
@@ -443,6 +457,20 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
443 if (esdhc->vendor_ver < VENDOR_V_23) 457 if (esdhc->vendor_ver < VENDOR_V_23)
444 pre_div = 2; 458 pre_div = 2;
445 459
460 /*
461 * Limit SD clock to 167MHz for ls1046a according to its datasheet
462 */
463 if (clock > 167000000 &&
464 of_find_compatible_node(NULL, NULL, "fsl,ls1046a-esdhc"))
465 clock = 167000000;
466
467 /*
468 * Limit SD clock to 125MHz for ls1012a according to its datasheet
469 */
470 if (clock > 125000000 &&
471 of_find_compatible_node(NULL, NULL, "fsl,ls1012a-esdhc"))
472 clock = 125000000;
473
446 /* Workaround to reduce the clock frequency for p1010 esdhc */ 474 /* Workaround to reduce the clock frequency for p1010 esdhc */
447 if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) { 475 if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
448 if (clock > 20000000) 476 if (clock > 20000000)
@@ -475,15 +503,14 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
475 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); 503 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
476 504
477 /* Wait max 20 ms */ 505 /* Wait max 20 ms */
478 timeout = 20; 506 timeout = ktime_add_ms(ktime_get(), 20);
479 while (!(sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)) { 507 while (!(sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)) {
480 if (timeout == 0) { 508 if (ktime_after(ktime_get(), timeout)) {
481 pr_err("%s: Internal clock never stabilised.\n", 509 pr_err("%s: Internal clock never stabilised.\n",
482 mmc_hostname(host->mmc)); 510 mmc_hostname(host->mmc));
483 return; 511 return;
484 } 512 }
485 timeout--; 513 udelay(10);
486 mdelay(1);
487 } 514 }
488 515
489 temp |= ESDHC_CLOCK_SDCLKEN; 516 temp |= ESDHC_CLOCK_SDCLKEN;
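
The timeout conversion above (and in esdhc_clock_enable() in the next
hunk) swaps a decrementing mdelay() counter for a ktime deadline, which
bounds the wait by wall-clock time rather than by loop iterations and so
stays a true 20 ms budget even if the loop is preempted:

	ktime_t timeout = ktime_add_ms(ktime_get(), 20);

	while (!(sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)) {
		if (ktime_after(ktime_get(), timeout))
			return;		/* clock never stabilised */
		udelay(10);
	}
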
@@ -512,6 +539,33 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
512 sdhci_writel(host, ctrl, ESDHC_PROCTL); 539 sdhci_writel(host, ctrl, ESDHC_PROCTL);
513} 540}
514 541
542static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
543{
544 u32 val;
545 ktime_t timeout;
546
547 val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
548
549 if (enable)
550 val |= ESDHC_CLOCK_SDCLKEN;
551 else
552 val &= ~ESDHC_CLOCK_SDCLKEN;
553
554 sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
555
556 /* Wait max 20 ms */
557 timeout = ktime_add_ms(ktime_get(), 20);
558 val = ESDHC_CLOCK_STABLE;
559 while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) {
560 if (ktime_after(ktime_get(), timeout)) {
561 pr_err("%s: Internal clock never stabilised.\n",
562 mmc_hostname(host->mmc));
563 break;
564 }
565 udelay(10);
566 }
567}
568
515static void esdhc_reset(struct sdhci_host *host, u8 mask) 569static void esdhc_reset(struct sdhci_host *host, u8 mask)
516{ 570{
517 sdhci_reset(host, mask); 571 sdhci_reset(host, mask);
@@ -520,6 +574,95 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
520 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 574 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
521} 575}
522 576
577/* The SCFG, Supplemental Configuration Unit, provides SoC-specific
578 * configuration and status registers for the device. Some platforms
579 * have an SDHC IO VSEL control register in the SCFG, which is used
580 * to support SDHC IO voltage switching.
581 */
582static const struct of_device_id scfg_device_ids[] = {
583 { .compatible = "fsl,t1040-scfg", },
584 { .compatible = "fsl,ls1012a-scfg", },
585 { .compatible = "fsl,ls1046a-scfg", },
586 {}
587};
588
589/* SDHC IO VSEL control register definition */
590#define SCFG_SDHCIOVSELCR 0x408
591#define SDHCIOVSELCR_TGLEN 0x80000000
592#define SDHCIOVSELCR_VSELVAL 0x60000000
593#define SDHCIOVSELCR_SDHC_VS 0x00000001
594
595static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
596 struct mmc_ios *ios)
597{
598 struct sdhci_host *host = mmc_priv(mmc);
599 struct device_node *scfg_node;
600 void __iomem *scfg_base = NULL;
601 u32 sdhciovselcr;
602 u32 val;
603
604 /*
605 * Signal Voltage Switching is only applicable for Host Controllers
606 * v3.00 and above.
607 */
608 if (host->version < SDHCI_SPEC_300)
609 return 0;
610
611 val = sdhci_readl(host, ESDHC_PROCTL);
612
613 switch (ios->signal_voltage) {
614 case MMC_SIGNAL_VOLTAGE_330:
615 val &= ~ESDHC_VOLT_SEL;
616 sdhci_writel(host, val, ESDHC_PROCTL);
617 return 0;
618 case MMC_SIGNAL_VOLTAGE_180:
619 scfg_node = of_find_matching_node(NULL, scfg_device_ids);
620 if (scfg_node)
621 scfg_base = of_iomap(scfg_node, 0);
622 if (scfg_base) {
623 sdhciovselcr = SDHCIOVSELCR_TGLEN |
624 SDHCIOVSELCR_VSELVAL;
625 iowrite32be(sdhciovselcr,
626 scfg_base + SCFG_SDHCIOVSELCR);
627
628 val |= ESDHC_VOLT_SEL;
629 sdhci_writel(host, val, ESDHC_PROCTL);
630 mdelay(5);
631
632 sdhciovselcr = SDHCIOVSELCR_TGLEN |
633 SDHCIOVSELCR_SDHC_VS;
634 iowrite32be(sdhciovselcr,
635 scfg_base + SCFG_SDHCIOVSELCR);
636 iounmap(scfg_base);
637 } else {
638 val |= ESDHC_VOLT_SEL;
639 sdhci_writel(host, val, ESDHC_PROCTL);
640 }
641 return 0;
642 default:
643 return 0;
644 }
645}
646
647static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
648{
649 struct sdhci_host *host = mmc_priv(mmc);
650 u32 val;
651
652 /* Use tuning block for tuning procedure */
653 esdhc_clock_enable(host, false);
654 val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
655 val |= ESDHC_FLUSH_ASYNC_FIFO;
656 sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
657
658 val = sdhci_readl(host, ESDHC_TBCTL);
659 val |= ESDHC_TB_EN;
660 sdhci_writel(host, val, ESDHC_TBCTL);
661 esdhc_clock_enable(host, true);
662
663 return sdhci_execute_tuning(mmc, opcode);
664}
665
523#ifdef CONFIG_PM_SLEEP 666#ifdef CONFIG_PM_SLEEP
524static u32 esdhc_proctl; 667static u32 esdhc_proctl;
525static int esdhc_of_suspend(struct device *dev) 668static int esdhc_of_suspend(struct device *dev)
@@ -528,6 +671,9 @@ static int esdhc_of_suspend(struct device *dev)
528 671
529 esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL); 672 esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);
530 673
674 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
675 mmc_retune_needed(host->mmc);
676
531 return sdhci_suspend_host(host); 677 return sdhci_suspend_host(host);
532} 678}
533 679
@@ -610,6 +756,9 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
610{ 756{
611 struct sdhci_pltfm_host *pltfm_host; 757 struct sdhci_pltfm_host *pltfm_host;
612 struct sdhci_esdhc *esdhc; 758 struct sdhci_esdhc *esdhc;
759 struct device_node *np;
760 struct clk *clk;
761 u32 val;
613 u16 host_ver; 762 u16 host_ver;
614 763
615 pltfm_host = sdhci_priv(host); 764 pltfm_host = sdhci_priv(host);
@@ -623,6 +772,32 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
623 esdhc->quirk_incorrect_hostver = true; 772 esdhc->quirk_incorrect_hostver = true;
624 else 773 else
625 esdhc->quirk_incorrect_hostver = false; 774 esdhc->quirk_incorrect_hostver = false;
775
776 np = pdev->dev.of_node;
777 clk = of_clk_get(np, 0);
778 if (!IS_ERR(clk)) {
779 /*
780 * esdhc->peripheral_clock is assigned the eSDHC base clock
781 * rate when the peripheral clock is in use. For ls1046a, the
782 * rate returned by the common clk API is the peripheral
783 * clock, while the eSDHC base clock is 1/2 the peripheral
784 * clock.
785 */
786 if (of_device_is_compatible(np, "fsl,ls1046a-esdhc"))
787 esdhc->peripheral_clock = clk_get_rate(clk) / 2;
788 else
789 esdhc->peripheral_clock = clk_get_rate(clk);
790
791 clk_put(clk);
792 }
793
794 if (esdhc->peripheral_clock) {
795 esdhc_clock_enable(host, false);
796 val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
797 val |= ESDHC_PERIPHERAL_CLK_SEL;
798 sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
799 esdhc_clock_enable(host, true);
800 }
626} 801}
627 802
628static int sdhci_esdhc_probe(struct platform_device *pdev) 803static int sdhci_esdhc_probe(struct platform_device *pdev)
@@ -645,6 +820,11 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
645 if (IS_ERR(host)) 820 if (IS_ERR(host))
646 return PTR_ERR(host); 821 return PTR_ERR(host);
647 822
823 host->mmc_host_ops.start_signal_voltage_switch =
824 esdhc_signal_voltage_switch;
825 host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
826 host->tuning_delay = 1;
827
648 esdhc_init(pdev, host); 828 esdhc_init(pdev, host);
649 829
650 sdhci_get_of_property(pdev); 830 sdhci_get_of_property(pdev);
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 86560d590786..92fc3f7c538d 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -12,6 +12,7 @@
12 * - JMicron (hardware and technical support) 12 * - JMicron (hardware and technical support)
13 */ 13 */
14 14
15#include <linux/string.h>
15#include <linux/delay.h> 16#include <linux/delay.h>
16#include <linux/highmem.h> 17#include <linux/highmem.h>
17#include <linux/module.h> 18#include <linux/module.h>
@@ -36,10 +37,138 @@
36static int sdhci_pci_enable_dma(struct sdhci_host *host); 37static int sdhci_pci_enable_dma(struct sdhci_host *host);
37static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width); 38static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width);
38static void sdhci_pci_hw_reset(struct sdhci_host *host); 39static void sdhci_pci_hw_reset(struct sdhci_host *host);
39static int sdhci_pci_select_drive_strength(struct sdhci_host *host, 40
40 struct mmc_card *card, 41#ifdef CONFIG_PM_SLEEP
41 unsigned int max_dtr, int host_drv, 42static int __sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
42 int card_drv, int *drv_type); 43{
44 int i, ret;
45
46 for (i = 0; i < chip->num_slots; i++) {
47 struct sdhci_pci_slot *slot = chip->slots[i];
48 struct sdhci_host *host;
49
50 if (!slot)
51 continue;
52
53 host = slot->host;
54
55 if (chip->pm_retune && host->tuning_mode != SDHCI_TUNING_MODE_3)
56 mmc_retune_needed(host->mmc);
57
58 ret = sdhci_suspend_host(host);
59 if (ret)
60 goto err_pci_suspend;
61
62 if (host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ)
63 sdhci_enable_irq_wakeups(host);
64 }
65
66 return 0;
67
68err_pci_suspend:
69 while (--i >= 0)
70 sdhci_resume_host(chip->slots[i]->host);
71 return ret;
72}
73
74static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
75{
76 mmc_pm_flag_t pm_flags = 0;
77 int i;
78
79 for (i = 0; i < chip->num_slots; i++) {
80 struct sdhci_pci_slot *slot = chip->slots[i];
81
82 if (slot)
83 pm_flags |= slot->host->mmc->pm_flags;
84 }
85
86 return device_init_wakeup(&chip->pdev->dev,
87 (pm_flags & MMC_PM_KEEP_POWER) &&
88 (pm_flags & MMC_PM_WAKE_SDIO_IRQ));
89}
90
91static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
92{
93 int ret;
94
95 ret = __sdhci_pci_suspend_host(chip);
96 if (ret)
97 return ret;
98
99 sdhci_pci_init_wakeup(chip);
100
101 return 0;
102}
103
104int sdhci_pci_resume_host(struct sdhci_pci_chip *chip)
105{
106 struct sdhci_pci_slot *slot;
107 int i, ret;
108
109 for (i = 0; i < chip->num_slots; i++) {
110 slot = chip->slots[i];
111 if (!slot)
112 continue;
113
114 ret = sdhci_resume_host(slot->host);
115 if (ret)
116 return ret;
117 }
118
119 return 0;
120}
121#endif
122
123#ifdef CONFIG_PM
124static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip)
125{
126 struct sdhci_pci_slot *slot;
127 struct sdhci_host *host;
128 int i, ret;
129
130 for (i = 0; i < chip->num_slots; i++) {
131 slot = chip->slots[i];
132 if (!slot)
133 continue;
134
135 host = slot->host;
136
137 ret = sdhci_runtime_suspend_host(host);
138 if (ret)
139 goto err_pci_runtime_suspend;
140
141 if (chip->rpm_retune &&
142 host->tuning_mode != SDHCI_TUNING_MODE_3)
143 mmc_retune_needed(host->mmc);
144 }
145
146 return 0;
147
148err_pci_runtime_suspend:
149 while (--i >= 0)
150 sdhci_runtime_resume_host(chip->slots[i]->host);
151 return ret;
152}
153
154static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
155{
156 struct sdhci_pci_slot *slot;
157 int i, ret;
158
159 for (i = 0; i < chip->num_slots; i++) {
160 slot = chip->slots[i];
161 if (!slot)
162 continue;
163
164 ret = sdhci_runtime_resume_host(slot->host);
165 if (ret)
166 return ret;
167 }
168
169 return 0;
170}
171#endif
43 172
44/*****************************************************************************\ 173/*****************************************************************************\
45 * * 174 * *
@@ -71,14 +200,16 @@ static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot)
71 return 0; 200 return 0;
72} 201}
73 202
203#ifdef CONFIG_PM_SLEEP
74static int ricoh_mmc_resume(struct sdhci_pci_chip *chip) 204static int ricoh_mmc_resume(struct sdhci_pci_chip *chip)
75{ 205{
76 /* Apply a delay to allow controller to settle */ 206 /* Apply a delay to allow controller to settle */
77 /* Otherwise it becomes confused if card state changed 207 /* Otherwise it becomes confused if card state changed
78 during suspend */ 208 during suspend */
79 msleep(500); 209 msleep(500);
80 return 0; 210 return sdhci_pci_resume_host(chip);
81} 211}
212#endif
82 213
83static const struct sdhci_pci_fixes sdhci_ricoh = { 214static const struct sdhci_pci_fixes sdhci_ricoh = {
84 .probe = ricoh_probe, 215 .probe = ricoh_probe,
@@ -89,7 +220,9 @@ static const struct sdhci_pci_fixes sdhci_ricoh = {
89 220
90static const struct sdhci_pci_fixes sdhci_ricoh_mmc = { 221static const struct sdhci_pci_fixes sdhci_ricoh_mmc = {
91 .probe_slot = ricoh_mmc_probe_slot, 222 .probe_slot = ricoh_mmc_probe_slot,
223#ifdef CONFIG_PM_SLEEP
92 .resume = ricoh_mmc_resume, 224 .resume = ricoh_mmc_resume,
225#endif
93 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | 226 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
94 SDHCI_QUIRK_CLOCK_BEFORE_RESET | 227 SDHCI_QUIRK_CLOCK_BEFORE_RESET |
95 SDHCI_QUIRK_NO_CARD_NO_RESET | 228 SDHCI_QUIRK_NO_CARD_NO_RESET |
@@ -259,6 +392,81 @@ static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
259 .probe_slot = pch_hc_probe_slot, 392 .probe_slot = pch_hc_probe_slot,
260}; 393};
261 394
395enum {
396 INTEL_DSM_FNS = 0,
397 INTEL_DSM_DRV_STRENGTH = 9,
398 INTEL_DSM_D3_RETUNE = 10,
399};
400
401struct intel_host {
402 u32 dsm_fns;
403 int drv_strength;
404 bool d3_retune;
405};
406
407const u8 intel_dsm_uuid[] = {
408 0xA5, 0x3E, 0xC1, 0xF6, 0xCD, 0x65, 0x1F, 0x46,
409 0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61,
410};
411
412static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
413 unsigned int fn, u32 *result)
414{
415 union acpi_object *obj;
416 int err = 0;
417 size_t len;
418
419 obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), intel_dsm_uuid, 0, fn, NULL);
420 if (!obj)
421 return -EOPNOTSUPP;
422
423 if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
424 err = -EINVAL;
425 goto out;
426 }
427
428 len = min_t(size_t, obj->buffer.length, 4);
429
430 *result = 0;
431 memcpy(result, obj->buffer.pointer, len);
432out:
433 ACPI_FREE(obj);
434
435 return err;
436}
437
438static int intel_dsm(struct intel_host *intel_host, struct device *dev,
439 unsigned int fn, u32 *result)
440{
441 if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
442 return -EOPNOTSUPP;
443
444 return __intel_dsm(intel_host, dev, fn, result);
445}
446
447static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
448 struct mmc_host *mmc)
449{
450 int err;
451 u32 val;
452
453 err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
454 if (err) {
455 pr_debug("%s: DSM not supported, error %d\n",
456 mmc_hostname(mmc), err);
457 return;
458 }
459
460 pr_debug("%s: DSM function mask %#x\n",
461 mmc_hostname(mmc), intel_host->dsm_fns);
462
463 err = intel_dsm(intel_host, dev, INTEL_DSM_DRV_STRENGTH, &val);
464 intel_host->drv_strength = err ? 0 : val;
465
466 err = intel_dsm(intel_host, dev, INTEL_DSM_D3_RETUNE, &val);
467 intel_host->d3_retune = err ? true : !!val;
468}
469
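
A note on the _DSM plumbing above: function 0 (INTEL_DSM_FNS) returns a bitmask of the functions the platform implements, and intel_dsm() checks that mask before evaluating anything else, so optional functions can be probed without ACPI errors. A minimal caller sketch (example_get_drv_strength is hypothetical, for illustration only):

	static int example_get_drv_strength(struct intel_host *intel_host,
					    struct device *dev)
	{
		u32 val;

		/* -EOPNOTSUPP when _DSM is absent or the fn bit is clear */
		if (intel_dsm(intel_host, dev, INTEL_DSM_DRV_STRENGTH, &val))
			return 0;	/* fall back to the default drive strength */

		return val;
	}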
262static void sdhci_pci_int_hw_reset(struct sdhci_host *host) 470static void sdhci_pci_int_hw_reset(struct sdhci_host *host)
263{ 471{
264 u8 reg; 472 u8 reg;
@@ -274,67 +482,15 @@ static void sdhci_pci_int_hw_reset(struct sdhci_host *host)
274 usleep_range(300, 1000); 482 usleep_range(300, 1000);
275} 483}
276 484
277static int spt_select_drive_strength(struct sdhci_host *host, 485static int intel_select_drive_strength(struct mmc_card *card,
278 struct mmc_card *card, 486 unsigned int max_dtr, int host_drv,
279 unsigned int max_dtr, 487 int card_drv, int *drv_type)
280 int host_drv, int card_drv, int *drv_type)
281{
282 int drive_strength;
283
284 if (sdhci_pci_spt_drive_strength > 0)
285 drive_strength = sdhci_pci_spt_drive_strength & 0xf;
286 else
287 drive_strength = 0; /* Default 50-ohm */
288
289 if ((mmc_driver_type_mask(drive_strength) & card_drv) == 0)
290 drive_strength = 0; /* Default 50-ohm */
291
292 return drive_strength;
293}
294
295/* Try to read the drive strength from the card */
296static void spt_read_drive_strength(struct sdhci_host *host)
297{ 488{
298 u32 val, i, t; 489 struct sdhci_host *host = mmc_priv(card->host);
299 u16 m; 490 struct sdhci_pci_slot *slot = sdhci_priv(host);
300 491 struct intel_host *intel_host = sdhci_pci_priv(slot);
301 if (sdhci_pci_spt_drive_strength)
302 return;
303
304 sdhci_pci_spt_drive_strength = -1;
305
306 m = sdhci_readw(host, SDHCI_HOST_CONTROL2) & 0x7;
307 if (m != 3 && m != 5)
308 return;
309 val = sdhci_readl(host, SDHCI_PRESENT_STATE);
310 if (val & 0x3)
311 return;
312 sdhci_writel(host, 0x007f0023, SDHCI_INT_ENABLE);
313 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
314 sdhci_writew(host, 0x10, SDHCI_TRANSFER_MODE);
315 sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
316 sdhci_writew(host, 512, SDHCI_BLOCK_SIZE);
317 sdhci_writew(host, 1, SDHCI_BLOCK_COUNT);
318 sdhci_writel(host, 0, SDHCI_ARGUMENT);
319 sdhci_writew(host, 0x83b, SDHCI_COMMAND);
320 for (i = 0; i < 1000; i++) {
321 val = sdhci_readl(host, SDHCI_INT_STATUS);
322 if (val & 0xffff8000)
323 return;
324 if (val & 0x20)
325 break;
326 udelay(1);
327 }
328 val = sdhci_readl(host, SDHCI_PRESENT_STATE);
329 if (!(val & 0x800))
330 return;
331 for (i = 0; i < 47; i++)
332 val = sdhci_readl(host, SDHCI_BUFFER);
333 t = val & 0xf00;
334 if (t != 0x200 && t != 0x300)
335 return;
336 492
337 sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf); 493 return intel_host->drv_strength;
338} 494}
339 495
340static int bxt_get_cd(struct mmc_host *mmc) 496static int bxt_get_cd(struct mmc_host *mmc)
@@ -359,8 +515,57 @@ out:
359 return ret; 515 return ret;
360} 516}
361 517
518#define SDHCI_INTEL_PWR_TIMEOUT_CNT 20
519#define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100
520
521static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
522 unsigned short vdd)
523{
524 int cntr;
525 u8 reg;
526
527 sdhci_set_power(host, mode, vdd);
528
529 if (mode == MMC_POWER_OFF)
530 return;
531
532 /*
533 * Bus power might not enable after D3 -> D0 transition due to the
534 * present state not yet having propagated. Retry for up to 2ms.
535 */
536 for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) {
537 reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
538 if (reg & SDHCI_POWER_ON)
539 break;
540 udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY);
541 reg |= SDHCI_POWER_ON;
542 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
543 }
544}
545
546static const struct sdhci_ops sdhci_intel_byt_ops = {
547 .set_clock = sdhci_set_clock,
548 .set_power = sdhci_intel_set_power,
549 .enable_dma = sdhci_pci_enable_dma,
550 .set_bus_width = sdhci_pci_set_bus_width,
551 .reset = sdhci_reset,
552 .set_uhs_signaling = sdhci_set_uhs_signaling,
553 .hw_reset = sdhci_pci_hw_reset,
554};
555
556static void byt_read_dsm(struct sdhci_pci_slot *slot)
557{
558 struct intel_host *intel_host = sdhci_pci_priv(slot);
559 struct device *dev = &slot->chip->pdev->dev;
560 struct mmc_host *mmc = slot->host->mmc;
561
562 intel_dsm_init(intel_host, dev, mmc);
563 slot->chip->rpm_retune = intel_host->d3_retune;
564}
565
362static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) 566static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
363{ 567{
568 byt_read_dsm(slot);
364 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | 569 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
365 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | 570 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
366 MMC_CAP_CMD_DURING_TFR | 571 MMC_CAP_CMD_DURING_TFR |
@@ -369,10 +574,8 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
369 slot->hw_reset = sdhci_pci_int_hw_reset; 574 slot->hw_reset = sdhci_pci_int_hw_reset;
370 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC) 575 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC)
371 slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */ 576 slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
372 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_SPT_EMMC) { 577 slot->host->mmc_host_ops.select_drive_strength =
373 spt_read_drive_strength(slot->host); 578 intel_select_drive_strength;
374 slot->select_drive_strength = spt_select_drive_strength;
375 }
376 return 0; 579 return 0;
377} 580}
378 581
@@ -405,6 +608,8 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
405{ 608{
406 int err; 609 int err;
407 610
611 byt_read_dsm(slot);
612
408 err = ni_set_max_freq(slot); 613 err = ni_set_max_freq(slot);
409 if (err) 614 if (err)
410 return err; 615 return err;
@@ -416,6 +621,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
416 621
417static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) 622static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
418{ 623{
624 byt_read_dsm(slot);
419 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE | 625 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
420 MMC_CAP_WAIT_WHILE_BUSY; 626 MMC_CAP_WAIT_WHILE_BUSY;
421 return 0; 627 return 0;
@@ -423,63 +629,20 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
423 629
424static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) 630static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
425{ 631{
426 slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; 632 byt_read_dsm(slot);
633 slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
634 MMC_CAP_AGGRESSIVE_PM;
427 slot->cd_idx = 0; 635 slot->cd_idx = 0;
428 slot->cd_override_level = true; 636 slot->cd_override_level = true;
429 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD || 637 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
430 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD || 638 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
431 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD || 639 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
432 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_SD) { 640 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_SD)
433 slot->host->mmc_host_ops.get_cd = bxt_get_cd; 641 slot->host->mmc_host_ops.get_cd = bxt_get_cd;
434 slot->host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
435 }
436 642
437 return 0; 643 return 0;
438} 644}
439 645
440#define SDHCI_INTEL_PWR_TIMEOUT_CNT 20
441#define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100
442
443static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
444 unsigned short vdd)
445{
446 int cntr;
447 u8 reg;
448
449 sdhci_set_power(host, mode, vdd);
450
451 if (mode == MMC_POWER_OFF)
452 return;
453
454 spin_unlock_irq(&host->lock);
455
456 /*
457 * Bus power might not enable after D3 -> D0 transition due to the
458 * present state not yet having propagated. Retry for up to 2ms.
459 */
460 for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) {
461 reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
462 if (reg & SDHCI_POWER_ON)
463 break;
464 udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY);
465 reg |= SDHCI_POWER_ON;
466 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
467 }
468
469 spin_lock_irq(&host->lock);
470}
471
472static const struct sdhci_ops sdhci_intel_byt_ops = {
473 .set_clock = sdhci_set_clock,
474 .set_power = sdhci_intel_set_power,
475 .enable_dma = sdhci_pci_enable_dma,
476 .set_bus_width = sdhci_pci_set_bus_width,
477 .reset = sdhci_reset,
478 .set_uhs_signaling = sdhci_set_uhs_signaling,
479 .hw_reset = sdhci_pci_hw_reset,
480 .select_drive_strength = sdhci_pci_select_drive_strength,
481};
482
483static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { 646static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
484 .allow_runtime_pm = true, 647 .allow_runtime_pm = true,
485 .probe_slot = byt_emmc_probe_slot, 648 .probe_slot = byt_emmc_probe_slot,
@@ -488,6 +651,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
488 SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 | 651 SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
489 SDHCI_QUIRK2_STOP_WITH_TC, 652 SDHCI_QUIRK2_STOP_WITH_TC,
490 .ops = &sdhci_intel_byt_ops, 653 .ops = &sdhci_intel_byt_ops,
654 .priv_size = sizeof(struct intel_host),
491}; 655};
492 656
493static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = { 657static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
@@ -497,6 +661,7 @@ static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
497 .allow_runtime_pm = true, 661 .allow_runtime_pm = true,
498 .probe_slot = ni_byt_sdio_probe_slot, 662 .probe_slot = ni_byt_sdio_probe_slot,
499 .ops = &sdhci_intel_byt_ops, 663 .ops = &sdhci_intel_byt_ops,
664 .priv_size = sizeof(struct intel_host),
500}; 665};
501 666
502static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = { 667static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
@@ -506,6 +671,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
506 .allow_runtime_pm = true, 671 .allow_runtime_pm = true,
507 .probe_slot = byt_sdio_probe_slot, 672 .probe_slot = byt_sdio_probe_slot,
508 .ops = &sdhci_intel_byt_ops, 673 .ops = &sdhci_intel_byt_ops,
674 .priv_size = sizeof(struct intel_host),
509}; 675};
510 676
511static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { 677static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
@@ -517,6 +683,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
517 .own_cd_for_runtime_pm = true, 683 .own_cd_for_runtime_pm = true,
518 .probe_slot = byt_sd_probe_slot, 684 .probe_slot = byt_sd_probe_slot,
519 .ops = &sdhci_intel_byt_ops, 685 .ops = &sdhci_intel_byt_ops,
686 .priv_size = sizeof(struct intel_host),
520}; 687};
521 688
522/* Define Host controllers for Intel Merrifield platform */ 689/* Define Host controllers for Intel Merrifield platform */
@@ -719,9 +886,14 @@ static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
719 jmicron_enable_mmc(slot->host, 0); 886 jmicron_enable_mmc(slot->host, 0);
720} 887}
721 888
889#ifdef CONFIG_PM_SLEEP
722static int jmicron_suspend(struct sdhci_pci_chip *chip) 890static int jmicron_suspend(struct sdhci_pci_chip *chip)
723{ 891{
724 int i; 892 int i, ret;
893
894 ret = __sdhci_pci_suspend_host(chip);
895 if (ret)
896 return ret;
725 897
726 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || 898 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
727 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { 899 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
@@ -729,6 +901,8 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip)
729 jmicron_enable_mmc(chip->slots[i]->host, 0); 901 jmicron_enable_mmc(chip->slots[i]->host, 0);
730 } 902 }
731 903
904 sdhci_pci_init_wakeup(chip);
905
732 return 0; 906 return 0;
733} 907}
734 908
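
The hunk above also illustrates the new PM contract: when a vendor .suspend callback is present, sdhci_pci_suspend() delegates to it entirely (see its hunk later in this patch), so the callback must suspend the hosts and arm wakeup itself. A minimal sketch of such a callback (example_vendor_suspend is hypothetical):

	static int example_vendor_suspend(struct sdhci_pci_chip *chip)
	{
		int ret;

		ret = __sdhci_pci_suspend_host(chip);
		if (ret)
			return ret;

		/* vendor-specific quiescing would go here */

		sdhci_pci_init_wakeup(chip);

		return 0;
	}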
@@ -748,15 +922,18 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
748 return ret; 922 return ret;
749 } 923 }
750 924
751 return 0; 925 return sdhci_pci_resume_host(chip);
752} 926}
927#endif
753 928
754static const struct sdhci_pci_fixes sdhci_o2 = { 929static const struct sdhci_pci_fixes sdhci_o2 = {
755 .probe = sdhci_pci_o2_probe, 930 .probe = sdhci_pci_o2_probe,
756 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 931 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
757 .quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD, 932 .quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD,
758 .probe_slot = sdhci_pci_o2_probe_slot, 933 .probe_slot = sdhci_pci_o2_probe_slot,
934#ifdef CONFIG_PM_SLEEP
759 .resume = sdhci_pci_o2_resume, 935 .resume = sdhci_pci_o2_resume,
936#endif
760}; 937};
761 938
762static const struct sdhci_pci_fixes sdhci_jmicron = { 939static const struct sdhci_pci_fixes sdhci_jmicron = {
@@ -765,8 +942,10 @@ static const struct sdhci_pci_fixes sdhci_jmicron = {
765 .probe_slot = jmicron_probe_slot, 942 .probe_slot = jmicron_probe_slot,
766 .remove_slot = jmicron_remove_slot, 943 .remove_slot = jmicron_remove_slot,
767 944
945#ifdef CONFIG_PM_SLEEP
768 .suspend = jmicron_suspend, 946 .suspend = jmicron_suspend,
769 .resume = jmicron_resume, 947 .resume = jmicron_resume,
948#endif
770}; 949};
771 950
772/* SysKonnect CardBus2SDIO extra registers */ 951/* SysKonnect CardBus2SDIO extra registers */
@@ -1617,20 +1796,6 @@ static void sdhci_pci_hw_reset(struct sdhci_host *host)
1617 slot->hw_reset(host); 1796 slot->hw_reset(host);
1618} 1797}
1619 1798
1620static int sdhci_pci_select_drive_strength(struct sdhci_host *host,
1621 struct mmc_card *card,
1622 unsigned int max_dtr, int host_drv,
1623 int card_drv, int *drv_type)
1624{
1625 struct sdhci_pci_slot *slot = sdhci_priv(host);
1626
1627 if (!slot->select_drive_strength)
1628 return 0;
1629
1630 return slot->select_drive_strength(host, card, max_dtr, host_drv,
1631 card_drv, drv_type);
1632}
1633
1634static const struct sdhci_ops sdhci_pci_ops = { 1799static const struct sdhci_ops sdhci_pci_ops = {
1635 .set_clock = sdhci_set_clock, 1800 .set_clock = sdhci_set_clock,
1636 .enable_dma = sdhci_pci_enable_dma, 1801 .enable_dma = sdhci_pci_enable_dma,
@@ -1638,7 +1803,6 @@ static const struct sdhci_ops sdhci_pci_ops = {
1638 .reset = sdhci_reset, 1803 .reset = sdhci_reset,
1639 .set_uhs_signaling = sdhci_set_uhs_signaling, 1804 .set_uhs_signaling = sdhci_set_uhs_signaling,
1640 .hw_reset = sdhci_pci_hw_reset, 1805 .hw_reset = sdhci_pci_hw_reset,
1641 .select_drive_strength = sdhci_pci_select_drive_strength,
1642}; 1806};
1643 1807
1644/*****************************************************************************\ 1808/*****************************************************************************\
@@ -1651,83 +1815,29 @@ static const struct sdhci_ops sdhci_pci_ops = {
1651static int sdhci_pci_suspend(struct device *dev) 1815static int sdhci_pci_suspend(struct device *dev)
1652{ 1816{
1653 struct pci_dev *pdev = to_pci_dev(dev); 1817 struct pci_dev *pdev = to_pci_dev(dev);
1654 struct sdhci_pci_chip *chip; 1818 struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);
1655 struct sdhci_pci_slot *slot;
1656 mmc_pm_flag_t slot_pm_flags;
1657 mmc_pm_flag_t pm_flags = 0;
1658 int i, ret;
1659 1819
1660 chip = pci_get_drvdata(pdev);
1661 if (!chip) 1820 if (!chip)
1662 return 0; 1821 return 0;
1663 1822
1664 for (i = 0; i < chip->num_slots; i++) { 1823 if (chip->fixes && chip->fixes->suspend)
1665 slot = chip->slots[i]; 1824 return chip->fixes->suspend(chip);
1666 if (!slot)
1667 continue;
1668
1669 ret = sdhci_suspend_host(slot->host);
1670
1671 if (ret)
1672 goto err_pci_suspend;
1673
1674 slot_pm_flags = slot->host->mmc->pm_flags;
1675 if (slot_pm_flags & MMC_PM_WAKE_SDIO_IRQ)
1676 sdhci_enable_irq_wakeups(slot->host);
1677
1678 pm_flags |= slot_pm_flags;
1679 }
1680
1681 if (chip->fixes && chip->fixes->suspend) {
1682 ret = chip->fixes->suspend(chip);
1683 if (ret)
1684 goto err_pci_suspend;
1685 }
1686
1687 if (pm_flags & MMC_PM_KEEP_POWER) {
1688 if (pm_flags & MMC_PM_WAKE_SDIO_IRQ)
1689 device_init_wakeup(dev, true);
1690 else
1691 device_init_wakeup(dev, false);
1692 } else
1693 device_init_wakeup(dev, false);
1694
1695 return 0;
1696 1825
1697err_pci_suspend: 1826 return sdhci_pci_suspend_host(chip);
1698 while (--i >= 0)
1699 sdhci_resume_host(chip->slots[i]->host);
1700 return ret;
1701} 1827}
1702 1828
1703static int sdhci_pci_resume(struct device *dev) 1829static int sdhci_pci_resume(struct device *dev)
1704{ 1830{
1705 struct pci_dev *pdev = to_pci_dev(dev); 1831 struct pci_dev *pdev = to_pci_dev(dev);
1706 struct sdhci_pci_chip *chip; 1832 struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);
1707 struct sdhci_pci_slot *slot;
1708 int i, ret;
1709 1833
1710 chip = pci_get_drvdata(pdev);
1711 if (!chip) 1834 if (!chip)
1712 return 0; 1835 return 0;
1713 1836
1714 if (chip->fixes && chip->fixes->resume) { 1837 if (chip->fixes && chip->fixes->resume)
1715 ret = chip->fixes->resume(chip); 1838 return chip->fixes->resume(chip);
1716 if (ret)
1717 return ret;
1718 }
1719
1720 for (i = 0; i < chip->num_slots; i++) {
1721 slot = chip->slots[i];
1722 if (!slot)
1723 continue;
1724
1725 ret = sdhci_resume_host(slot->host);
1726 if (ret)
1727 return ret;
1728 }
1729 1839
1730 return 0; 1840 return sdhci_pci_resume_host(chip);
1731} 1841}
1732#endif 1842#endif
1733 1843
@@ -1735,67 +1845,29 @@ static int sdhci_pci_resume(struct device *dev)
1735static int sdhci_pci_runtime_suspend(struct device *dev) 1845static int sdhci_pci_runtime_suspend(struct device *dev)
1736{ 1846{
1737 struct pci_dev *pdev = to_pci_dev(dev); 1847 struct pci_dev *pdev = to_pci_dev(dev);
1738 struct sdhci_pci_chip *chip; 1848 struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);
1739 struct sdhci_pci_slot *slot;
1740 int i, ret;
1741 1849
1742 chip = pci_get_drvdata(pdev);
1743 if (!chip) 1850 if (!chip)
1744 return 0; 1851 return 0;
1745 1852
1746 for (i = 0; i < chip->num_slots; i++) { 1853 if (chip->fixes && chip->fixes->runtime_suspend)
1747 slot = chip->slots[i]; 1854 return chip->fixes->runtime_suspend(chip);
1748 if (!slot)
1749 continue;
1750
1751 ret = sdhci_runtime_suspend_host(slot->host);
1752
1753 if (ret)
1754 goto err_pci_runtime_suspend;
1755 }
1756 1855
1757 if (chip->fixes && chip->fixes->suspend) { 1856 return sdhci_pci_runtime_suspend_host(chip);
1758 ret = chip->fixes->suspend(chip);
1759 if (ret)
1760 goto err_pci_runtime_suspend;
1761 }
1762
1763 return 0;
1764
1765err_pci_runtime_suspend:
1766 while (--i >= 0)
1767 sdhci_runtime_resume_host(chip->slots[i]->host);
1768 return ret;
1769} 1857}
1770 1858
1771static int sdhci_pci_runtime_resume(struct device *dev) 1859static int sdhci_pci_runtime_resume(struct device *dev)
1772{ 1860{
1773 struct pci_dev *pdev = to_pci_dev(dev); 1861 struct pci_dev *pdev = to_pci_dev(dev);
1774 struct sdhci_pci_chip *chip; 1862 struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);
1775 struct sdhci_pci_slot *slot;
1776 int i, ret;
1777 1863
1778 chip = pci_get_drvdata(pdev);
1779 if (!chip) 1864 if (!chip)
1780 return 0; 1865 return 0;
1781 1866
1782 if (chip->fixes && chip->fixes->resume) { 1867 if (chip->fixes && chip->fixes->runtime_resume)
1783 ret = chip->fixes->resume(chip); 1868 return chip->fixes->runtime_resume(chip);
1784 if (ret)
1785 return ret;
1786 }
1787 1869
1788 for (i = 0; i < chip->num_slots; i++) { 1870 return sdhci_pci_runtime_resume_host(chip);
1789 slot = chip->slots[i];
1790 if (!slot)
1791 continue;
1792
1793 ret = sdhci_runtime_resume_host(slot->host);
1794 if (ret)
1795 return ret;
1796 }
1797
1798 return 0;
1799} 1871}
1800#endif 1872#endif
1801 1873
@@ -1818,6 +1890,7 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
1818 struct sdhci_pci_slot *slot; 1890 struct sdhci_pci_slot *slot;
1819 struct sdhci_host *host; 1891 struct sdhci_host *host;
1820 int ret, bar = first_bar + slotno; 1892 int ret, bar = first_bar + slotno;
1893 size_t priv_size = chip->fixes ? chip->fixes->priv_size : 0;
1821 1894
1822 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { 1895 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
1823 dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar); 1896 dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
@@ -1839,7 +1912,7 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
1839 return ERR_PTR(-ENODEV); 1912 return ERR_PTR(-ENODEV);
1840 } 1913 }
1841 1914
1842 host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot)); 1915 host = sdhci_alloc_host(&pdev->dev, sizeof(*slot) + priv_size);
1843 if (IS_ERR(host)) { 1916 if (IS_ERR(host)) {
1844 dev_err(&pdev->dev, "cannot allocate host\n"); 1917 dev_err(&pdev->dev, "cannot allocate host\n");
1845 return ERR_CAST(host); 1918 return ERR_CAST(host);
@@ -1919,7 +1992,10 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
1919 } 1992 }
1920 } 1993 }
1921 1994
1922 ret = sdhci_add_host(host); 1995 if (chip->fixes && chip->fixes->add_host)
1996 ret = chip->fixes->add_host(slot);
1997 else
1998 ret = sdhci_add_host(host);
1923 if (ret) 1999 if (ret)
1924 goto remove; 2000 goto remove;
1925 2001
@@ -2042,6 +2118,8 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
2042 chip->allow_runtime_pm = chip->fixes->allow_runtime_pm; 2118 chip->allow_runtime_pm = chip->fixes->allow_runtime_pm;
2043 } 2119 }
2044 chip->num_slots = slots; 2120 chip->num_slots = slots;
2121 chip->pm_retune = true;
2122 chip->rpm_retune = true;
2045 2123
2046 pci_set_drvdata(pdev, chip); 2124 pci_set_drvdata(pdev, chip);
2047 2125
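
With pm_retune and rpm_retune defaulting to true here, both the system-suspend and runtime-suspend paths flag a retune; platform code may clear them, as byt_read_dsm() does for rpm_retune when the Intel _DSM reports that retuning after D3 is not needed.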
diff --git a/drivers/mmc/host/sdhci-pci-data.c b/drivers/mmc/host/sdhci-pci-data.c
index 56fddc622a54..a611217769f5 100644
--- a/drivers/mmc/host/sdhci-pci-data.c
+++ b/drivers/mmc/host/sdhci-pci-data.c
@@ -3,6 +3,3 @@
3 3
4struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, int slotno); 4struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, int slotno);
5EXPORT_SYMBOL_GPL(sdhci_pci_get_data); 5EXPORT_SYMBOL_GPL(sdhci_pci_get_data);
6
7int sdhci_pci_spt_drive_strength;
8EXPORT_SYMBOL_GPL(sdhci_pci_spt_drive_strength);
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index d48f03104b5b..14273ca00641 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -384,8 +384,10 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
384 return 0; 384 return 0;
385} 385}
386 386
387#ifdef CONFIG_PM_SLEEP
387int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip) 388int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip)
388{ 389{
389 sdhci_pci_o2_probe(chip); 390 sdhci_pci_o2_probe(chip);
390 return 0; 391 return sdhci_pci_resume_host(chip);
391} 392}
393#endif
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 36f743464fcc..37766d20a600 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -64,12 +64,20 @@ struct sdhci_pci_fixes {
64 int (*probe) (struct sdhci_pci_chip *); 64 int (*probe) (struct sdhci_pci_chip *);
65 65
66 int (*probe_slot) (struct sdhci_pci_slot *); 66 int (*probe_slot) (struct sdhci_pci_slot *);
67 int (*add_host) (struct sdhci_pci_slot *);
67 void (*remove_slot) (struct sdhci_pci_slot *, int); 68 void (*remove_slot) (struct sdhci_pci_slot *, int);
68 69
70#ifdef CONFIG_PM_SLEEP
69 int (*suspend) (struct sdhci_pci_chip *); 71 int (*suspend) (struct sdhci_pci_chip *);
70 int (*resume) (struct sdhci_pci_chip *); 72 int (*resume) (struct sdhci_pci_chip *);
73#endif
74#ifdef CONFIG_PM
75 int (*runtime_suspend) (struct sdhci_pci_chip *);
76 int (*runtime_resume) (struct sdhci_pci_chip *);
77#endif
71 78
72 const struct sdhci_ops *ops; 79 const struct sdhci_ops *ops;
80 size_t priv_size;
73}; 81};
74 82
75struct sdhci_pci_slot { 83struct sdhci_pci_slot {
@@ -85,10 +93,7 @@ struct sdhci_pci_slot {
85 bool cd_override_level; 93 bool cd_override_level;
86 94
87 void (*hw_reset)(struct sdhci_host *host); 95 void (*hw_reset)(struct sdhci_host *host);
88 int (*select_drive_strength)(struct sdhci_host *host, 96 unsigned long private[0] ____cacheline_aligned;
89 struct mmc_card *card,
90 unsigned int max_dtr, int host_drv,
91 int card_drv, int *drv_type);
92}; 97};
93 98
94struct sdhci_pci_chip { 99struct sdhci_pci_chip {
@@ -97,10 +102,21 @@ struct sdhci_pci_chip {
97 unsigned int quirks; 102 unsigned int quirks;
98 unsigned int quirks2; 103 unsigned int quirks2;
99 bool allow_runtime_pm; 104 bool allow_runtime_pm;
105 bool pm_retune;
106 bool rpm_retune;
100 const struct sdhci_pci_fixes *fixes; 107 const struct sdhci_pci_fixes *fixes;
101 108
102 int num_slots; /* Slots on controller */ 109 int num_slots; /* Slots on controller */
103 struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */ 110 struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */
104}; 111};
105 112
113static inline void *sdhci_pci_priv(struct sdhci_pci_slot *slot)
114{
115 return (void *)slot->private;
116}
117
118#ifdef CONFIG_PM_SLEEP
119int sdhci_pci_resume_host(struct sdhci_pci_chip *chip);
120#endif
121
106#endif /* __SDHCI_PCI_H */ 122#endif /* __SDHCI_PCI_H */
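
The private[0] ____cacheline_aligned tail together with the new priv_size field replaces the removed per-slot callback pointer: sdhci_pci_probe_slot() passes sizeof(*slot) + priv_size to sdhci_alloc_host(), and sdhci_pci_priv() hands drivers a pointer to the aligned tail. A sketch of a consumer under this scheme (example_host, example_probe_slot and sdhci_example are hypothetical):

	struct example_host {
		u32 cached_caps;
	};

	static int example_probe_slot(struct sdhci_pci_slot *slot)
	{
		struct example_host *ex = sdhci_pci_priv(slot);

		ex->cached_caps = 0;

		return 0;
	}

	static const struct sdhci_pci_fixes sdhci_example = {
		.probe_slot	= example_probe_slot,
		.priv_size	= sizeof(struct example_host),
	};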
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index ad49bfaf5bf8..e090d8c42ddb 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -213,6 +213,9 @@ static int sdhci_pltfm_suspend(struct device *dev)
213{ 213{
214 struct sdhci_host *host = dev_get_drvdata(dev); 214 struct sdhci_host *host = dev_get_drvdata(dev);
215 215
216 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
217 mmc_retune_needed(host->mmc);
218
216 return sdhci_suspend_host(host); 219 return sdhci_suspend_host(host);
217} 220}
218 221
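
The same pre-suspend pattern recurs in the platform drivers below: unless the controller uses tuning mode 3, in which tuning is retained across power states, a retune is flagged before the host suspends. Condensed from the hunk above:

	static int example_pltfm_suspend(struct device *dev)
	{
		struct sdhci_host *host = dev_get_drvdata(dev);

		/* Tuning mode 3 retains tuning across suspend */
		if (host->tuning_mode != SDHCI_TUNING_MODE_3)
			mmc_retune_needed(host->mmc);

		return sdhci_suspend_host(host);
	}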
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index 347eae2d7b6a..995083ce1c46 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -185,7 +185,11 @@ static int sdhci_pxav2_probe(struct platform_device *pdev)
185 goto err_clk_get; 185 goto err_clk_get;
186 } 186 }
187 pltfm_host->clk = clk; 187 pltfm_host->clk = clk;
188 clk_prepare_enable(clk); 188 ret = clk_prepare_enable(clk);
189 if (ret) {
190 dev_err(&pdev->dev, "failed to enable io clock\n");
191 goto err_clk_enable;
192 }
189 193
190 host->quirks = SDHCI_QUIRK_BROKEN_ADMA 194 host->quirks = SDHCI_QUIRK_BROKEN_ADMA
191 | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL 195 | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
@@ -222,12 +226,11 @@ static int sdhci_pxav2_probe(struct platform_device *pdev)
222 goto err_add_host; 226 goto err_add_host;
223 } 227 }
224 228
225 platform_set_drvdata(pdev, host);
226
227 return 0; 229 return 0;
228 230
229err_add_host: 231err_add_host:
230 clk_disable_unprepare(clk); 232 clk_disable_unprepare(clk);
233err_clk_enable:
231 clk_put(clk); 234 clk_put(clk);
232err_clk_get: 235err_clk_get:
233 sdhci_pltfm_free(pdev); 236 sdhci_pltfm_free(pdev);
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index d0f5c05fbc19..f953f35c2624 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -323,11 +323,8 @@ static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
323 if (host->pwr == 0) 323 if (host->pwr == 0)
324 vdd = 0; 324 vdd = 0;
325 325
326 if (!IS_ERR(mmc->supply.vmmc)) { 326 if (!IS_ERR(mmc->supply.vmmc))
327 spin_unlock_irq(&host->lock);
328 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 327 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
329 spin_lock_irq(&host->lock);
330 }
331} 328}
332 329
333static const struct sdhci_ops pxav3_sdhci_ops = { 330static const struct sdhci_ops pxav3_sdhci_ops = {
@@ -480,8 +477,6 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
480 goto err_add_host; 477 goto err_add_host;
481 } 478 }
482 479
483 platform_set_drvdata(pdev, host);
484
485 if (host->mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ) 480 if (host->mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ)
486 device_init_wakeup(&pdev->dev, 1); 481 device_init_wakeup(&pdev->dev, 1);
487 482
@@ -529,6 +524,8 @@ static int sdhci_pxav3_suspend(struct device *dev)
529 struct sdhci_host *host = dev_get_drvdata(dev); 524 struct sdhci_host *host = dev_get_drvdata(dev);
530 525
531 pm_runtime_get_sync(dev); 526 pm_runtime_get_sync(dev);
527 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
528 mmc_retune_needed(host->mmc);
532 ret = sdhci_suspend_host(host); 529 ret = sdhci_suspend_host(host);
533 pm_runtime_mark_last_busy(dev); 530 pm_runtime_mark_last_busy(dev);
534 pm_runtime_put_autosuspend(dev); 531 pm_runtime_put_autosuspend(dev);
@@ -562,6 +559,9 @@ static int sdhci_pxav3_runtime_suspend(struct device *dev)
562 if (ret) 559 if (ret)
563 return ret; 560 return ret;
564 561
562 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
563 mmc_retune_needed(host->mmc);
564
565 clk_disable_unprepare(pxa->clk_io); 565 clk_disable_unprepare(pxa->clk_io);
566 if (!IS_ERR(pxa->clk_core)) 566 if (!IS_ERR(pxa->clk_core))
567 clk_disable_unprepare(pxa->clk_core); 567 clk_disable_unprepare(pxa->clk_core);
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 3e5c83d435ae..7c065a70f92b 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -190,9 +190,7 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
190 * speed possible with selected clock source and skip the division. 190 * speed possible with selected clock source and skip the division.
191 */ 191 */
192 if (ourhost->no_divider) { 192 if (ourhost->no_divider) {
193 spin_unlock_irq(&ourhost->host->lock);
194 rate = clk_round_rate(clksrc, wanted); 193 rate = clk_round_rate(clksrc, wanted);
195 spin_lock_irq(&ourhost->host->lock);
196 return wanted - rate; 194 return wanted - rate;
197 } 195 }
198 196
@@ -389,9 +387,7 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
389 clk &= ~SDHCI_CLOCK_CARD_EN; 387 clk &= ~SDHCI_CLOCK_CARD_EN;
390 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 388 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
391 389
392 spin_unlock_irq(&host->lock);
393 ret = clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock); 390 ret = clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
394 spin_lock_irq(&host->lock);
395 if (ret != 0) { 391 if (ret != 0) {
396 dev_err(dev, "%s: failed to set clock rate %uHz\n", 392 dev_err(dev, "%s: failed to set clock rate %uHz\n",
397 mmc_hostname(host->mmc), clock); 393 mmc_hostname(host->mmc), clock);
@@ -743,6 +739,9 @@ static int sdhci_s3c_suspend(struct device *dev)
743{ 739{
744 struct sdhci_host *host = dev_get_drvdata(dev); 740 struct sdhci_host *host = dev_get_drvdata(dev);
745 741
742 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
743 mmc_retune_needed(host->mmc);
744
746 return sdhci_suspend_host(host); 745 return sdhci_suspend_host(host);
747} 746}
748 747
@@ -764,6 +763,9 @@ static int sdhci_s3c_runtime_suspend(struct device *dev)
764 763
765 ret = sdhci_runtime_suspend_host(host); 764 ret = sdhci_runtime_suspend_host(host);
766 765
766 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
767 mmc_retune_needed(host->mmc);
768
767 if (ourhost->cur_clk >= 0) 769 if (ourhost->cur_clk >= 0)
768 clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]); 770 clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]);
769 clk_disable_unprepare(busclk); 771 clk_disable_unprepare(busclk);
diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
index 5d068639dd3f..c251c6c0a112 100644
--- a/drivers/mmc/host/sdhci-sirf.c
+++ b/drivers/mmc/host/sdhci-sirf.c
@@ -237,6 +237,9 @@ static int sdhci_sirf_suspend(struct device *dev)
237 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 237 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
238 int ret; 238 int ret;
239 239
240 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
241 mmc_retune_needed(host->mmc);
242
240 ret = sdhci_suspend_host(host); 243 ret = sdhci_suspend_host(host);
241 if (ret) 244 if (ret)
242 return ret; 245 return ret;
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 255a896769b8..8c0f88428556 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -165,6 +165,9 @@ static int sdhci_suspend(struct device *dev)
165 struct spear_sdhci *sdhci = sdhci_priv(host); 165 struct spear_sdhci *sdhci = sdhci_priv(host);
166 int ret; 166 int ret;
167 167
168 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
169 mmc_retune_needed(host->mmc);
170
168 ret = sdhci_suspend_host(host); 171 ret = sdhci_suspend_host(host);
169 if (!ret) 172 if (!ret)
170 clk_disable(sdhci->clk); 173 clk_disable(sdhci->clk);
diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c
index ed92ce729dde..68c36c9fa231 100644
--- a/drivers/mmc/host/sdhci-st.c
+++ b/drivers/mmc/host/sdhci-st.c
@@ -418,8 +418,6 @@ static int sdhci_st_probe(struct platform_device *pdev)
418 goto err_out; 418 goto err_out;
419 } 419 }
420 420
421 platform_set_drvdata(pdev, host);
422
423 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION)); 421 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
424 422
425 dev_info(&pdev->dev, "SDHCI ST Initialised: Host Version: 0x%x Vendor Version 0x%x\n", 423 dev_info(&pdev->dev, "SDHCI ST Initialised: Host Version: 0x%x Vendor Version 0x%x\n",
@@ -465,8 +463,12 @@ static int sdhci_st_suspend(struct device *dev)
465 struct sdhci_host *host = dev_get_drvdata(dev); 463 struct sdhci_host *host = dev_get_drvdata(dev);
466 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 464 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
467 struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host); 465 struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host);
468 int ret = sdhci_suspend_host(host); 466 int ret;
467
468 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
469 mmc_retune_needed(host->mmc);
469 470
471 ret = sdhci_suspend_host(host);
470 if (ret) 472 if (ret)
471 goto out; 473 goto out;
472 474
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 20b6ff5b4af1..7f93079c7a3a 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -21,6 +21,7 @@
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/of.h> 22#include <linux/of.h>
23#include <linux/of_device.h> 23#include <linux/of_device.h>
24#include <linux/reset.h>
24#include <linux/mmc/card.h> 25#include <linux/mmc/card.h>
25#include <linux/mmc/host.h> 26#include <linux/mmc/host.h>
26#include <linux/mmc/mmc.h> 27#include <linux/mmc/mmc.h>
@@ -65,6 +66,8 @@ struct sdhci_tegra {
65 struct gpio_desc *power_gpio; 66 struct gpio_desc *power_gpio;
66 bool ddr_signaling; 67 bool ddr_signaling;
67 bool pad_calib_required; 68 bool pad_calib_required;
69
70 struct reset_control *rst;
68}; 71};
69 72
70static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg) 73static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
@@ -431,7 +434,23 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
431 .pdata = &sdhci_tegra210_pdata, 434 .pdata = &sdhci_tegra210_pdata,
432}; 435};
433 436
437static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
438 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
439 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
440 SDHCI_QUIRK_SINGLE_POWER_WRITE |
441 SDHCI_QUIRK_NO_HISPD_BIT |
442 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
443 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
444 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
445 .ops = &tegra114_sdhci_ops,
446};
447
448static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
449 .pdata = &sdhci_tegra186_pdata,
450};
451
434static const struct of_device_id sdhci_tegra_dt_match[] = { 452static const struct of_device_id sdhci_tegra_dt_match[] = {
453 { .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
435 { .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 }, 454 { .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
436 { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 }, 455 { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
437 { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 }, 456 { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
@@ -489,6 +508,25 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
489 clk_prepare_enable(clk); 508 clk_prepare_enable(clk);
490 pltfm_host->clk = clk; 509 pltfm_host->clk = clk;
491 510
511 tegra_host->rst = devm_reset_control_get(&pdev->dev, "sdhci");
512 if (IS_ERR(tegra_host->rst)) {
513 rc = PTR_ERR(tegra_host->rst);
514 dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
515 goto err_rst_get;
516 }
517
518 rc = reset_control_assert(tegra_host->rst);
519 if (rc)
520 goto err_rst_get;
521
522 usleep_range(2000, 4000);
523
524 rc = reset_control_deassert(tegra_host->rst);
525 if (rc)
526 goto err_rst_get;
527
528 usleep_range(2000, 4000);
529
492 rc = sdhci_add_host(host); 530 rc = sdhci_add_host(host);
493 if (rc) 531 if (rc)
494 goto err_add_host; 532 goto err_add_host;
@@ -496,6 +534,8 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
496 return 0; 534 return 0;
497 535
498err_add_host: 536err_add_host:
537 reset_control_assert(tegra_host->rst);
538err_rst_get:
499 clk_disable_unprepare(pltfm_host->clk); 539 clk_disable_unprepare(pltfm_host->clk);
500err_clk_get: 540err_clk_get:
501err_power_req: 541err_power_req:
@@ -504,6 +544,23 @@ err_parse_dt:
504 return rc; 544 return rc;
505} 545}
506 546
547static int sdhci_tegra_remove(struct platform_device *pdev)
548{
549 struct sdhci_host *host = platform_get_drvdata(pdev);
550 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
551 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
552
553 sdhci_remove_host(host, 0);
554
555 reset_control_assert(tegra_host->rst);
556 usleep_range(2000, 4000);
557 clk_disable_unprepare(pltfm_host->clk);
558
559 sdhci_pltfm_free(pdev);
560
561 return 0;
562}
563
507static struct platform_driver sdhci_tegra_driver = { 564static struct platform_driver sdhci_tegra_driver = {
508 .driver = { 565 .driver = {
509 .name = "sdhci-tegra", 566 .name = "sdhci-tegra",
@@ -511,7 +568,7 @@ static struct platform_driver sdhci_tegra_driver = {
511 .pm = &sdhci_pltfm_pmops, 568 .pm = &sdhci_pltfm_pmops,
512 }, 569 },
513 .probe = sdhci_tegra_probe, 570 .probe = sdhci_tegra_probe,
514 .remove = sdhci_pltfm_unregister, 571 .remove = sdhci_tegra_remove,
515}; 572};
516 573
517module_platform_driver(sdhci_tegra_driver); 574module_platform_driver(sdhci_tegra_driver);
diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
new file mode 100644
index 000000000000..6356781f1cca
--- /dev/null
+++ b/drivers/mmc/host/sdhci-xenon-phy.c
@@ -0,0 +1,837 @@
1/*
2 * PHY support for Xenon SDHC
3 *
4 * Copyright (C) 2016 Marvell, All Rights Reserved.
5 *
6 * Author: Hu Ziji <huziji@marvell.com>
7 * Date: 2016-8-24
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation version 2.
12 */
13
14#include <linux/slab.h>
15#include <linux/delay.h>
16#include <linux/ktime.h>
17#include <linux/of_address.h>
18
19#include "sdhci-pltfm.h"
20#include "sdhci-xenon.h"
21
22/* Register base for eMMC PHY 5.0 Version */
23#define XENON_EMMC_5_0_PHY_REG_BASE 0x0160
24/* Register base for eMMC PHY 5.1 Version */
25#define XENON_EMMC_PHY_REG_BASE 0x0170
26
27#define XENON_EMMC_PHY_TIMING_ADJUST XENON_EMMC_PHY_REG_BASE
28#define XENON_EMMC_5_0_PHY_TIMING_ADJUST XENON_EMMC_5_0_PHY_REG_BASE
29#define XENON_TIMING_ADJUST_SLOW_MODE BIT(29)
30#define XENON_TIMING_ADJUST_SDIO_MODE BIT(28)
31#define XENON_SAMPL_INV_QSP_PHASE_SELECT BIT(18)
32#define XENON_SAMPL_INV_QSP_PHASE_SELECT_SHIFT 18
33#define XENON_PHY_INITIALIZAION BIT(31)
34#define XENON_WAIT_CYCLE_BEFORE_USING_MASK 0xF
35#define XENON_WAIT_CYCLE_BEFORE_USING_SHIFT 12
36#define XENON_FC_SYNC_EN_DURATION_MASK 0xF
37#define XENON_FC_SYNC_EN_DURATION_SHIFT 8
38#define XENON_FC_SYNC_RST_EN_DURATION_MASK 0xF
39#define XENON_FC_SYNC_RST_EN_DURATION_SHIFT 4
40#define XENON_FC_SYNC_RST_DURATION_MASK 0xF
41#define XENON_FC_SYNC_RST_DURATION_SHIFT 0
42
43#define XENON_EMMC_PHY_FUNC_CONTROL (XENON_EMMC_PHY_REG_BASE + 0x4)
44#define XENON_EMMC_5_0_PHY_FUNC_CONTROL \
45 (XENON_EMMC_5_0_PHY_REG_BASE + 0x4)
46#define XENON_ASYNC_DDRMODE_MASK BIT(23)
47#define XENON_ASYNC_DDRMODE_SHIFT 23
48#define XENON_CMD_DDR_MODE BIT(16)
49#define XENON_DQ_DDR_MODE_SHIFT 8
50#define XENON_DQ_DDR_MODE_MASK 0xFF
51#define XENON_DQ_ASYNC_MODE BIT(4)
52
53#define XENON_EMMC_PHY_PAD_CONTROL (XENON_EMMC_PHY_REG_BASE + 0x8)
54#define XENON_EMMC_5_0_PHY_PAD_CONTROL \
55 (XENON_EMMC_5_0_PHY_REG_BASE + 0x8)
56#define XENON_REC_EN_SHIFT 24
57#define XENON_REC_EN_MASK 0xF
58#define XENON_FC_DQ_RECEN BIT(24)
59#define XENON_FC_CMD_RECEN BIT(25)
60#define XENON_FC_QSP_RECEN BIT(26)
61#define XENON_FC_QSN_RECEN BIT(27)
62#define XENON_OEN_QSN BIT(28)
63#define XENON_AUTO_RECEN_CTRL BIT(30)
64#define XENON_FC_ALL_CMOS_RECEIVER 0xF000
65
66#define XENON_EMMC5_FC_QSP_PD BIT(18)
67#define XENON_EMMC5_FC_QSP_PU BIT(22)
68#define XENON_EMMC5_FC_CMD_PD BIT(17)
69#define XENON_EMMC5_FC_CMD_PU BIT(21)
70#define XENON_EMMC5_FC_DQ_PD BIT(16)
71#define XENON_EMMC5_FC_DQ_PU BIT(20)
72
73#define XENON_EMMC_PHY_PAD_CONTROL1 (XENON_EMMC_PHY_REG_BASE + 0xC)
74#define XENON_EMMC5_1_FC_QSP_PD BIT(9)
75#define XENON_EMMC5_1_FC_QSP_PU BIT(25)
76#define XENON_EMMC5_1_FC_CMD_PD BIT(8)
77#define XENON_EMMC5_1_FC_CMD_PU BIT(24)
78#define XENON_EMMC5_1_FC_DQ_PD 0xFF
79#define XENON_EMMC5_1_FC_DQ_PU (0xFF << 16)
80
81#define XENON_EMMC_PHY_PAD_CONTROL2 (XENON_EMMC_PHY_REG_BASE + 0x10)
82#define XENON_EMMC_5_0_PHY_PAD_CONTROL2 \
83 (XENON_EMMC_5_0_PHY_REG_BASE + 0xC)
84#define XENON_ZNR_MASK 0x1F
85#define XENON_ZNR_SHIFT 8
86#define XENON_ZPR_MASK 0x1F
 87/* Preferred ZNR and ZPR values vary between boards.
 88 * The specific ZNR and ZPR values should be defined here
 89 * according to the board's actual timing.
90 */
91#define XENON_ZNR_DEF_VALUE 0xF
92#define XENON_ZPR_DEF_VALUE 0xF
93
94#define XENON_EMMC_PHY_DLL_CONTROL (XENON_EMMC_PHY_REG_BASE + 0x14)
95#define XENON_EMMC_5_0_PHY_DLL_CONTROL \
96 (XENON_EMMC_5_0_PHY_REG_BASE + 0x10)
97#define XENON_DLL_ENABLE BIT(31)
98#define XENON_DLL_UPDATE_STROBE_5_0 BIT(30)
99#define XENON_DLL_REFCLK_SEL BIT(30)
100#define XENON_DLL_UPDATE BIT(23)
101#define XENON_DLL_PHSEL1_SHIFT 24
102#define XENON_DLL_PHSEL0_SHIFT 16
103#define XENON_DLL_PHASE_MASK 0x3F
104#define XENON_DLL_PHASE_90_DEGREE 0x1F
105#define XENON_DLL_FAST_LOCK BIT(5)
106#define XENON_DLL_GAIN2X BIT(3)
107#define XENON_DLL_BYPASS_EN BIT(0)
108
109#define XENON_EMMC_5_0_PHY_LOGIC_TIMING_ADJUST \
110 (XENON_EMMC_5_0_PHY_REG_BASE + 0x14)
111#define XENON_EMMC_5_0_PHY_LOGIC_TIMING_VALUE 0x5A54
112#define XENON_EMMC_PHY_LOGIC_TIMING_ADJUST (XENON_EMMC_PHY_REG_BASE + 0x18)
113#define XENON_LOGIC_TIMING_VALUE 0x00AA8977
114
115/*
 116 * Offsets of PHY registers and some special register values
 117 * in eMMC PHY 5.0 and eMMC PHY 5.1
118 */
119struct xenon_emmc_phy_regs {
120 /* Offset of Timing Adjust register */
121 u16 timing_adj;
122 /* Offset of Func Control register */
123 u16 func_ctrl;
124 /* Offset of Pad Control register */
125 u16 pad_ctrl;
126 /* Offset of Pad Control register 2 */
127 u16 pad_ctrl2;
128 /* Offset of DLL Control register */
129 u16 dll_ctrl;
130 /* Offset of Logic Timing Adjust register */
131 u16 logic_timing_adj;
132 /* DLL Update Enable bit */
133 u32 dll_update;
134 /* value in Logic Timing Adjustment register */
135 u32 logic_timing_val;
136};
137
138static const char * const phy_types[] = {
139 "emmc 5.0 phy",
140 "emmc 5.1 phy"
141};
142
143enum xenon_phy_type_enum {
144 EMMC_5_0_PHY,
145 EMMC_5_1_PHY,
146 NR_PHY_TYPES
147};
148
149enum soc_pad_ctrl_type {
150 SOC_PAD_SD,
151 SOC_PAD_FIXED_1_8V,
152};
153
154struct soc_pad_ctrl {
155 /* Register address of SoC PHY PAD ctrl */
156 void __iomem *reg;
157 /* SoC PHY PAD ctrl type */
158 enum soc_pad_ctrl_type pad_type;
159 /* SoC specific operation to set SoC PHY PAD */
160 void (*set_soc_pad)(struct sdhci_host *host,
161 unsigned char signal_voltage);
162};
163
164static struct xenon_emmc_phy_regs xenon_emmc_5_0_phy_regs = {
165 .timing_adj = XENON_EMMC_5_0_PHY_TIMING_ADJUST,
166 .func_ctrl = XENON_EMMC_5_0_PHY_FUNC_CONTROL,
167 .pad_ctrl = XENON_EMMC_5_0_PHY_PAD_CONTROL,
168 .pad_ctrl2 = XENON_EMMC_5_0_PHY_PAD_CONTROL2,
169 .dll_ctrl = XENON_EMMC_5_0_PHY_DLL_CONTROL,
170 .logic_timing_adj = XENON_EMMC_5_0_PHY_LOGIC_TIMING_ADJUST,
171 .dll_update = XENON_DLL_UPDATE_STROBE_5_0,
172 .logic_timing_val = XENON_EMMC_5_0_PHY_LOGIC_TIMING_VALUE,
173};
174
175static struct xenon_emmc_phy_regs xenon_emmc_5_1_phy_regs = {
176 .timing_adj = XENON_EMMC_PHY_TIMING_ADJUST,
177 .func_ctrl = XENON_EMMC_PHY_FUNC_CONTROL,
178 .pad_ctrl = XENON_EMMC_PHY_PAD_CONTROL,
179 .pad_ctrl2 = XENON_EMMC_PHY_PAD_CONTROL2,
180 .dll_ctrl = XENON_EMMC_PHY_DLL_CONTROL,
181 .logic_timing_adj = XENON_EMMC_PHY_LOGIC_TIMING_ADJUST,
182 .dll_update = XENON_DLL_UPDATE,
183 .logic_timing_val = XENON_LOGIC_TIMING_VALUE,
184};
185
186/*
187 * eMMC PHY configuration and operations
188 */
189struct xenon_emmc_phy_params {
190 bool slow_mode;
191
192 u8 znr;
193 u8 zpr;
194
 195 /* Number of consecutive sampling points in a valid sampling window */
196 u8 nr_tun_times;
197 /* Divider for calculating Tuning Step */
198 u8 tun_step_divider;
199
200 struct soc_pad_ctrl pad_ctrl;
201};
202
203static int xenon_alloc_emmc_phy(struct sdhci_host *host)
204{
205 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
206 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
207 struct xenon_emmc_phy_params *params;
208
209 params = devm_kzalloc(mmc_dev(host->mmc), sizeof(*params), GFP_KERNEL);
210 if (!params)
211 return -ENOMEM;
212
213 priv->phy_params = params;
214 if (priv->phy_type == EMMC_5_0_PHY)
215 priv->emmc_phy_regs = &xenon_emmc_5_0_phy_regs;
216 else
217 priv->emmc_phy_regs = &xenon_emmc_5_1_phy_regs;
218
219 return 0;
220}
221
222/*
223 * eMMC 5.0/5.1 PHY init/re-init.
224 * eMMC PHY init should be executed after:
225 * 1. SDCLK frequency changes.
226 * 2. SDCLK is stopped and re-enabled.
 227 * 3. the config in emmc_phy_regs->timing_adj and emmc_phy_regs->func_ctrl
 228 * is changed.
229 */
230static int xenon_emmc_phy_init(struct sdhci_host *host)
231{
232 u32 reg;
233 u32 wait, clock;
234 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
235 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
236 struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs;
237
238 reg = sdhci_readl(host, phy_regs->timing_adj);
239 reg |= XENON_PHY_INITIALIZAION;
240 sdhci_writel(host, reg, phy_regs->timing_adj);
241
242 /* Add duration of FC_SYNC_RST */
243 wait = ((reg >> XENON_FC_SYNC_RST_DURATION_SHIFT) &
244 XENON_FC_SYNC_RST_DURATION_MASK);
245 /* Add interval between FC_SYNC_EN and FC_SYNC_RST */
246 wait += ((reg >> XENON_FC_SYNC_RST_EN_DURATION_SHIFT) &
247 XENON_FC_SYNC_RST_EN_DURATION_MASK);
248 /* Add duration of asserting FC_SYNC_EN */
249 wait += ((reg >> XENON_FC_SYNC_EN_DURATION_SHIFT) &
250 XENON_FC_SYNC_EN_DURATION_MASK);
251 /* Add duration of waiting for PHY */
252 wait += ((reg >> XENON_WAIT_CYCLE_BEFORE_USING_SHIFT) &
253 XENON_WAIT_CYCLE_BEFORE_USING_MASK);
 254 /* 4 additional bus clocks and 4 AXI bus clocks are required */
255 wait += 8;
256 wait <<= 20;
257
258 clock = host->clock;
259 if (!clock)
 260 /* Use the slowest possible bus frequency value */
261 clock = XENON_LOWEST_SDCLK_FREQ;
 262 /* get the wait time in microseconds */
263 wait /= clock;
264 wait++;
 265 /* wait until host eMMC PHY init completes */
266 udelay(wait);
267
268 reg = sdhci_readl(host, phy_regs->timing_adj);
269 reg &= XENON_PHY_INITIALIZAION;
270 if (reg) {
271 dev_err(mmc_dev(host->mmc), "eMMC PHY init cannot complete after %d us\n",
272 wait);
273 return -ETIMEDOUT;
274 }
275
276 return 0;
277}
278
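
A note on the arithmetic above: the summed cycle count is converted to a microsecond delay with wait <<= 20 followed by wait /= clock, using 2^20 as a cheap approximation of 10^6. With illustrative values (a cycle total of 24 at an assumed 100 kHz floor clock):

	wait_us = (24 << 20) / 100000 + 1 = 252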
279#define ARMADA_3700_SOC_PAD_1_8V 0x1
280#define ARMADA_3700_SOC_PAD_3_3V 0x0
281
282static void armada_3700_soc_pad_voltage_set(struct sdhci_host *host,
283 unsigned char signal_voltage)
284{
285 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
286 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
287 struct xenon_emmc_phy_params *params = priv->phy_params;
288
289 if (params->pad_ctrl.pad_type == SOC_PAD_FIXED_1_8V) {
290 writel(ARMADA_3700_SOC_PAD_1_8V, params->pad_ctrl.reg);
291 } else if (params->pad_ctrl.pad_type == SOC_PAD_SD) {
292 if (signal_voltage == MMC_SIGNAL_VOLTAGE_180)
293 writel(ARMADA_3700_SOC_PAD_1_8V, params->pad_ctrl.reg);
294 else if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
295 writel(ARMADA_3700_SOC_PAD_3_3V, params->pad_ctrl.reg);
296 }
297}
298
299/*
 300 * Set the SoC PHY voltage PAD control register
 301 * according to the operating voltage on the PAD.
 302 * The detailed operation depends on the SoC implementation.
303 */
304static void xenon_emmc_phy_set_soc_pad(struct sdhci_host *host,
305 unsigned char signal_voltage)
306{
307 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
308 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
309 struct xenon_emmc_phy_params *params = priv->phy_params;
310
311 if (!params->pad_ctrl.reg)
312 return;
313
314 if (params->pad_ctrl.set_soc_pad)
315 params->pad_ctrl.set_soc_pad(host, signal_voltage);
316}
317
318/*
319 * Enable eMMC PHY HW DLL
320 * DLL should be enabled and stable before HS200/SDR104 tuning,
321 * and before HS400 data strobe setting.
322 */
323static int xenon_emmc_phy_enable_dll(struct sdhci_host *host)
324{
325 u32 reg;
326 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
327 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
328 struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs;
329 ktime_t timeout;
330
331 if (WARN_ON(host->clock <= MMC_HIGH_52_MAX_DTR))
332 return -EINVAL;
333
334 reg = sdhci_readl(host, phy_regs->dll_ctrl);
335 if (reg & XENON_DLL_ENABLE)
336 return 0;
337
338 /* Enable DLL */
339 reg = sdhci_readl(host, phy_regs->dll_ctrl);
340 reg |= (XENON_DLL_ENABLE | XENON_DLL_FAST_LOCK);
341
342 /*
 343 * Set phase to 90 degrees, which is the most common value.
 344 * Another value may be set if necessary.
345 * The granularity is 1 degree.
346 */
347 reg &= ~((XENON_DLL_PHASE_MASK << XENON_DLL_PHSEL0_SHIFT) |
348 (XENON_DLL_PHASE_MASK << XENON_DLL_PHSEL1_SHIFT));
349 reg |= ((XENON_DLL_PHASE_90_DEGREE << XENON_DLL_PHSEL0_SHIFT) |
350 (XENON_DLL_PHASE_90_DEGREE << XENON_DLL_PHSEL1_SHIFT));
351
352 reg &= ~XENON_DLL_BYPASS_EN;
353 reg |= phy_regs->dll_update;
354 if (priv->phy_type == EMMC_5_1_PHY)
355 reg &= ~XENON_DLL_REFCLK_SEL;
356 sdhci_writel(host, reg, phy_regs->dll_ctrl);
357
358 /* Wait max 32 ms */
359 timeout = ktime_add_ms(ktime_get(), 32);
360 while (!(sdhci_readw(host, XENON_SLOT_EXT_PRESENT_STATE) &
361 XENON_DLL_LOCK_STATE)) {
362 if (ktime_after(ktime_get(), timeout)) {
363 dev_err(mmc_dev(host->mmc), "Wait for DLL Lock time-out\n");
364 return -ETIMEDOUT;
365 }
366 udelay(100);
367 }
368 return 0;
369}
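/*
 * Editor's sketch: the DLL-lock wait above follows the usual ktime-based
 * polling idiom. A minimal generic form of the same pattern (hypothetical
 * helper name, not part of the driver):
 */
static int xenon_poll_bit_set(struct sdhci_host *host, int reg, u16 bit,
			      unsigned int timeout_ms)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_ms);

	while (!(sdhci_readw(host, reg) & bit)) {
		if (ktime_after(ktime_get(), timeout))
			return -ETIMEDOUT;
		udelay(100);
	}
	return 0;
}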
370
371/*
372 * Configure the eMMC PHY to prepare for tuning:
373 * enable the HW DLL and set TUNING_STEP.
374 */
375static int xenon_emmc_phy_config_tuning(struct sdhci_host *host)
376{
377 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
378 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
379 struct xenon_emmc_phy_params *params = priv->phy_params;
380 u32 reg, tuning_step;
381 int ret;
382
383 if (host->clock <= MMC_HIGH_52_MAX_DTR)
384 return -EINVAL;
385
386 ret = xenon_emmc_phy_enable_dll(host);
387 if (ret)
388 return ret;
389
390	/* Derive TUNING_STEP with the help of the HW DLL */
391 reg = sdhci_readl(host, XENON_SLOT_DLL_CUR_DLY_VAL);
392 tuning_step = reg / params->tun_step_divider;
393 if (unlikely(tuning_step > XENON_TUNING_STEP_MASK)) {
394 dev_warn(mmc_dev(host->mmc),
395			 "HS200 TUNING_STEP %d exceeds the maximum value\n",
396 tuning_step);
397 tuning_step = XENON_TUNING_STEP_MASK;
398 }
399
400 /* Set TUNING_STEP for later tuning */
401 reg = sdhci_readl(host, XENON_SLOT_OP_STATUS_CTRL);
402 reg &= ~(XENON_TUN_CONSECUTIVE_TIMES_MASK <<
403 XENON_TUN_CONSECUTIVE_TIMES_SHIFT);
404 reg |= (params->nr_tun_times << XENON_TUN_CONSECUTIVE_TIMES_SHIFT);
405 reg &= ~(XENON_TUNING_STEP_MASK << XENON_TUNING_STEP_SHIFT);
406 reg |= (tuning_step << XENON_TUNING_STEP_SHIFT);
407 sdhci_writel(host, reg, XENON_SLOT_OP_STATUS_CTRL);
408
409 return 0;
410}
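/*
 * Editor's sketch: the read-modify-write above packs nr_tun_times and
 * tuning_step into XENON_SLOT_OP_STATUS_CTRL. The clear-then-insert idiom
 * it follows, as a generic helper (hypothetical, for illustration only):
 */
static inline u32 xenon_set_field(u32 reg, u32 mask, u32 shift, u32 val)
{
	reg &= ~(mask << shift);	/* clear the old field contents */
	reg |= (val & mask) << shift;	/* insert the new value */
	return reg;
}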
411
412static void xenon_emmc_phy_disable_data_strobe(struct sdhci_host *host)
413{
414 u32 reg;
415
416 /* Disable SDHC Data Strobe */
417 reg = sdhci_readl(host, XENON_SLOT_EMMC_CTRL);
418 reg &= ~XENON_ENABLE_DATA_STROBE;
419 sdhci_writel(host, reg, XENON_SLOT_EMMC_CTRL);
420}
421
422/* Set HS400 Data Strobe */
423static void xenon_emmc_phy_strobe_delay_adj(struct sdhci_host *host)
424{
425 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
426 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
427 u32 reg;
428
429 if (WARN_ON(host->timing != MMC_TIMING_MMC_HS400))
430 return;
431
432 if (host->clock <= MMC_HIGH_52_MAX_DTR)
433 return;
434
435 dev_dbg(mmc_dev(host->mmc), "starts HS400 strobe delay adjustment\n");
436
437 xenon_emmc_phy_enable_dll(host);
438
439 /* Enable SDHC Data Strobe */
440 reg = sdhci_readl(host, XENON_SLOT_EMMC_CTRL);
441 reg |= XENON_ENABLE_DATA_STROBE;
442 sdhci_writel(host, reg, XENON_SLOT_EMMC_CTRL);
443
444 /* Set Data Strobe Pull down */
445 if (priv->phy_type == EMMC_5_0_PHY) {
446 reg = sdhci_readl(host, XENON_EMMC_5_0_PHY_PAD_CONTROL);
447 reg |= XENON_EMMC5_FC_QSP_PD;
448 reg &= ~XENON_EMMC5_FC_QSP_PU;
449 sdhci_writel(host, reg, XENON_EMMC_5_0_PHY_PAD_CONTROL);
450 } else {
451 reg = sdhci_readl(host, XENON_EMMC_PHY_PAD_CONTROL1);
452 reg |= XENON_EMMC5_1_FC_QSP_PD;
453 reg &= ~XENON_EMMC5_1_FC_QSP_PU;
454 sdhci_writel(host, reg, XENON_EMMC_PHY_PAD_CONTROL1);
455 }
456}
457
458/*
459 * Enable Slow Mode to bypass the eMMC PHY when a lower-speed SDR mode
460 * (SDCLK < 55 MHz) requires it.
461 * Slower SDR modes on SDIO also require Slow Mode.
462 *
463 * Return true if Slow Mode is enabled,
464 * false otherwise.
465 */
466static bool xenon_emmc_phy_slow_mode(struct sdhci_host *host,
467 unsigned char timing)
468{
469 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
470 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
471 struct xenon_emmc_phy_params *params = priv->phy_params;
472 struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs;
473 u32 reg;
474 int ret;
475
476 if (host->clock > MMC_HIGH_52_MAX_DTR)
477 return false;
478
479 reg = sdhci_readl(host, phy_regs->timing_adj);
480	/* In slower SDR modes, enable Slow Mode for SDIO cards
481	 * or when the Slow Mode flag is set.
482	 */
483 switch (timing) {
484 case MMC_TIMING_LEGACY:
485		/*
486		 * If Slow Mode is required, enable it by default in the
487		 * early init phase to avoid any potential issue.
488		 */
489 if (params->slow_mode) {
490 reg |= XENON_TIMING_ADJUST_SLOW_MODE;
491 ret = true;
492 } else {
493 reg &= ~XENON_TIMING_ADJUST_SLOW_MODE;
494 ret = false;
495 }
496 break;
497 case MMC_TIMING_UHS_SDR25:
498 case MMC_TIMING_UHS_SDR12:
499 case MMC_TIMING_SD_HS:
500 case MMC_TIMING_MMC_HS:
501 if ((priv->init_card_type == MMC_TYPE_SDIO) ||
502 params->slow_mode) {
503 reg |= XENON_TIMING_ADJUST_SLOW_MODE;
504 ret = true;
505 break;
506 }
507 default:
508 reg &= ~XENON_TIMING_ADJUST_SLOW_MODE;
509 ret = false;
510 }
511
512 sdhci_writel(host, reg, phy_regs->timing_adj);
513 return ret;
514}
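/*
 * Editor's summary of the decision above:
 *   MMC_TIMING_LEGACY                   -> Slow Mode iff the DT flag is set
 *   SDR12/SDR25/SD HS/MMC HS            -> Slow Mode iff SDIO card or DT flag
 *   any faster timing (default branch)  -> Slow Mode cleared
 * Note that the SDR cases intentionally fall through to the default branch
 * when neither condition holds.
 */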
515
516/*
517 * Set-up eMMC 5.0/5.1 PHY.
518 * Specific configuration depends on the current speed mode in use.
519 */
520static void xenon_emmc_phy_set(struct sdhci_host *host,
521 unsigned char timing)
522{
523 u32 reg;
524 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
525 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
526 struct xenon_emmc_phy_params *params = priv->phy_params;
527 struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs;
528
529 dev_dbg(mmc_dev(host->mmc), "eMMC PHY setting starts\n");
530
531 /* Setup pad, set bit[28] and bits[26:24] */
532 reg = sdhci_readl(host, phy_regs->pad_ctrl);
533 reg |= (XENON_FC_DQ_RECEN | XENON_FC_CMD_RECEN |
534 XENON_FC_QSP_RECEN | XENON_OEN_QSN);
535	/* All FC_XX receivers should be set to CMOS type */
536 reg |= XENON_FC_ALL_CMOS_RECEIVER;
537 sdhci_writel(host, reg, phy_regs->pad_ctrl);
538
539 /* Set CMD and DQ Pull Up */
540 if (priv->phy_type == EMMC_5_0_PHY) {
541 reg = sdhci_readl(host, XENON_EMMC_5_0_PHY_PAD_CONTROL);
542 reg |= (XENON_EMMC5_FC_CMD_PU | XENON_EMMC5_FC_DQ_PU);
543 reg &= ~(XENON_EMMC5_FC_CMD_PD | XENON_EMMC5_FC_DQ_PD);
544 sdhci_writel(host, reg, XENON_EMMC_5_0_PHY_PAD_CONTROL);
545 } else {
546 reg = sdhci_readl(host, XENON_EMMC_PHY_PAD_CONTROL1);
547 reg |= (XENON_EMMC5_1_FC_CMD_PU | XENON_EMMC5_1_FC_DQ_PU);
548 reg &= ~(XENON_EMMC5_1_FC_CMD_PD | XENON_EMMC5_1_FC_DQ_PD);
549 sdhci_writel(host, reg, XENON_EMMC_PHY_PAD_CONTROL1);
550 }
551
552 if (timing == MMC_TIMING_LEGACY) {
553 xenon_emmc_phy_slow_mode(host, timing);
554 goto phy_init;
555 }
556
557	/*
558	 * For an SDIO card, set SDIO Mode;
559	 * otherwise, clear SDIO Mode.
560	 */
561 reg = sdhci_readl(host, phy_regs->timing_adj);
562 if (priv->init_card_type == MMC_TYPE_SDIO)
563 reg |= XENON_TIMING_ADJUST_SDIO_MODE;
564 else
565 reg &= ~XENON_TIMING_ADJUST_SDIO_MODE;
566 sdhci_writel(host, reg, phy_regs->timing_adj);
567
568 if (xenon_emmc_phy_slow_mode(host, timing))
569 goto phy_init;
570
571	/*
572	 * Set the preferred ZNR and ZPR values.
573	 * The ZNR and ZPR values vary between boards;
574	 * both are defined in sdhci-xenon-emmc-phy.h.
575	 */
576 reg = sdhci_readl(host, phy_regs->pad_ctrl2);
577 reg &= ~((XENON_ZNR_MASK << XENON_ZNR_SHIFT) | XENON_ZPR_MASK);
578 reg |= ((params->znr << XENON_ZNR_SHIFT) | params->zpr);
579 sdhci_writel(host, reg, phy_regs->pad_ctrl2);
580
581	/*
582	 * The SD clock must be disabled while the
583	 * EMMC_PHY_FUNC_CONTROL register is being set.
584	 */
585	reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
586 reg &= ~SDHCI_CLOCK_CARD_EN;
587 sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);
588
589 reg = sdhci_readl(host, phy_regs->func_ctrl);
590 switch (timing) {
591 case MMC_TIMING_MMC_HS400:
592 reg |= (XENON_DQ_DDR_MODE_MASK << XENON_DQ_DDR_MODE_SHIFT) |
593 XENON_CMD_DDR_MODE;
594 reg &= ~XENON_DQ_ASYNC_MODE;
595 break;
596 case MMC_TIMING_UHS_DDR50:
597 case MMC_TIMING_MMC_DDR52:
598 reg |= (XENON_DQ_DDR_MODE_MASK << XENON_DQ_DDR_MODE_SHIFT) |
599 XENON_CMD_DDR_MODE | XENON_DQ_ASYNC_MODE;
600 break;
601 default:
602 reg &= ~((XENON_DQ_DDR_MODE_MASK << XENON_DQ_DDR_MODE_SHIFT) |
603 XENON_CMD_DDR_MODE);
604 reg |= XENON_DQ_ASYNC_MODE;
605 }
606 sdhci_writel(host, reg, phy_regs->func_ctrl);
607
608 /* Enable bus clock */
609	reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
610 reg |= SDHCI_CLOCK_CARD_EN;
611 sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);
612
613 if (timing == MMC_TIMING_MMC_HS400)
614		/* The hardware team recommends this value for HS400 */
615 sdhci_writel(host, phy_regs->logic_timing_val,
616 phy_regs->logic_timing_adj);
617 else
618 xenon_emmc_phy_disable_data_strobe(host);
619
620phy_init:
621 xenon_emmc_phy_init(host);
622
623 dev_dbg(mmc_dev(host->mmc), "eMMC PHY setting completes\n");
624}
625
626static int get_dt_pad_ctrl_data(struct sdhci_host *host,
627 struct device_node *np,
628 struct xenon_emmc_phy_params *params)
629{
630 int ret = 0;
631 const char *name;
632 struct resource iomem;
633
634 if (of_device_is_compatible(np, "marvell,armada-3700-sdhci"))
635 params->pad_ctrl.set_soc_pad = armada_3700_soc_pad_voltage_set;
636 else
637 return 0;
638
639 if (of_address_to_resource(np, 1, &iomem)) {
640 dev_err(mmc_dev(host->mmc), "Unable to find SoC PAD ctrl register address for %s\n",
641 np->name);
642 return -EINVAL;
643 }
644
645 params->pad_ctrl.reg = devm_ioremap_resource(mmc_dev(host->mmc),
646 &iomem);
647 if (IS_ERR(params->pad_ctrl.reg))
648 return PTR_ERR(params->pad_ctrl.reg);
649
650 ret = of_property_read_string(np, "marvell,pad-type", &name);
651 if (ret) {
652 dev_err(mmc_dev(host->mmc), "Unable to determine SoC PHY PAD ctrl type\n");
653 return ret;
654 }
655 if (!strcmp(name, "sd")) {
656 params->pad_ctrl.pad_type = SOC_PAD_SD;
657 } else if (!strcmp(name, "fixed-1-8v")) {
658 params->pad_ctrl.pad_type = SOC_PAD_FIXED_1_8V;
659 } else {
660 dev_err(mmc_dev(host->mmc), "Unsupported SoC PHY PAD ctrl type %s\n",
661 name);
662 return -EINVAL;
663 }
664
665 return ret;
666}
667
668static int xenon_emmc_phy_parse_param_dt(struct sdhci_host *host,
669 struct device_node *np,
670 struct xenon_emmc_phy_params *params)
671{
672 u32 value;
673
674 params->slow_mode = false;
675 if (of_property_read_bool(np, "marvell,xenon-phy-slow-mode"))
676 params->slow_mode = true;
677
678 params->znr = XENON_ZNR_DEF_VALUE;
679 if (!of_property_read_u32(np, "marvell,xenon-phy-znr", &value))
680 params->znr = value & XENON_ZNR_MASK;
681
682 params->zpr = XENON_ZPR_DEF_VALUE;
683 if (!of_property_read_u32(np, "marvell,xenon-phy-zpr", &value))
684 params->zpr = value & XENON_ZPR_MASK;
685
686 params->nr_tun_times = XENON_TUN_CONSECUTIVE_TIMES;
687 if (!of_property_read_u32(np, "marvell,xenon-phy-nr-success-tun",
688 &value))
689 params->nr_tun_times = value & XENON_TUN_CONSECUTIVE_TIMES_MASK;
690
691 params->tun_step_divider = XENON_TUNING_STEP_DIVIDER;
692 if (!of_property_read_u32(np, "marvell,xenon-phy-tun-step-divider",
693 &value))
694 params->tun_step_divider = value & 0xFF;
695
696 return get_dt_pad_ctrl_data(host, np, params);
697}
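/*
 * Editor's summary (derived from the code above): each DT property
 * overrides a built-in default, masked to its field width:
 *   marvell,xenon-phy-slow-mode        -> slow_mode (default: false)
 *   marvell,xenon-phy-znr              -> znr (default: XENON_ZNR_DEF_VALUE)
 *   marvell,xenon-phy-zpr              -> zpr (default: XENON_ZPR_DEF_VALUE)
 *   marvell,xenon-phy-nr-success-tun   -> nr_tun_times
 *   marvell,xenon-phy-tun-step-divider -> tun_step_divider
 */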
698
699/* Set SoC PHY Voltage PAD */
700void xenon_soc_pad_ctrl(struct sdhci_host *host,
701 unsigned char signal_voltage)
702{
703 xenon_emmc_phy_set_soc_pad(host, signal_voltage);
704}
705
706/*
707 * Set up the PHY when the card is working in a high-speed mode.
708 * HS400: set the data strobe line.
709 * HS200/SDR104: set the tuning config to prepare for tuning.
710 */
711static int xenon_hs_delay_adj(struct sdhci_host *host)
712{
713 int ret = 0;
714
715 if (WARN_ON(host->clock <= XENON_DEFAULT_SDCLK_FREQ))
716 return -EINVAL;
717
718 switch (host->timing) {
719 case MMC_TIMING_MMC_HS400:
720 xenon_emmc_phy_strobe_delay_adj(host);
721 return 0;
722 case MMC_TIMING_MMC_HS200:
723 case MMC_TIMING_UHS_SDR104:
724 return xenon_emmc_phy_config_tuning(host);
725 case MMC_TIMING_MMC_DDR52:
726 case MMC_TIMING_UHS_DDR50:
727		/*
728		 * DDR mode requires the driver to scan the Sampling Fixed Delay
729		 * Line to find an ideal sampling point. Such a scan is hard to
730		 * implement in the host driver, since it is not safe for the
731		 * host driver to initiate commands on its own.
732		 * Thus, for now, keep the PHY Sampling Fixed Delay at its
733		 * default value in DDR mode.
734		 *
735		 * If any timing issue occurs in DDR mode on Marvell products,
736		 * please contact the maintainer for internal support from Marvell.
737		 */
738 dev_warn_once(mmc_dev(host->mmc), "Timing issue might occur in DDR mode\n");
739 return 0;
740 }
741
742 return ret;
743}
744
745/*
746 * Adjust the PHY setting.
747 * The PHY setting should be adjusted whenever the SDCLK frequency,
748 * bus width or speed mode changes.
749 * Additional configuration is required when the card is working in a
750 * high-speed mode, after leaving legacy mode.
751 */
752int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios)
753{
754 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
755 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
756 int ret = 0;
757
758 if (!host->clock) {
759 priv->clock = 0;
760 return 0;
761 }
762
763	/*
764	 * If the timing, frequency and bus width are all unchanged,
765	 * there is nothing to adjust; otherwise set the eMMC PHY based
766	 * on the current settings and adjust the Xenon SDHC delay.
767	 */
768 if ((host->clock == priv->clock) &&
769 (ios->bus_width == priv->bus_width) &&
770 (ios->timing == priv->timing))
771 return 0;
772
773 xenon_emmc_phy_set(host, ios->timing);
774
775 /* Update the record */
776 priv->bus_width = ios->bus_width;
777
778 priv->timing = ios->timing;
779 priv->clock = host->clock;
780
781 /* Legacy mode is a special case */
782 if (ios->timing == MMC_TIMING_LEGACY)
783 return 0;
784
785 if (host->clock > XENON_DEFAULT_SDCLK_FREQ)
786 ret = xenon_hs_delay_adj(host);
787 return ret;
788}
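/*
 * Editor's note: priv->{clock, bus_width, timing} cache the last applied
 * ios. The early return above makes xenon_phy_adj() effectively idempotent,
 * so it can be called on every ios update without re-running the costly
 * PHY sequence when nothing relevant has changed.
 */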
789
790void xenon_clean_phy(struct sdhci_host *host)
791{
792 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
793 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
794
795 kfree(priv->phy_params);
796}
797
798static int xenon_add_phy(struct device_node *np, struct sdhci_host *host,
799 const char *phy_name)
800{
801 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
802 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
803 int i, ret;
804
805 for (i = 0; i < NR_PHY_TYPES; i++) {
806 if (!strcmp(phy_name, phy_types[i])) {
807 priv->phy_type = i;
808 break;
809 }
810 }
811 if (i == NR_PHY_TYPES) {
812 dev_err(mmc_dev(host->mmc),
813			"Unable to determine PHY type from name %s. Using default eMMC 5.1 PHY\n",
814 phy_name);
815 priv->phy_type = EMMC_5_1_PHY;
816 }
817
818 ret = xenon_alloc_emmc_phy(host);
819 if (ret)
820 return ret;
821
822 ret = xenon_emmc_phy_parse_param_dt(host, np, priv->phy_params);
823 if (ret)
824 xenon_clean_phy(host);
825
826 return ret;
827}
828
829int xenon_phy_parse_dt(struct device_node *np, struct sdhci_host *host)
830{
831 const char *phy_type = NULL;
832
833 if (!of_property_read_string(np, "marvell,xenon-phy-type", &phy_type))
834 return xenon_add_phy(np, host, phy_type);
835
836 return xenon_add_phy(np, host, "emmc 5.1 phy");
837}
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
new file mode 100644
index 000000000000..67246655315b
--- /dev/null
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -0,0 +1,548 @@
1/*
2 * Driver for Marvell Xenon SDHC as a platform device
3 *
4 * Copyright (C) 2016 Marvell, All Rights Reserved.
5 *
6 * Author: Hu Ziji <huziji@marvell.com>
7 * Date: 2016-8-24
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation version 2.
12 *
13 * Inspired by Jisheng Zhang <jszhang@marvell.com>
14 * Special thanks to Video BG4 project team.
15 */
16
17#include <linux/delay.h>
18#include <linux/ktime.h>
19#include <linux/module.h>
20#include <linux/of.h>
21
22#include "sdhci-pltfm.h"
23#include "sdhci-xenon.h"
24
25static int xenon_enable_internal_clk(struct sdhci_host *host)
26{
27 u32 reg;
28 ktime_t timeout;
29
30 reg = sdhci_readl(host, SDHCI_CLOCK_CONTROL);
31 reg |= SDHCI_CLOCK_INT_EN;
32 sdhci_writel(host, reg, SDHCI_CLOCK_CONTROL);
33 /* Wait max 20 ms */
34 timeout = ktime_add_ms(ktime_get(), 20);
35 while (!((reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
36 & SDHCI_CLOCK_INT_STABLE)) {
37 if (ktime_after(ktime_get(), timeout)) {
38 dev_err(mmc_dev(host->mmc), "Internal clock never stabilised.\n");
39 return -ETIMEDOUT;
40 }
41 usleep_range(900, 1100);
42 }
43
44 return 0;
45}
46
47/* Set SDCLK-off-while-idle */
48static void xenon_set_sdclk_off_idle(struct sdhci_host *host,
49 unsigned char sdhc_id, bool enable)
50{
51 u32 reg;
52 u32 mask;
53
54 reg = sdhci_readl(host, XENON_SYS_OP_CTRL);
55	/* Get the bit shift based on the SDHC index */
56 mask = (0x1 << (XENON_SDCLK_IDLEOFF_ENABLE_SHIFT + sdhc_id));
57 if (enable)
58 reg |= mask;
59 else
60 reg &= ~mask;
61
62 sdhci_writel(host, reg, XENON_SYS_OP_CTRL);
63}
64
65/* Enable/Disable the Auto Clock Gating function */
66static void xenon_set_acg(struct sdhci_host *host, bool enable)
67{
68 u32 reg;
69
70 reg = sdhci_readl(host, XENON_SYS_OP_CTRL);
71 if (enable)
72 reg &= ~XENON_AUTO_CLKGATE_DISABLE_MASK;
73 else
74 reg |= XENON_AUTO_CLKGATE_DISABLE_MASK;
75 sdhci_writel(host, reg, XENON_SYS_OP_CTRL);
76}
77
78/* Enable this SDHC */
79static void xenon_enable_sdhc(struct sdhci_host *host,
80 unsigned char sdhc_id)
81{
82 u32 reg;
83
84 reg = sdhci_readl(host, XENON_SYS_OP_CTRL);
85 reg |= (BIT(sdhc_id) << XENON_SLOT_ENABLE_SHIFT);
86 sdhci_writel(host, reg, XENON_SYS_OP_CTRL);
87
88 host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
89	/*
90	 * Force-clear MMC_CAP_BUS_WIDTH_TEST to skip
91	 * bus_test_pre and bus_test_post.
92	 */
93 host->mmc->caps &= ~MMC_CAP_BUS_WIDTH_TEST;
94}
95
96/* Disable this SDHC */
97static void xenon_disable_sdhc(struct sdhci_host *host,
98 unsigned char sdhc_id)
99{
100 u32 reg;
101
102 reg = sdhci_readl(host, XENON_SYS_OP_CTRL);
103 reg &= ~(BIT(sdhc_id) << XENON_SLOT_ENABLE_SHIFT);
104 sdhci_writel(host, reg, XENON_SYS_OP_CTRL);
105}
106
107/* Enable Parallel Transfer Mode */
108static void xenon_enable_sdhc_parallel_tran(struct sdhci_host *host,
109 unsigned char sdhc_id)
110{
111 u32 reg;
112
113 reg = sdhci_readl(host, XENON_SYS_EXT_OP_CTRL);
114 reg |= BIT(sdhc_id);
115 sdhci_writel(host, reg, XENON_SYS_EXT_OP_CTRL);
116}
117
118/* Mask command conflict error */
119static void xenon_mask_cmd_conflict_err(struct sdhci_host *host)
120{
121 u32 reg;
122
123 reg = sdhci_readl(host, XENON_SYS_EXT_OP_CTRL);
124 reg |= XENON_MASK_CMD_CONFLICT_ERR;
125 sdhci_writel(host, reg, XENON_SYS_EXT_OP_CTRL);
126}
127
128static void xenon_retune_setup(struct sdhci_host *host)
129{
130 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
131 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
132 u32 reg;
133
134 /* Disable the Re-Tuning Request functionality */
135 reg = sdhci_readl(host, XENON_SLOT_RETUNING_REQ_CTRL);
136 reg &= ~XENON_RETUNING_COMPATIBLE;
137 sdhci_writel(host, reg, XENON_SLOT_RETUNING_REQ_CTRL);
138
139 /* Disable the Re-tuning Interrupt */
140 reg = sdhci_readl(host, SDHCI_SIGNAL_ENABLE);
141 reg &= ~SDHCI_INT_RETUNE;
142 sdhci_writel(host, reg, SDHCI_SIGNAL_ENABLE);
143 reg = sdhci_readl(host, SDHCI_INT_ENABLE);
144 reg &= ~SDHCI_INT_RETUNE;
145 sdhci_writel(host, reg, SDHCI_INT_ENABLE);
146
147 /* Force to use Tuning Mode 1 */
148 host->tuning_mode = SDHCI_TUNING_MODE_1;
149 /* Set re-tuning period */
150 host->tuning_count = 1 << (priv->tuning_count - 1);
151}
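/*
 * Editor's example: tuning_count encodes the re-tune period as a power of
 * two. With the default XENON_DEF_TUNING_COUNT of 0x9 (see sdhci-xenon.h
 * below), host->tuning_count = 1 << (9 - 1) = 256, i.e. the mmc core
 * re-tunes every 256 seconds under Tuning Mode 1.
 */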
152
153/*
154 * Operations inside struct sdhci_ops
155 */
156/* Recover the Register Setting cleared during SOFTWARE_RESET_ALL */
157static void xenon_reset_exit(struct sdhci_host *host,
158 unsigned char sdhc_id, u8 mask)
159{
160 /* Only SOFTWARE RESET ALL will clear the register setting */
161 if (!(mask & SDHCI_RESET_ALL))
162 return;
163
164 /* Disable tuning request and auto-retuning again */
165 xenon_retune_setup(host);
166
167 xenon_set_acg(host, true);
168
169 xenon_set_sdclk_off_idle(host, sdhc_id, false);
170
171 xenon_mask_cmd_conflict_err(host);
172}
173
174static void xenon_reset(struct sdhci_host *host, u8 mask)
175{
176 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
177 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
178
179 sdhci_reset(host, mask);
180 xenon_reset_exit(host, priv->sdhc_id, mask);
181}
182
183/*
184 * Xenon defines different values for HS200 and HS400
185 * in Host_Control_2
186 */
187static void xenon_set_uhs_signaling(struct sdhci_host *host,
188 unsigned int timing)
189{
190 u16 ctrl_2;
191
192 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
193 /* Select Bus Speed Mode for host */
194 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
195 if (timing == MMC_TIMING_MMC_HS200)
196 ctrl_2 |= XENON_CTRL_HS200;
197 else if (timing == MMC_TIMING_UHS_SDR104)
198 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
199 else if (timing == MMC_TIMING_UHS_SDR12)
200 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
201 else if (timing == MMC_TIMING_UHS_SDR25)
202 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
203 else if (timing == MMC_TIMING_UHS_SDR50)
204 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
205 else if ((timing == MMC_TIMING_UHS_DDR50) ||
206 (timing == MMC_TIMING_MMC_DDR52))
207 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
208 else if (timing == MMC_TIMING_MMC_HS400)
209 ctrl_2 |= XENON_CTRL_HS400;
210 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
211}
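/*
 * Editor's sketch: an equivalent table-driven form of the timing-to-mode
 * mapping above (same values; purely an illustrative restructuring):
 */
static const struct {
	unsigned int timing;
	u16 uhs_mode;
} xenon_uhs_map[] = {
	{ MMC_TIMING_MMC_HS200,  XENON_CTRL_HS200 },
	{ MMC_TIMING_UHS_SDR104, SDHCI_CTRL_UHS_SDR104 },
	{ MMC_TIMING_UHS_SDR12,  SDHCI_CTRL_UHS_SDR12 },
	{ MMC_TIMING_UHS_SDR25,  SDHCI_CTRL_UHS_SDR25 },
	{ MMC_TIMING_UHS_SDR50,  SDHCI_CTRL_UHS_SDR50 },
	{ MMC_TIMING_UHS_DDR50,  SDHCI_CTRL_UHS_DDR50 },
	{ MMC_TIMING_MMC_DDR52,  SDHCI_CTRL_UHS_DDR50 },
	{ MMC_TIMING_MMC_HS400,  XENON_CTRL_HS400 },
};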
212
213static const struct sdhci_ops sdhci_xenon_ops = {
214 .set_clock = sdhci_set_clock,
215 .set_bus_width = sdhci_set_bus_width,
216 .reset = xenon_reset,
217 .set_uhs_signaling = xenon_set_uhs_signaling,
218 .get_max_clock = sdhci_pltfm_clk_get_max_clock,
219};
220
221static const struct sdhci_pltfm_data sdhci_xenon_pdata = {
222 .ops = &sdhci_xenon_ops,
223 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
224 SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
225 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
226};
227
228/*
229 * Xenon Specific Operations in mmc_host_ops
230 */
231static void xenon_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
232{
233 struct sdhci_host *host = mmc_priv(mmc);
234 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
235 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
236 u32 reg;
237
238	/*
239	 * HS400/HS200/eMMC HS don't have a Preset Value register.
240	 * However, sdhci_set_ios will read the HS400/HS200 Preset register,
241	 * so disable the Preset Value register for HS400/HS200.
242	 * eMMC HS with preset_enabled set triggers a bug in
243	 * get_preset_value().
244	 */
245 if ((ios->timing == MMC_TIMING_MMC_HS400) ||
246 (ios->timing == MMC_TIMING_MMC_HS200) ||
247 (ios->timing == MMC_TIMING_MMC_HS)) {
248 host->preset_enabled = false;
249 host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
250 host->flags &= ~SDHCI_PV_ENABLED;
251
252 reg = sdhci_readw(host, SDHCI_HOST_CONTROL2);
253 reg &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
254 sdhci_writew(host, reg, SDHCI_HOST_CONTROL2);
255 } else {
256 host->quirks2 &= ~SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
257 }
258
259 sdhci_set_ios(mmc, ios);
260 xenon_phy_adj(host, ios);
261
262 if (host->clock > XENON_DEFAULT_SDCLK_FREQ)
263 xenon_set_sdclk_off_idle(host, priv->sdhc_id, true);
264}
265
266static int xenon_start_signal_voltage_switch(struct mmc_host *mmc,
267 struct mmc_ios *ios)
268{
269 struct sdhci_host *host = mmc_priv(mmc);
270
271	/*
272	 * Before SD/SDIO sets the signal voltage, the SD bus clock should
273	 * be disabled. However, sdhci_set_clock will also have disabled
274	 * the internal clock in mmc_set_signal_voltage().
275	 * If the internal clock is disabled, the 3.3V/1.8V bit cannot be
276	 * updated, so manually enable the internal clock here.
277	 *
278	 * After the switch completes, it is unnecessary to disable the
279	 * internal clock, since keeping it active complies with the SD spec.
280	 */
281 xenon_enable_internal_clk(host);
282
283 xenon_soc_pad_ctrl(host, ios->signal_voltage);
284
285	/*
286	 * If Vqmmc is fixed on the platform, the vqmmc regulator is
287	 * unavailable, so the SDHCI_CTRL_VDD_180 bit might not work.
288	 * Skip the standard voltage switch to avoid any issue.
289	 */
290 if (PTR_ERR(mmc->supply.vqmmc) == -ENODEV)
291 return 0;
292
293 return sdhci_start_signal_voltage_switch(mmc, ios);
294}
295
296/*
297 * Update card type.
298 * priv->init_card_type will be used in PHY timing adjustment.
299 */
300static void xenon_init_card(struct mmc_host *mmc, struct mmc_card *card)
301{
302 struct sdhci_host *host = mmc_priv(mmc);
303 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
304 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
305
306	/* Update the card type */
307 priv->init_card_type = card->type;
308}
309
310static int xenon_execute_tuning(struct mmc_host *mmc, u32 opcode)
311{
312 struct sdhci_host *host = mmc_priv(mmc);
313
314 if (host->timing == MMC_TIMING_UHS_DDR50)
315 return 0;
316
317	/*
318	 * For now, force the Xenon driver back to Tuning Mode 1 only,
319	 * even though Xenon might claim to support mode 2 or mode 3.
320	 * Testing mode 2/mode 3 on more platforms requires more time.
321	 */
322 if (host->tuning_mode != SDHCI_TUNING_MODE_1)
323 xenon_retune_setup(host);
324
325 return sdhci_execute_tuning(mmc, opcode);
326}
327
328static void xenon_enable_sdio_irq(struct mmc_host *mmc, int enable)
329{
330 struct sdhci_host *host = mmc_priv(mmc);
331 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
332 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
333 u32 reg;
334 u8 sdhc_id = priv->sdhc_id;
335
336 sdhci_enable_sdio_irq(mmc, enable);
337
338 if (enable) {
339 /*
340 * Set SDIO Card Inserted indication
341 * to enable detecting SDIO async irq.
342 */
343 reg = sdhci_readl(host, XENON_SYS_CFG_INFO);
344 reg |= (1 << (sdhc_id + XENON_SLOT_TYPE_SDIO_SHIFT));
345 sdhci_writel(host, reg, XENON_SYS_CFG_INFO);
346 } else {
347 /* Clear SDIO Card Inserted indication */
348 reg = sdhci_readl(host, XENON_SYS_CFG_INFO);
349 reg &= ~(1 << (sdhc_id + XENON_SLOT_TYPE_SDIO_SHIFT));
350 sdhci_writel(host, reg, XENON_SYS_CFG_INFO);
351 }
352}
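/*
 * Editor's example: with XENON_SLOT_TYPE_SDIO_SHIFT = 24 (see sdhci-xenon.h
 * below), SDHC index 2 toggles bit 26 of XENON_SYS_CFG_INFO, marking that
 * slot as SDIO so the controller can detect the SDIO async interrupt.
 */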
353
354static void xenon_replace_mmc_host_ops(struct sdhci_host *host)
355{
356 host->mmc_host_ops.set_ios = xenon_set_ios;
357 host->mmc_host_ops.start_signal_voltage_switch =
358 xenon_start_signal_voltage_switch;
359 host->mmc_host_ops.init_card = xenon_init_card;
360 host->mmc_host_ops.execute_tuning = xenon_execute_tuning;
361 host->mmc_host_ops.enable_sdio_irq = xenon_enable_sdio_irq;
362}
363
364/*
365 * Parse Xenon-specific DT properties:
366 * sdhc-id: the index of the current SDHC
367 *          (refer to the XENON_SYS_CFG_INFO register)
368 * tun-count: the interval between re-tuning runs
369 */
370static int xenon_probe_dt(struct platform_device *pdev)
371{
372 struct device_node *np = pdev->dev.of_node;
373 struct sdhci_host *host = platform_get_drvdata(pdev);
374 struct mmc_host *mmc = host->mmc;
375 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
376 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
377 u32 sdhc_id, nr_sdhc;
378 u32 tuning_count;
379
380 /* Disable HS200 on Armada AP806 */
381 if (of_device_is_compatible(np, "marvell,armada-ap806-sdhci"))
382 host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
383
384 sdhc_id = 0x0;
385 if (!of_property_read_u32(np, "marvell,xenon-sdhc-id", &sdhc_id)) {
386 nr_sdhc = sdhci_readl(host, XENON_SYS_CFG_INFO);
387 nr_sdhc &= XENON_NR_SUPPORTED_SLOT_MASK;
388 if (unlikely(sdhc_id > nr_sdhc)) {
389 dev_err(mmc_dev(mmc), "SDHC Index %d exceeds Number of SDHCs %d\n",
390 sdhc_id, nr_sdhc);
391 return -EINVAL;
392 }
393 }
394 priv->sdhc_id = sdhc_id;
395
396 tuning_count = XENON_DEF_TUNING_COUNT;
397 if (!of_property_read_u32(np, "marvell,xenon-tun-count",
398 &tuning_count)) {
399 if (unlikely(tuning_count >= XENON_TMR_RETUN_NO_PRESENT)) {
400			dev_err(mmc_dev(mmc), "Invalid re-tuning count. Using default value %d\n",
401 XENON_DEF_TUNING_COUNT);
402 tuning_count = XENON_DEF_TUNING_COUNT;
403 }
404 }
405 priv->tuning_count = tuning_count;
406
407 return xenon_phy_parse_dt(np, host);
408}
409
410static int xenon_sdhc_prepare(struct sdhci_host *host)
411{
412 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
413 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
414 u8 sdhc_id = priv->sdhc_id;
415
416 /* Enable SDHC */
417 xenon_enable_sdhc(host, sdhc_id);
418
419 /* Enable ACG */
420 xenon_set_acg(host, true);
421
422 /* Enable Parallel Transfer Mode */
423 xenon_enable_sdhc_parallel_tran(host, sdhc_id);
424
425 /* Disable SDCLK-Off-While-Idle before card init */
426 xenon_set_sdclk_off_idle(host, sdhc_id, false);
427
428 xenon_mask_cmd_conflict_err(host);
429
430 return 0;
431}
432
433static void xenon_sdhc_unprepare(struct sdhci_host *host)
434{
435 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
436 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
437 u8 sdhc_id = priv->sdhc_id;
438
439	/* Disable SDHC */
440 xenon_disable_sdhc(host, sdhc_id);
441}
442
443static int xenon_probe(struct platform_device *pdev)
444{
445 struct sdhci_pltfm_host *pltfm_host;
446 struct sdhci_host *host;
447 struct xenon_priv *priv;
448 int err;
449
450 host = sdhci_pltfm_init(pdev, &sdhci_xenon_pdata,
451 sizeof(struct xenon_priv));
452 if (IS_ERR(host))
453 return PTR_ERR(host);
454
455 pltfm_host = sdhci_priv(host);
456 priv = sdhci_pltfm_priv(pltfm_host);
457
458	/*
459	 * Link the Xenon-specific mmc_host_ops functions
460	 * to replace the standard mmc_host_ops callbacks.
461	 */
462 xenon_replace_mmc_host_ops(host);
463
464 pltfm_host->clk = devm_clk_get(&pdev->dev, "core");
465 if (IS_ERR(pltfm_host->clk)) {
466 err = PTR_ERR(pltfm_host->clk);
467 dev_err(&pdev->dev, "Failed to setup input clk: %d\n", err);
468 goto free_pltfm;
469 }
470 err = clk_prepare_enable(pltfm_host->clk);
471 if (err)
472 goto free_pltfm;
473
474 err = mmc_of_parse(host->mmc);
475 if (err)
476 goto err_clk;
477
478 sdhci_get_of_property(pdev);
479
480 xenon_set_acg(host, false);
481
482 /* Xenon specific dt parse */
483 err = xenon_probe_dt(pdev);
484 if (err)
485 goto err_clk;
486
487 err = xenon_sdhc_prepare(host);
488 if (err)
489 goto clean_phy_param;
490
491 err = sdhci_add_host(host);
492 if (err)
493 goto remove_sdhc;
494
495 return 0;
496
497remove_sdhc:
498 xenon_sdhc_unprepare(host);
499clean_phy_param:
500 xenon_clean_phy(host);
501err_clk:
502 clk_disable_unprepare(pltfm_host->clk);
503free_pltfm:
504 sdhci_pltfm_free(pdev);
505 return err;
506}
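/*
 * Editor's note: the error labels above unwind in strict reverse order of
 * acquisition: a failed sdhci_add_host() unprepares the SDHC, then falls
 * through to freeing the PHY parameters, disabling the clock and finally
 * releasing the platform host (the standard kernel goto-ladder pattern).
 */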
507
508static int xenon_remove(struct platform_device *pdev)
509{
510 struct sdhci_host *host = platform_get_drvdata(pdev);
511 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
512
513 xenon_clean_phy(host);
514
515 sdhci_remove_host(host, 0);
516
517 xenon_sdhc_unprepare(host);
518
519 clk_disable_unprepare(pltfm_host->clk);
520
521 sdhci_pltfm_free(pdev);
522
523 return 0;
524}
525
526static const struct of_device_id sdhci_xenon_dt_ids[] = {
527 { .compatible = "marvell,armada-ap806-sdhci",},
528 { .compatible = "marvell,armada-cp110-sdhci",},
529 { .compatible = "marvell,armada-3700-sdhci",},
530 {}
531};
532MODULE_DEVICE_TABLE(of, sdhci_xenon_dt_ids);
533
534static struct platform_driver sdhci_xenon_driver = {
535 .driver = {
536 .name = "xenon-sdhci",
537 .of_match_table = sdhci_xenon_dt_ids,
538 .pm = &sdhci_pltfm_pmops,
539 },
540 .probe = xenon_probe,
541 .remove = xenon_remove,
542};
543
544module_platform_driver(sdhci_xenon_driver);
545
546MODULE_DESCRIPTION("SDHCI platform driver for Marvell Xenon SDHC");
547MODULE_AUTHOR("Hu Ziji <huziji@marvell.com>");
548MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-xenon.h b/drivers/mmc/host/sdhci-xenon.h
new file mode 100644
index 000000000000..6e6523ea01ce
--- /dev/null
+++ b/drivers/mmc/host/sdhci-xenon.h
@@ -0,0 +1,101 @@
1/*
2 * Copyright (C) 2016 Marvell, All Rights Reserved.
3 *
4 * Author: Hu Ziji <huziji@marvell.com>
5 * Date: 2016-8-24
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation version 2.
10 */
11#ifndef SDHCI_XENON_H_
12#define SDHCI_XENON_H_
13
14/* Register Offset of Xenon SDHC self-defined register */
15#define XENON_SYS_CFG_INFO 0x0104
16#define XENON_SLOT_TYPE_SDIO_SHIFT 24
17#define XENON_NR_SUPPORTED_SLOT_MASK 0x7
18
19#define XENON_SYS_OP_CTRL 0x0108
20#define XENON_AUTO_CLKGATE_DISABLE_MASK BIT(20)
21#define XENON_SDCLK_IDLEOFF_ENABLE_SHIFT 8
22#define XENON_SLOT_ENABLE_SHIFT 0
23
24#define XENON_SYS_EXT_OP_CTRL 0x010C
25#define XENON_MASK_CMD_CONFLICT_ERR BIT(8)
26
27#define XENON_SLOT_OP_STATUS_CTRL 0x0128
28#define XENON_TUN_CONSECUTIVE_TIMES_SHIFT 16
29#define XENON_TUN_CONSECUTIVE_TIMES_MASK 0x7
30#define XENON_TUN_CONSECUTIVE_TIMES 0x4
31#define XENON_TUNING_STEP_SHIFT 12
32#define XENON_TUNING_STEP_MASK 0xF
33#define XENON_TUNING_STEP_DIVIDER BIT(6)
34
35#define XENON_SLOT_EMMC_CTRL 0x0130
36#define XENON_ENABLE_DATA_STROBE BIT(24)
37
38#define XENON_SLOT_RETUNING_REQ_CTRL 0x0144
39/* retuning compatible */
40#define XENON_RETUNING_COMPATIBLE 0x1
41
42#define XENON_SLOT_EXT_PRESENT_STATE 0x014C
43#define XENON_DLL_LOCK_STATE 0x1
44
45#define XENON_SLOT_DLL_CUR_DLY_VAL 0x0150
46
47/* Tuning Parameter */
48#define XENON_TMR_RETUN_NO_PRESENT 0xF
49#define XENON_DEF_TUNING_COUNT 0x9
50
51#define XENON_DEFAULT_SDCLK_FREQ 400000
52#define XENON_LOWEST_SDCLK_FREQ 100000
53
54/* Xenon specific Mode Select value */
55#define XENON_CTRL_HS200 0x5
56#define XENON_CTRL_HS400 0x6
57
58struct xenon_priv {
59 unsigned char tuning_count;
60 /* idx of SDHC */
61 u8 sdhc_id;
62
63	/*
64	 * eMMC/SD/SDIO require different register settings.
65	 * The Xenon driver has to recognize the card type
66	 * before mmc_host->card becomes available.
67	 * This field records the card type during init.
68	 * It is updated in xenon_init_card().
69	 *
70	 * It is only valid during initialization, after it is updated.
71	 * Do not access this variable in normal transfers after
72	 * initialization completes.
73	 */
74 unsigned int init_card_type;
75
76	/*
77	 * The bus_width, timing, and clock fields below record the
78	 * current ios settings of the Xenon SDHC.
79	 * The driver adjusts the PHY setting if any ios change
80	 * affects PHY timing.
81	 */
82 unsigned char bus_width;
83 unsigned char timing;
84 unsigned int clock;
85
86 int phy_type;
87 /*
88 * Contains board-specific PHY parameters
89 * passed from device tree.
90 */
91 void *phy_params;
92 struct xenon_emmc_phy_regs *emmc_phy_regs;
93};
94
95int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios);
96void xenon_clean_phy(struct sdhci_host *host);
97int xenon_phy_parse_dt(struct device_node *np,
98 struct sdhci_host *host);
99void xenon_soc_pad_ctrl(struct sdhci_host *host,
100 unsigned char signal_voltage);
101#endif
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 63bc33a54d0d..ecd0d4350e8a 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/ktime.h>
17#include <linux/highmem.h> 18#include <linux/highmem.h>
18#include <linux/io.h> 19#include <linux/io.h>
19#include <linux/module.h> 20#include <linux/module.h>
@@ -37,7 +38,10 @@
37#define DRIVER_NAME "sdhci" 38#define DRIVER_NAME "sdhci"
38 39
39#define DBG(f, x...) \ 40#define DBG(f, x...) \
40 pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x) 41 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
42
43#define SDHCI_DUMP(f, x...) \
44 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
41 45
42#define MAX_TUNING_LOOP 40 46#define MAX_TUNING_LOOP 40
43 47
@@ -48,61 +52,68 @@ static void sdhci_finish_data(struct sdhci_host *);
48 52
49static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); 53static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
50 54
51static void sdhci_dumpregs(struct sdhci_host *host) 55void sdhci_dumpregs(struct sdhci_host *host)
52{ 56{
53 pr_err(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n", 57 SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");
54 mmc_hostname(host->mmc)); 58
55 59 SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
56 pr_err(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n", 60 sdhci_readl(host, SDHCI_DMA_ADDRESS),
57 sdhci_readl(host, SDHCI_DMA_ADDRESS), 61 sdhci_readw(host, SDHCI_HOST_VERSION));
58 sdhci_readw(host, SDHCI_HOST_VERSION)); 62 SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
59 pr_err(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n", 63 sdhci_readw(host, SDHCI_BLOCK_SIZE),
60 sdhci_readw(host, SDHCI_BLOCK_SIZE), 64 sdhci_readw(host, SDHCI_BLOCK_COUNT));
61 sdhci_readw(host, SDHCI_BLOCK_COUNT)); 65 SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
62 pr_err(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n", 66 sdhci_readl(host, SDHCI_ARGUMENT),
63 sdhci_readl(host, SDHCI_ARGUMENT), 67 sdhci_readw(host, SDHCI_TRANSFER_MODE));
64 sdhci_readw(host, SDHCI_TRANSFER_MODE)); 68 SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
65 pr_err(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n", 69 sdhci_readl(host, SDHCI_PRESENT_STATE),
66 sdhci_readl(host, SDHCI_PRESENT_STATE), 70 sdhci_readb(host, SDHCI_HOST_CONTROL));
67 sdhci_readb(host, SDHCI_HOST_CONTROL)); 71 SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
68 pr_err(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n", 72 sdhci_readb(host, SDHCI_POWER_CONTROL),
69 sdhci_readb(host, SDHCI_POWER_CONTROL), 73 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
70 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); 74 SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
71 pr_err(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n", 75 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
72 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL), 76 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
73 sdhci_readw(host, SDHCI_CLOCK_CONTROL)); 77 SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
74 pr_err(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n", 78 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
75 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL), 79 sdhci_readl(host, SDHCI_INT_STATUS));
76 sdhci_readl(host, SDHCI_INT_STATUS)); 80 SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
77 pr_err(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n", 81 sdhci_readl(host, SDHCI_INT_ENABLE),
78 sdhci_readl(host, SDHCI_INT_ENABLE), 82 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
79 sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); 83 SDHCI_DUMP("AC12 err: 0x%08x | Slot int: 0x%08x\n",
80 pr_err(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n", 84 sdhci_readw(host, SDHCI_ACMD12_ERR),
81 sdhci_readw(host, SDHCI_ACMD12_ERR), 85 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
82 sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); 86 SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
83 pr_err(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n", 87 sdhci_readl(host, SDHCI_CAPABILITIES),
84 sdhci_readl(host, SDHCI_CAPABILITIES), 88 sdhci_readl(host, SDHCI_CAPABILITIES_1));
85 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 89 SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
86 pr_err(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n", 90 sdhci_readw(host, SDHCI_COMMAND),
87 sdhci_readw(host, SDHCI_COMMAND), 91 sdhci_readl(host, SDHCI_MAX_CURRENT));
88 sdhci_readl(host, SDHCI_MAX_CURRENT)); 92 SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
89 pr_err(DRIVER_NAME ": Host ctl2: 0x%08x\n", 93 sdhci_readl(host, SDHCI_RESPONSE),
90 sdhci_readw(host, SDHCI_HOST_CONTROL2)); 94 sdhci_readl(host, SDHCI_RESPONSE + 4));
95 SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
96 sdhci_readl(host, SDHCI_RESPONSE + 8),
97 sdhci_readl(host, SDHCI_RESPONSE + 12));
98 SDHCI_DUMP("Host ctl2: 0x%08x\n",
99 sdhci_readw(host, SDHCI_HOST_CONTROL2));
91 100
92 if (host->flags & SDHCI_USE_ADMA) { 101 if (host->flags & SDHCI_USE_ADMA) {
93 if (host->flags & SDHCI_USE_64_BIT_DMA) 102 if (host->flags & SDHCI_USE_64_BIT_DMA) {
94 pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n", 103 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
95 readl(host->ioaddr + SDHCI_ADMA_ERROR), 104 sdhci_readl(host, SDHCI_ADMA_ERROR),
96 readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI), 105 sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
97 readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); 106 sdhci_readl(host, SDHCI_ADMA_ADDRESS));
98 else 107 } else {
99 pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", 108 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
100 readl(host->ioaddr + SDHCI_ADMA_ERROR), 109 sdhci_readl(host, SDHCI_ADMA_ERROR),
101 readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); 110 sdhci_readl(host, SDHCI_ADMA_ADDRESS));
111 }
102 } 112 }
103 113
104 pr_err(DRIVER_NAME ": ===========================================\n"); 114 SDHCI_DUMP("============================================\n");
105} 115}
116EXPORT_SYMBOL_GPL(sdhci_dumpregs);
106 117
107/*****************************************************************************\ 118/*****************************************************************************\
108 * * 119 * *
@@ -165,7 +176,7 @@ static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
165 176
166void sdhci_reset(struct sdhci_host *host, u8 mask) 177void sdhci_reset(struct sdhci_host *host, u8 mask)
167{ 178{
168 unsigned long timeout; 179 ktime_t timeout;
169 180
170 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); 181 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
171 182
@@ -177,18 +188,17 @@ void sdhci_reset(struct sdhci_host *host, u8 mask)
177 } 188 }
178 189
179 /* Wait max 100 ms */ 190 /* Wait max 100 ms */
180 timeout = 100; 191 timeout = ktime_add_ms(ktime_get(), 100);
181 192
182 /* hw clears the bit when it's done */ 193 /* hw clears the bit when it's done */
183 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) { 194 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
184 if (timeout == 0) { 195 if (ktime_after(ktime_get(), timeout)) {
185 pr_err("%s: Reset 0x%x never completed.\n", 196 pr_err("%s: Reset 0x%x never completed.\n",
186 mmc_hostname(host->mmc), (int)mask); 197 mmc_hostname(host->mmc), (int)mask);
187 sdhci_dumpregs(host); 198 sdhci_dumpregs(host);
188 return; 199 return;
189 } 200 }
190 timeout--; 201 udelay(10);
191 mdelay(1);
192 } 202 }
193} 203}
194EXPORT_SYMBOL_GPL(sdhci_reset); 204EXPORT_SYMBOL_GPL(sdhci_reset);
@@ -215,15 +225,8 @@ static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
215 } 225 }
216} 226}
217 227
218static void sdhci_init(struct sdhci_host *host, int soft) 228static void sdhci_set_default_irqs(struct sdhci_host *host)
219{ 229{
220 struct mmc_host *mmc = host->mmc;
221
222 if (soft)
223 sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
224 else
225 sdhci_do_reset(host, SDHCI_RESET_ALL);
226
227 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | 230 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
228 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | 231 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
229 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC | 232 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
@@ -236,6 +239,20 @@ static void sdhci_init(struct sdhci_host *host, int soft)
236 239
237 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 240 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
238 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 241 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
242}
243
244static void sdhci_init(struct sdhci_host *host, int soft)
245{
246 struct mmc_host *mmc = host->mmc;
247
248 if (soft)
249 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
250 else
251 sdhci_do_reset(host, SDHCI_RESET_ALL);
252
253 sdhci_set_default_irqs(host);
254
255 host->cqe_on = false;
239 256
240 if (soft) { 257 if (soft) {
241 /* force clock reconfiguration */ 258 /* force clock reconfiguration */
@@ -485,8 +502,7 @@ static int sdhci_pre_dma_transfer(struct sdhci_host *host,
485 return data->sg_count; 502 return data->sg_count;
486 503
487 sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 504 sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
488 data->flags & MMC_DATA_WRITE ? 505 mmc_get_dma_dir(data));
489 DMA_TO_DEVICE : DMA_FROM_DEVICE);
490 506
491 if (sg_count == 0) 507 if (sg_count == 0)
492 return -ENOSPC; 508 return -ENOSPC;
@@ -715,8 +731,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
715 } 731 }
716 732
717 if (count >= 0xF) { 733 if (count >= 0xF) {
718 DBG("%s: Too large timeout 0x%x requested for CMD%d!\n", 734 DBG("Too large timeout 0x%x requested for CMD%d!\n",
719 mmc_hostname(host->mmc), count, cmd->opcode); 735 count, cmd->opcode);
720 count = 0xE; 736 count = 0xE;
721 } 737 }
722 738
@@ -1346,25 +1362,22 @@ EXPORT_SYMBOL_GPL(sdhci_calc_clk);
1346 1362
1347void sdhci_enable_clk(struct sdhci_host *host, u16 clk) 1363void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1348{ 1364{
1349 unsigned long timeout; 1365 ktime_t timeout;
1350 1366
1351 clk |= SDHCI_CLOCK_INT_EN; 1367 clk |= SDHCI_CLOCK_INT_EN;
1352 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1368 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1353 1369
1354 /* Wait max 20 ms */ 1370 /* Wait max 20 ms */
1355 timeout = 20; 1371 timeout = ktime_add_ms(ktime_get(), 20);
1356 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) 1372 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
1357 & SDHCI_CLOCK_INT_STABLE)) { 1373 & SDHCI_CLOCK_INT_STABLE)) {
1358 if (timeout == 0) { 1374 if (ktime_after(ktime_get(), timeout)) {
1359 pr_err("%s: Internal clock never stabilised.\n", 1375 pr_err("%s: Internal clock never stabilised.\n",
1360 mmc_hostname(host->mmc)); 1376 mmc_hostname(host->mmc));
1361 sdhci_dumpregs(host); 1377 sdhci_dumpregs(host);
1362 return; 1378 return;
1363 } 1379 }
1364 timeout--; 1380 udelay(10);
1365 spin_unlock_irq(&host->lock);
1366 usleep_range(900, 1100);
1367 spin_lock_irq(&host->lock);
1368 } 1381 }
1369 1382
1370 clk |= SDHCI_CLOCK_CARD_EN; 1383 clk |= SDHCI_CLOCK_CARD_EN;
@@ -1393,9 +1406,7 @@ static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1393{ 1406{
1394 struct mmc_host *mmc = host->mmc; 1407 struct mmc_host *mmc = host->mmc;
1395 1408
1396 spin_unlock_irq(&host->lock);
1397 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 1409 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1398 spin_lock_irq(&host->lock);
1399 1410
1400 if (mode != MMC_POWER_OFF) 1411 if (mode != MMC_POWER_OFF)
1401 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); 1412 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
@@ -1572,19 +1583,15 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
1572} 1583}
1573EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling); 1584EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
1574 1585
1575static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1586void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1576{ 1587{
1577 struct sdhci_host *host = mmc_priv(mmc); 1588 struct sdhci_host *host = mmc_priv(mmc);
1578 unsigned long flags;
1579 u8 ctrl; 1589 u8 ctrl;
1580 1590
1581 if (ios->power_mode == MMC_POWER_UNDEFINED) 1591 if (ios->power_mode == MMC_POWER_UNDEFINED)
1582 return; 1592 return;
1583 1593
1584 spin_lock_irqsave(&host->lock, flags);
1585
1586 if (host->flags & SDHCI_DEVICE_DEAD) { 1594 if (host->flags & SDHCI_DEVICE_DEAD) {
1587 spin_unlock_irqrestore(&host->lock, flags);
1588 if (!IS_ERR(mmc->supply.vmmc) && 1595 if (!IS_ERR(mmc->supply.vmmc) &&
1589 ios->power_mode == MMC_POWER_OFF) 1596 ios->power_mode == MMC_POWER_OFF)
1590 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 1597 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
@@ -1730,8 +1737,8 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1730 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 1737 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1731 1738
1732 mmiowb(); 1739 mmiowb();
1733 spin_unlock_irqrestore(&host->lock, flags);
1734} 1740}
1741EXPORT_SYMBOL_GPL(sdhci_set_ios);
1735 1742
1736static int sdhci_get_cd(struct mmc_host *mmc) 1743static int sdhci_get_cd(struct mmc_host *mmc)
1737{ 1744{
@@ -1825,7 +1832,7 @@ static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
1825 } 1832 }
1826} 1833}
1827 1834
1828static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 1835void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1829{ 1836{
1830 struct sdhci_host *host = mmc_priv(mmc); 1837 struct sdhci_host *host = mmc_priv(mmc);
1831 unsigned long flags; 1838 unsigned long flags;
@@ -1845,9 +1852,10 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1845 if (!enable) 1852 if (!enable)
1846 pm_runtime_put_noidle(host->mmc->parent); 1853 pm_runtime_put_noidle(host->mmc->parent);
1847} 1854}
1855EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
1848 1856
1849static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 1857int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1850 struct mmc_ios *ios) 1858 struct mmc_ios *ios)
1851{ 1859{
1852 struct sdhci_host *host = mmc_priv(mmc); 1860 struct sdhci_host *host = mmc_priv(mmc);
1853 u16 ctrl; 1861 u16 ctrl;
@@ -1939,6 +1947,7 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1939 return 0; 1947 return 0;
1940 } 1948 }
1941} 1949}
1950EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
1942 1951
1943static int sdhci_card_busy(struct mmc_host *mmc) 1952static int sdhci_card_busy(struct mmc_host *mmc)
1944{ 1953{
@@ -2003,8 +2012,7 @@ static void sdhci_reset_tuning(struct sdhci_host *host)
2003 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2012 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2004} 2013}
2005 2014
2006static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode, 2015static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2007 unsigned long flags)
2008{ 2016{
2009 sdhci_reset_tuning(host); 2017 sdhci_reset_tuning(host);
2010 2018
@@ -2013,9 +2021,7 @@ static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode,
2013 2021
2014 sdhci_end_tuning(host); 2022 sdhci_end_tuning(host);
2015 2023
2016 spin_unlock_irqrestore(&host->lock, flags);
2017 mmc_abort_tuning(host->mmc, opcode); 2024 mmc_abort_tuning(host->mmc, opcode);
2018 spin_lock_irqsave(&host->lock, flags);
2019} 2025}
2020 2026
2021/* 2027/*
@@ -2025,12 +2031,14 @@ static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode,
2025 * interrupt setup is different to other commands and there is no timeout 2031 * interrupt setup is different to other commands and there is no timeout
2026 * interrupt so special handling is needed. 2032 * interrupt so special handling is needed.
2027 */ 2033 */
2028static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode, 2034static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2029 unsigned long flags)
2030{ 2035{
2031 struct mmc_host *mmc = host->mmc; 2036 struct mmc_host *mmc = host->mmc;
2032 struct mmc_command cmd = {}; 2037 struct mmc_command cmd = {};
2033 struct mmc_request mrq = {}; 2038 struct mmc_request mrq = {};
2039 unsigned long flags;
2040
2041 spin_lock_irqsave(&host->lock, flags);
2034 2042
2035 cmd.opcode = opcode; 2043 cmd.opcode = opcode;
2036 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 2044 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
@@ -2064,17 +2072,16 @@ static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode,
2064 2072
2065 host->tuning_done = 0; 2073 host->tuning_done = 0;
2066 2074
2075 mmiowb();
2067 spin_unlock_irqrestore(&host->lock, flags); 2076 spin_unlock_irqrestore(&host->lock, flags);
2068 2077
2069 /* Wait for Buffer Read Ready interrupt */ 2078 /* Wait for Buffer Read Ready interrupt */
2070 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1), 2079 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2071 msecs_to_jiffies(50)); 2080 msecs_to_jiffies(50));
2072 2081
2073 spin_lock_irqsave(&host->lock, flags);
2074} 2082}
2075 2083
2076static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode, 2084static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2077 unsigned long flags)
2078{ 2085{
2079 int i; 2086 int i;
2080 2087
@@ -2085,12 +2092,12 @@ static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode,
2085 for (i = 0; i < MAX_TUNING_LOOP; i++) { 2092 for (i = 0; i < MAX_TUNING_LOOP; i++) {
2086 u16 ctrl; 2093 u16 ctrl;
2087 2094
2088 sdhci_send_tuning(host, opcode, flags); 2095 sdhci_send_tuning(host, opcode);
2089 2096
2090 if (!host->tuning_done) { 2097 if (!host->tuning_done) {
2091 pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n", 2098 pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
2092 mmc_hostname(host->mmc)); 2099 mmc_hostname(host->mmc));
2093 sdhci_abort_tuning(host, opcode, flags); 2100 sdhci_abort_tuning(host, opcode);
2094 return; 2101 return;
2095 } 2102 }
2096 2103
@@ -2101,9 +2108,9 @@ static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode,
2101 break; 2108 break;
2102 } 2109 }
2103 2110
2104 /* eMMC spec does not require a delay between tuning cycles */ 2111 /* Spec does not require a delay between tuning cycles */
2105 if (opcode == MMC_SEND_TUNING_BLOCK) 2112 if (host->tuning_delay > 0)
2106 mdelay(1); 2113 mdelay(host->tuning_delay);
2107 } 2114 }
2108 2115
2109 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", 2116 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
@@ -2115,12 +2122,9 @@ int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2115{ 2122{
2116 struct sdhci_host *host = mmc_priv(mmc); 2123 struct sdhci_host *host = mmc_priv(mmc);
2117 int err = 0; 2124 int err = 0;
2118 unsigned long flags;
2119 unsigned int tuning_count = 0; 2125 unsigned int tuning_count = 0;
2120 bool hs400_tuning; 2126 bool hs400_tuning;
2121 2127
2122 spin_lock_irqsave(&host->lock, flags);
2123
 	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
 
 	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
@@ -2137,7 +2141,7 @@ int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
 	/* HS400 tuning is done in HS200 mode */
 	case MMC_TIMING_MMC_HS400:
 		err = -EINVAL;
-		goto out_unlock;
+		goto out;
 
 	case MMC_TIMING_MMC_HS200:
 		/*
@@ -2158,44 +2162,31 @@ int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
 		/* FALLTHROUGH */
 
 	default:
-		goto out_unlock;
+		goto out;
 	}
 
 	if (host->ops->platform_execute_tuning) {
-		spin_unlock_irqrestore(&host->lock, flags);
 		err = host->ops->platform_execute_tuning(host, opcode);
-		spin_lock_irqsave(&host->lock, flags);
-		goto out_unlock;
+		goto out;
 	}
 
 	host->mmc->retune_period = tuning_count;
 
+	if (host->tuning_delay < 0)
+		host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
+
 	sdhci_start_tuning(host);
 
-	__sdhci_execute_tuning(host, opcode, flags);
+	__sdhci_execute_tuning(host, opcode);
 
 	sdhci_end_tuning(host);
-out_unlock:
+out:
 	host->flags &= ~SDHCI_HS400_TUNING;
-	spin_unlock_irqrestore(&host->lock, flags);
 
 	return err;
 }
 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
 
-static int sdhci_select_drive_strength(struct mmc_card *card,
-				       unsigned int max_dtr, int host_drv,
-				       int card_drv, int *drv_type)
-{
-	struct sdhci_host *host = mmc_priv(card->host);
-
-	if (!host->ops->select_drive_strength)
-		return 0;
-
-	return host->ops->select_drive_strength(host, card, max_dtr, host_drv,
-						card_drv, drv_type);
-}
-
 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
 {
 	/* Host Controller v3.00 defines preset value registers */
@@ -2233,8 +2224,7 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
 
 	if (data->host_cookie != COOKIE_UNMAPPED)
 		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-			     data->flags & MMC_DATA_WRITE ?
-			     DMA_TO_DEVICE : DMA_FROM_DEVICE);
+			     mmc_get_dma_dir(data));
 
 	data->host_cookie = COOKIE_UNMAPPED;
 }
@@ -2309,7 +2299,6 @@ static const struct mmc_host_ops sdhci_ops = {
 	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
 	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
 	.execute_tuning			= sdhci_execute_tuning,
-	.select_drive_strength		= sdhci_select_drive_strength,
 	.card_event			= sdhci_card_event,
 	.card_busy	= sdhci_card_busy,
 };
@@ -2351,8 +2340,7 @@ static bool sdhci_request_done(struct sdhci_host *host)
 
 		if (data && data->host_cookie == COOKIE_MAPPED) {
 			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-				     (data->flags & MMC_DATA_READ) ?
-				     DMA_FROM_DEVICE : DMA_TO_DEVICE);
+				     mmc_get_dma_dir(data));
 			data->host_cookie = COOKIE_UNMAPPED;
 		}
 	}
@@ -2517,7 +2505,6 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
 #ifdef CONFIG_MMC_DEBUG
 static void sdhci_adma_show_error(struct sdhci_host *host)
 {
-	const char *name = mmc_hostname(host->mmc);
 	void *desc = host->adma_table;
 
 	sdhci_dumpregs(host);
@@ -2526,14 +2513,14 @@ static void sdhci_adma_show_error(struct sdhci_host *host)
 		struct sdhci_adma2_64_desc *dma_desc = desc;
 
 		if (host->flags & SDHCI_USE_64_BIT_DMA)
-			DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
-			    name, desc, le32_to_cpu(dma_desc->addr_hi),
+			DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
+			    desc, le32_to_cpu(dma_desc->addr_hi),
 			    le32_to_cpu(dma_desc->addr_lo),
 			    le16_to_cpu(dma_desc->len),
 			    le16_to_cpu(dma_desc->cmd));
 		else
-			DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
-			    name, desc, le32_to_cpu(dma_desc->addr_lo),
+			DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
+			    desc, le32_to_cpu(dma_desc->addr_lo),
 			    le16_to_cpu(dma_desc->len),
 			    le16_to_cpu(dma_desc->cmd));
 
@@ -2649,10 +2636,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
 					~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
 					SDHCI_DEFAULT_BOUNDARY_SIZE;
 			host->data->bytes_xfered = dmanow - dmastart;
-			DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
-				" next 0x%08x\n",
-				mmc_hostname(host->mmc), dmastart,
-				host->data->bytes_xfered, dmanow);
+			DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
+			    dmastart, host->data->bytes_xfered, dmanow);
 			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
 		}
 
@@ -2692,14 +2677,19 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
 	}
 
 	do {
+		DBG("IRQ status 0x%08x\n", intmask);
+
+		if (host->ops->irq) {
+			intmask = host->ops->irq(host, intmask);
+			if (!intmask)
+				goto cont;
+		}
+
 		/* Clear selected interrupts. */
 		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
 				  SDHCI_INT_BUS_POWER);
 		sdhci_writel(host, mask, SDHCI_INT_STATUS);
 
-		DBG("*** %s got interrupt: 0x%08x\n",
-			mmc_hostname(host->mmc), intmask);
-
 		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
 			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
 				      SDHCI_CARD_PRESENT;
@@ -2759,7 +2749,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
 			unexpected |= intmask;
 			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
 		}
-
+cont:
 		if (result == IRQ_NONE)
 			result = IRQ_HANDLED;
 
@@ -2858,8 +2848,6 @@ int sdhci_suspend_host(struct sdhci_host *host)
 	sdhci_disable_card_detection(host);
 
 	mmc_retune_timer_stop(host->mmc);
-	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
-		mmc_retune_needed(host->mmc);
 
 	if (!device_may_wakeup(mmc_dev(host->mmc))) {
 		host->ier = 0;
@@ -2920,8 +2908,6 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host)
 	unsigned long flags;
 
 	mmc_retune_timer_stop(host->mmc);
-	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
-		mmc_retune_needed(host->mmc);
 
 	spin_lock_irqsave(&host->lock, flags);
 	host->ier &= SDHCI_INT_CARD_INT;
@@ -2992,6 +2978,119 @@ EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
 
 /*****************************************************************************\
  *                                                                           *
+ * Command Queue Engine (CQE) helpers                                        *
+ *                                                                           *
+\*****************************************************************************/
+
+void sdhci_cqe_enable(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	unsigned long flags;
+	u8 ctrl;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+	ctrl &= ~SDHCI_CTRL_DMA_MASK;
+	if (host->flags & SDHCI_USE_64_BIT_DMA)
+		ctrl |= SDHCI_CTRL_ADMA64;
+	else
+		ctrl |= SDHCI_CTRL_ADMA32;
+	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG, 512),
+		     SDHCI_BLOCK_SIZE);
+
+	/* Set maximum timeout */
+	sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);
+
+	host->ier = host->cqe_ier;
+
+	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+
+	host->cqe_on = true;
+
+	pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
+		 mmc_hostname(mmc), host->ier,
+		 sdhci_readl(host, SDHCI_INT_STATUS));
+
+	mmiowb();
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
+
+void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	sdhci_set_default_irqs(host);
+
+	host->cqe_on = false;
+
+	if (recovery) {
+		sdhci_do_reset(host, SDHCI_RESET_CMD);
+		sdhci_do_reset(host, SDHCI_RESET_DATA);
+	}
+
+	pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
+		 mmc_hostname(mmc), host->ier,
+		 sdhci_readl(host, SDHCI_INT_STATUS));
+
+	mmiowb();
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
+
+bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
+		   int *data_error)
+{
+	u32 mask;
+
+	if (!host->cqe_on)
+		return false;
+
+	if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
+		*cmd_error = -EILSEQ;
+	else if (intmask & SDHCI_INT_TIMEOUT)
+		*cmd_error = -ETIMEDOUT;
+	else
+		*cmd_error = 0;
+
+	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
+		*data_error = -EILSEQ;
+	else if (intmask & SDHCI_INT_DATA_TIMEOUT)
+		*data_error = -ETIMEDOUT;
+	else if (intmask & SDHCI_INT_ADMA_ERROR)
+		*data_error = -EIO;
+	else
+		*data_error = 0;
+
+	/* Clear selected interrupts. */
+	mask = intmask & host->cqe_ier;
+	sdhci_writel(host, mask, SDHCI_INT_STATUS);
+
+	if (intmask & SDHCI_INT_BUS_POWER)
+		pr_err("%s: Card is consuming too much power!\n",
+		       mmc_hostname(host->mmc));
+
+	intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
+	if (intmask) {
+		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
+		pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
+		       mmc_hostname(host->mmc), intmask);
+		sdhci_dumpregs(host);
+	}
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
+
+/*****************************************************************************\
+ *                                                                           *
  * Device allocation/registration                                            *
  *                                                                           *
 \*****************************************************************************/
@@ -3015,6 +3114,11 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
 
 	host->flags = SDHCI_SIGNALING_330;
 
+	host->cqe_ier     = SDHCI_CQE_INT_MASK;
+	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
+
+	host->tuning_delay = -1;
+
 	return host;
 }
 
@@ -3297,20 +3401,22 @@ int sdhci_setup_host(struct sdhci_host *host)
 	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
 		host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
 			SDHCI_TIMEOUT_CLK_SHIFT;
+
+		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
+			host->timeout_clk *= 1000;
+
 		if (host->timeout_clk == 0) {
-			if (host->ops->get_timeout_clock) {
-				host->timeout_clk =
-					host->ops->get_timeout_clock(host);
-			} else {
+			if (!host->ops->get_timeout_clock) {
 				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
 					mmc_hostname(mmc));
 				ret = -ENODEV;
 				goto undma;
 			}
-		}
 
-		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
-			host->timeout_clk *= 1000;
+			host->timeout_clk =
+				DIV_ROUND_UP(host->ops->get_timeout_clock(host),
+					     1000);
+		}
 
 		if (override_timeout_clk)
 			host->timeout_clk = override_timeout_clk;
@@ -3332,9 +3438,9 @@ int sdhci_setup_host(struct sdhci_host *host)
 	    !(host->flags & SDHCI_USE_SDMA)) &&
 	    !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
 		host->flags |= SDHCI_AUTO_CMD23;
-		DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
+		DBG("Auto-CMD23 available\n");
 	} else {
-		DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
+		DBG("Auto-CMD23 unavailable\n");
 	}
 
 	/*
@@ -3598,6 +3704,22 @@ undma:
 }
 EXPORT_SYMBOL_GPL(sdhci_setup_host);
 
+void sdhci_cleanup_host(struct sdhci_host *host)
+{
+	struct mmc_host *mmc = host->mmc;
+
+	if (!IS_ERR(mmc->supply.vqmmc))
+		regulator_disable(mmc->supply.vqmmc);
+
+	if (host->align_buffer)
+		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
+				  host->adma_table_sz, host->align_buffer,
+				  host->align_addr);
+	host->adma_table = NULL;
+	host->align_buffer = NULL;
+}
+EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
+
 int __sdhci_add_host(struct sdhci_host *host)
 {
 	struct mmc_host *mmc = host->mmc;
@@ -3662,16 +3784,6 @@ unirq:
 untasklet:
 	tasklet_kill(&host->finish_tasklet);
 
-	if (!IS_ERR(mmc->supply.vqmmc))
-		regulator_disable(mmc->supply.vqmmc);
-
-	if (host->align_buffer)
-		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
-				  host->adma_table_sz, host->align_buffer,
-				  host->align_addr);
-	host->adma_table = NULL;
-	host->align_buffer = NULL;
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(__sdhci_add_host);
@@ -3684,7 +3796,16 @@ int sdhci_add_host(struct sdhci_host *host)
 	if (ret)
 		return ret;
 
-	return __sdhci_add_host(host);
+	ret = __sdhci_add_host(host);
+	if (ret)
+		goto cleanup;
+
+	return 0;
+
+cleanup:
+	sdhci_cleanup_host(host);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(sdhci_add_host);
 
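With sdhci_cleanup_host() exported and sdhci_add_host() now unwinding through it, a platform driver can release everything sdhci_setup_host() allocated when one of its own probe steps fails in between. A minimal sketch of the intended probe flow, not part of this patch; the foo_* names and the clock helpers are hypothetical:

static int foo_sdhci_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	int ret;

	host = sdhci_alloc_host(&pdev->dev, 0);
	if (IS_ERR(host))
		return PTR_ERR(host);

	ret = sdhci_setup_host(host);		/* allocates DMA buffers etc. */
	if (ret)
		goto err_free;

	ret = foo_enable_clocks(pdev);		/* hypothetical driver step */
	if (ret)
		goto err_cleanup;

	ret = __sdhci_add_host(host);
	if (ret)
		goto err_clocks;

	return 0;

err_clocks:
	foo_disable_clocks(pdev);		/* hypothetical */
err_cleanup:
	sdhci_cleanup_host(host);		/* undoes sdhci_setup_host() */
err_free:
	sdhci_free_host(host);
	return ret;
}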
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index edf3adfbc213..0469fa191493 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -134,6 +134,7 @@
 #define  SDHCI_INT_CARD_REMOVE	0x00000080
 #define  SDHCI_INT_CARD_INT	0x00000100
 #define  SDHCI_INT_RETUNE	0x00001000
+#define  SDHCI_INT_CQE		0x00004000
 #define  SDHCI_INT_ERROR	0x00008000
 #define  SDHCI_INT_TIMEOUT	0x00010000
 #define  SDHCI_INT_CRC		0x00020000
@@ -158,6 +159,13 @@
 		SDHCI_INT_BLK_GAP)
 #define SDHCI_INT_ALL_MASK	((unsigned int)-1)
 
+#define SDHCI_CQE_INT_ERR_MASK ( \
+	SDHCI_INT_ADMA_ERROR | SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | \
+	SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX | \
+	SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)
+
+#define SDHCI_CQE_INT_MASK (SDHCI_CQE_INT_ERR_MASK | SDHCI_INT_CQE)
+
 #define SDHCI_ACMD12_ERR	0x3C
 
 #define SDHCI_HOST_CONTROL2	0x3E
@@ -518,6 +526,10 @@ struct sdhci_host {
 	/* cached registers */
 	u32			ier;
 
+	bool			cqe_on;		/* CQE is operating */
+	u32			cqe_ier;	/* CQE interrupt mask */
+	u32			cqe_err_ier;	/* CQE error interrupt mask */
+
 	wait_queue_head_t	buf_ready_int;	/* Waitqueue for Buffer Read Ready interrupt */
 	unsigned int		tuning_done;	/* Condition flag set when CMD19 succeeds */
 
@@ -526,6 +538,8 @@ struct sdhci_host {
 #define SDHCI_TUNING_MODE_1	0
 #define SDHCI_TUNING_MODE_2	1
 #define SDHCI_TUNING_MODE_3	2
+	/* Delay (ms) between tuning commands */
+	int			tuning_delay;
 
 	unsigned long private[0] ____cacheline_aligned;
 };
@@ -544,9 +558,12 @@ struct sdhci_ops {
 	void	(*set_power)(struct sdhci_host *host, unsigned char mode,
 			     unsigned short vdd);
 
+	u32		(*irq)(struct sdhci_host *host, u32 intmask);
+
 	int		(*enable_dma)(struct sdhci_host *host);
 	unsigned int	(*get_max_clock)(struct sdhci_host *host);
 	unsigned int	(*get_min_clock)(struct sdhci_host *host);
+	/* get_timeout_clock should return clk rate in unit of Hz */
 	unsigned int	(*get_timeout_clock)(struct sdhci_host *host);
 	unsigned int	(*get_max_timeout_count)(struct sdhci_host *host);
 	void		(*set_timeout)(struct sdhci_host *host,
@@ -562,10 +579,6 @@ struct sdhci_ops {
 	void	(*adma_workaround)(struct sdhci_host *host, u32 intmask);
 	void	(*card_event)(struct sdhci_host *host);
 	void	(*voltage_switch)(struct sdhci_host *host);
-	int	(*select_drive_strength)(struct sdhci_host *host,
-					 struct mmc_card *card,
-					 unsigned int max_dtr, int host_drv,
-					 int card_drv, int *drv_type);
 };
 
 #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
@@ -652,24 +665,23 @@ static inline u8 sdhci_readb(struct sdhci_host *host, int reg)
 
 #endif /* CONFIG_MMC_SDHCI_IO_ACCESSORS */
 
-extern struct sdhci_host *sdhci_alloc_host(struct device *dev,
-	size_t priv_size);
-extern void sdhci_free_host(struct sdhci_host *host);
+struct sdhci_host *sdhci_alloc_host(struct device *dev, size_t priv_size);
+void sdhci_free_host(struct sdhci_host *host);
 
 static inline void *sdhci_priv(struct sdhci_host *host)
 {
 	return host->private;
 }
 
-extern void sdhci_card_detect(struct sdhci_host *host);
-extern void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps,
-			      u32 *caps1);
-extern int sdhci_setup_host(struct sdhci_host *host);
-extern int __sdhci_add_host(struct sdhci_host *host);
-extern int sdhci_add_host(struct sdhci_host *host);
-extern void sdhci_remove_host(struct sdhci_host *host, int dead);
-extern void sdhci_send_command(struct sdhci_host *host,
-			       struct mmc_command *cmd);
+void sdhci_card_detect(struct sdhci_host *host);
+void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps,
+		       u32 *caps1);
+int sdhci_setup_host(struct sdhci_host *host);
+void sdhci_cleanup_host(struct sdhci_host *host);
+int __sdhci_add_host(struct sdhci_host *host);
+int sdhci_add_host(struct sdhci_host *host);
+void sdhci_remove_host(struct sdhci_host *host, int dead);
+void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);
 
 static inline void sdhci_read_caps(struct sdhci_host *host)
 {
@@ -693,13 +705,24 @@ void sdhci_set_bus_width(struct sdhci_host *host, int width);
 void sdhci_reset(struct sdhci_host *host, u8 mask);
 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
+void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
+int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
+				      struct mmc_ios *ios);
+void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable);
 
 #ifdef CONFIG_PM
-extern int sdhci_suspend_host(struct sdhci_host *host);
-extern int sdhci_resume_host(struct sdhci_host *host);
-extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
-extern int sdhci_runtime_suspend_host(struct sdhci_host *host);
-extern int sdhci_runtime_resume_host(struct sdhci_host *host);
+int sdhci_suspend_host(struct sdhci_host *host);
+int sdhci_resume_host(struct sdhci_host *host);
+void sdhci_enable_irq_wakeups(struct sdhci_host *host);
+int sdhci_runtime_suspend_host(struct sdhci_host *host);
+int sdhci_runtime_resume_host(struct sdhci_host *host);
 #endif
 
+void sdhci_cqe_enable(struct mmc_host *mmc);
+void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery);
+bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
+		   int *data_error);
+
+void sdhci_dumpregs(struct sdhci_host *host);
+
 #endif /* __SDHCI_HW_H */
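Taken together, the new ->irq host op and sdhci_cqe_irq() let a driver intercept interrupts while its command queue engine is active: sdhci_cqe_irq() decodes and clears CQE-related status, and returning 0 from ->irq makes sdhci_irq() skip the legacy handling (the new cont: label above). A sketch of that wiring; foo_cqe_handle() is a placeholder for the driver's CQE handler, not an API from this series:

static u32 foo_sdhci_irq(struct sdhci_host *host, u32 intmask)
{
	int cmd_error = 0;
	int data_error = 0;

	/* returns false while CQE is off: fall back to normal handling */
	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
		return intmask;

	foo_cqe_handle(host, cmd_error, data_error);	/* hypothetical */

	return 0;	/* fully handled; core jumps to cont: */
}

static const struct sdhci_ops foo_sdhci_ops = {
	.irq	= foo_sdhci_irq,
	/* ...the usual clock/reset/signaling ops... */
};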
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 6ffcd2838272..d6fa2214aaae 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -385,14 +385,6 @@ static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host,
 	wmb();
 }
 
-static enum dma_data_direction sunxi_mmc_get_dma_dir(struct mmc_data *data)
-{
-	if (data->flags & MMC_DATA_WRITE)
-		return DMA_TO_DEVICE;
-	else
-		return DMA_FROM_DEVICE;
-}
-
 static int sunxi_mmc_map_dma(struct sunxi_mmc_host *host,
 			     struct mmc_data *data)
 {
@@ -400,7 +392,7 @@ static int sunxi_mmc_map_dma(struct sunxi_mmc_host *host,
 	struct scatterlist *sg;
 
 	dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-			     sunxi_mmc_get_dma_dir(data));
+			     mmc_get_dma_dir(data));
 	if (dma_len == 0) {
 		dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
 		return -ENOMEM;
@@ -489,7 +481,7 @@ static void sunxi_mmc_dump_errinfo(struct sunxi_mmc_host *host)
 		      cmd->opcode == SD_IO_RW_DIRECT))
 		return;
 
-	dev_err(mmc_dev(host->mmc),
+	dev_dbg(mmc_dev(host->mmc),
 		"smc %d err, cmd %d,%s%s%s%s%s%s%s%s%s%s !!\n",
 		host->mmc->index, cmd->opcode,
 		data ? (data->flags & MMC_DATA_WRITE ? " WR" : " RD") : "",
@@ -551,7 +543,7 @@ static irqreturn_t sunxi_mmc_finalize_request(struct sunxi_mmc_host *host)
 		rval |= SDXC_FIFO_RESET;
 		mmc_writel(host, REG_GCTRL, rval);
 		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-			     sunxi_mmc_get_dma_dir(data));
+			     mmc_get_dma_dir(data));
 	}
 
 	mmc_writel(host, REG_RINTR, 0xffff);
@@ -1022,7 +1014,7 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
 	if (data)
 		dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
-			     sunxi_mmc_get_dma_dir(data));
+			     mmc_get_dma_dir(data));
 
 	dev_err(mmc_dev(mmc), "request already pending\n");
 	mrq->cmd->error = -EBUSY;
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 2b349d48fb9a..d0edb5730d3f 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -50,7 +50,11 @@
 #define CTL_CLK_AND_WAIT_CTL 0x138
 #define CTL_RESET_SDIO 0x1e0
 
-/* Definitions for values the CTRL_STATUS register can take. */
+/* Definitions for values the CTL_STOP_INTERNAL_ACTION register can take */
+#define TMIO_STOP_STP		BIT(0)
+#define TMIO_STOP_SEC		BIT(8)
+
+/* Definitions for values the CTL_STATUS register can take */
 #define TMIO_STAT_CMDRESPEND	BIT(0)
 #define TMIO_STAT_DATAEND	BIT(2)
 #define TMIO_STAT_CARD_REMOVE	BIT(3)
@@ -61,7 +65,7 @@
 #define TMIO_STAT_CARD_INSERT_A	BIT(9)
 #define TMIO_STAT_SIGSTATE_A	BIT(10)
 
-/* These belong technically to CTRL_STATUS2, but the driver merges them */
+/* These belong technically to CTL_STATUS2, but the driver merges them */
 #define TMIO_STAT_CMD_IDX_ERR	BIT(16)
 #define TMIO_STAT_CRCFAIL	BIT(17)
 #define TMIO_STAT_STOPBIT_ERR	BIT(18)
@@ -85,7 +89,7 @@
 
 #define TMIO_BBS 512		/* Boot block size */
 
-/* Definitions for values the CTRL_SDIO_STATUS register can take. */
+/* Definitions for values the CTL_SDIO_STATUS register can take */
 #define TMIO_SDIO_STAT_IOIRQ	0x0001
 #define TMIO_SDIO_STAT_EXPUB52	0x4000
 #define TMIO_SDIO_STAT_EXWT	0x8000
@@ -137,7 +141,7 @@ struct tmio_mmc_host {
 	bool			force_pio;
 	struct dma_chan		*chan_rx;
 	struct dma_chan		*chan_tx;
-	struct tasklet_struct	dma_complete;
+	struct completion	dma_dataend;
 	struct tasklet_struct	dma_issue;
 	struct scatterlist	bounce_sg;
 	u8			*bounce_buf;
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index fa8a936a3d9b..e2093db2b7ff 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -43,6 +43,34 @@ void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
 	tmio_mmc_enable_dma(host, true);
 }
 
+static void tmio_mmc_dma_callback(void *arg)
+{
+	struct tmio_mmc_host *host = arg;
+
+	spin_lock_irq(&host->lock);
+
+	if (!host->data)
+		goto out;
+
+	if (host->data->flags & MMC_DATA_READ)
+		dma_unmap_sg(host->chan_rx->device->dev,
+			     host->sg_ptr, host->sg_len,
+			     DMA_FROM_DEVICE);
+	else
+		dma_unmap_sg(host->chan_tx->device->dev,
+			     host->sg_ptr, host->sg_len,
+			     DMA_TO_DEVICE);
+
+	spin_unlock_irq(&host->lock);
+
+	wait_for_completion(&host->dma_dataend);
+
+	spin_lock_irq(&host->lock);
+	tmio_mmc_do_data_irq(host);
+out:
+	spin_unlock_irq(&host->lock);
+}
+
 static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 {
 	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
@@ -88,6 +116,10 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 					  DMA_DEV_TO_MEM, DMA_CTRL_ACK);
 
 	if (desc) {
+		reinit_completion(&host->dma_dataend);
+		desc->callback = tmio_mmc_dma_callback;
+		desc->callback_param = host;
+
 		cookie = dmaengine_submit(desc);
 		if (cookie < 0) {
 			desc = NULL;
@@ -162,6 +194,10 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 					  DMA_MEM_TO_DEV, DMA_CTRL_ACK);
 
 	if (desc) {
+		reinit_completion(&host->dma_dataend);
+		desc->callback = tmio_mmc_dma_callback;
+		desc->callback_param = host;
+
 		cookie = dmaengine_submit(desc);
 		if (cookie < 0) {
 			desc = NULL;
@@ -221,29 +257,6 @@ static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
 	dma_async_issue_pending(chan);
 }
 
-static void tmio_mmc_tasklet_fn(unsigned long arg)
-{
-	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
-
-	spin_lock_irq(&host->lock);
-
-	if (!host->data)
-		goto out;
-
-	if (host->data->flags & MMC_DATA_READ)
-		dma_unmap_sg(host->chan_rx->device->dev,
-			     host->sg_ptr, host->sg_len,
-			     DMA_FROM_DEVICE);
-	else
-		dma_unmap_sg(host->chan_tx->device->dev,
-			     host->sg_ptr, host->sg_len,
-			     DMA_TO_DEVICE);
-
-	tmio_mmc_do_data_irq(host);
-out:
-	spin_unlock_irq(&host->lock);
-}
-
 void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
 {
 	/* We can only either use DMA for both Tx and Rx or not use it at all */
@@ -306,7 +319,7 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
 		if (!host->bounce_buf)
 			goto ebouncebuf;
 
-		tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
+		init_completion(&host->dma_dataend);
 		tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
 	}
 
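The tasklet-to-completion conversion above fixes the ordering between the two events that end a DMA transfer: the dmaengine callback (DMA engine finished) and the DATAEND interrupt (the card finished). The callback now blocks on the completion until DATAEND has fired, so the request is only finalized once both have happened. The core of the pattern, reduced to a sketch; it assumes, as the driver does, that the DMA callback runs where it is allowed to wait, and the foo_* names are illustrative:

#include <linux/completion.h>

struct foo_xfer {
	struct completion dataend;	/* signalled by the DATAEND IRQ */
};

static void foo_submit(struct foo_xfer *x)
{
	reinit_completion(&x->dataend);	/* re-arm before each DMA submit */
	/* ...set desc->callback = foo_dma_done, then submit the descriptor... */
}

static void foo_dataend_irq(struct foo_xfer *x)
{
	complete(&x->dataend);		/* card reported end of data */
}

static void foo_dma_done(void *arg)	/* dmaengine desc->callback */
{
	struct foo_xfer *x = arg;

	/* wait until the card side is also done, then finish the request */
	wait_for_completion(&x->dataend);
}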
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 6b789a739d4d..a2d92f10501b 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -340,7 +340,7 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command
 
 	/* CMD12 is handled by hardware */
 	if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) {
-		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
+		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, TMIO_STOP_STP);
 		return 0;
 	}
 
@@ -367,7 +367,7 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command
 	if (data) {
 		c |= DATA_PRESENT;
 		if (data->blocks > 1) {
-			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
+			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, TMIO_STOP_SEC);
 			c |= TRANSFER_MULTI;
 
 			/*
@@ -553,10 +553,14 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
 	}
 
 	if (stop) {
-		if (stop->opcode == MMC_STOP_TRANSMISSION && !stop->arg)
-			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
-		else
-			BUG();
+		if (stop->opcode != MMC_STOP_TRANSMISSION || stop->arg)
+			dev_err(&host->pdev->dev, "unsupported stop: CMD%u,0x%x. We did CMD12,0\n",
+				stop->opcode, stop->arg);
+
+		/* fill in response from auto CMD12 */
+		stop->resp[0] = sd_ctrl_read16_and_16_as_32(host, CTL_RESPONSE);
+
+		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0);
 	}
 
 	schedule_work(&host->done);
@@ -596,11 +600,11 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
 
 		if (done) {
 			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
-			tasklet_schedule(&host->dma_complete);
+			complete(&host->dma_dataend);
 		}
 	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
 		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
-		tasklet_schedule(&host->dma_complete);
+		complete(&host->dma_dataend);
 	} else {
 		tmio_mmc_do_data_irq(host);
 		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
@@ -811,16 +815,14 @@ static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
 	struct tmio_mmc_host *host = mmc_priv(mmc);
 	int i, ret = 0;
 
-	if (!host->tap_num) {
-		if (!host->init_tuning || !host->select_tuning)
-			/* Tuning is not supported */
-			goto out;
+	if (!host->init_tuning || !host->select_tuning)
+		/* Tuning is not supported */
+		goto out;
 
-		host->tap_num = host->init_tuning(host);
-		if (!host->tap_num)
-			/* Tuning is not supported */
-			goto out;
-	}
+	host->tap_num = host->init_tuning(host);
+	if (!host->tap_num)
+		/* Tuning is not supported */
+		goto out;
 
 	if (host->tap_num * 2 >= sizeof(host->taps) * BITS_PER_BYTE) {
 		dev_warn_once(&host->pdev->dev,
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 77e61e0a216a..aad015e0152b 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -89,6 +89,7 @@ struct mmc_ext_csd {
 	unsigned int		boot_ro_lock;		/* ro lock support */
 	bool			boot_ro_lockable;
 	bool			ffu_capable;	/* Firmware upgrade support */
+	bool			cmdq_en;	/* Command Queue enabled */
 	bool			cmdq_support;	/* Command Queue supported */
 	unsigned int		cmdq_depth;	/* Command Queue depth */
 #define MMC_FIRMWARE_LEN 8
@@ -208,6 +209,7 @@ struct sdio_cis {
 struct mmc_host;
 struct sdio_func;
 struct sdio_func_tuple;
+struct mmc_queue_req;
 
 #define SDIO_MAX_FUNCS		7
 
@@ -267,6 +269,8 @@ struct mmc_card {
 #define MMC_QUIRK_TRIM_BROKEN	(1<<12)		/* Skip trim */
 #define MMC_QUIRK_BROKEN_HPI	(1<<13)		/* Disable broken HPI support */
 
+	bool			reenable_cmdq;	/* Re-enable Command Queue */
+
 	unsigned int		erase_size;	/* erase size in sectors */
 	unsigned int		erase_shift;	/* if erase unit is power 2 */
 	unsigned int		pref_erase;	/* in sectors */
@@ -300,6 +304,10 @@ struct mmc_card {
 	struct dentry		*debugfs_root;
 	struct mmc_part	part[MMC_NUM_PHY_PARTITION];	/* physical partitions */
 	unsigned int	nr_parts;
+
+	struct mmc_queue_req	*mqrq;		/* Shared queue structure */
+	unsigned int		bouncesz;	/* Bounce buffer size */
+	int			qdepth;		/* Shared queue depth */
 };
 
 static inline bool mmc_large_sector(struct mmc_card *card)
@@ -307,6 +315,8 @@ static inline bool mmc_large_sector(struct mmc_card *card)
 	return card->ext_csd.data_sector_size == 4096;
 }
 
+bool mmc_card_is_blockaddr(struct mmc_card *card);
+
 #define mmc_card_mmc(c)		((c)->type == MMC_TYPE_MMC)
 #define mmc_card_sd(c)		((c)->type == MMC_TYPE_SD)
 #define mmc_card_sdio(c)	((c)->type == MMC_TYPE_SDIO)
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 83f1c4a9f03b..21385ac0c9b1 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -17,6 +17,7 @@
 #include <linux/mmc/core.h>
 #include <linux/mmc/card.h>
 #include <linux/mmc/pm.h>
+#include <linux/dma-direction.h>
 
 struct mmc_ios {
 	unsigned int	clock;			/* clock rate */
@@ -499,6 +500,11 @@ static inline bool mmc_can_retune(struct mmc_host *host)
 	return host->can_retune == 1;
 }
 
+static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data)
+{
+	return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+}
+
 int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
 int mmc_abort_tuning(struct mmc_host *host, u32 opcode);
 
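The new mmc_get_dma_dir() helper centralizes the flags-to-direction mapping that sunxi (above) and several other hosts open-coded before this series. Typical use in a host driver's scatterlist mapping path, sketched with an illustrative foo_ prefix:

#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>

static int foo_map_data(struct mmc_host *mmc, struct mmc_data *data)
{
	int count;

	/* MMC_DATA_WRITE maps to DMA_TO_DEVICE, reads to DMA_FROM_DEVICE */
	count = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
			   mmc_get_dma_dir(data));

	return count ? 0 : -ENOMEM;
}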