aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-10-05 14:34:53 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-10-05 14:34:53 -0400
commit64cbd16a8751fde075aa103dc7823a8c05805104 (patch)
tree09c6a4f46ceaaa6d949862413a064b7eb5f8c1b6
parentedadd0e5a7f9970553423ebd08172c9e3d1fb189 (diff)
parent0f75c404503cc49cbe92555fbab80a584c1f4ae2 (diff)
Merge tag 'mmc-v4.9' of git://git.linaro.org/people/ulf.hansson/mmc
Pull MMC updates from Ulf Hansson: MMC core: - Add support for sending commands during data transfer - Erase/discard/trim improvements - Improved error handling - Extend sysfs with SD status register - Document info about the vmmc/vmmcq regulators - Extend pwrseq-simple to manage an optional post-power-on-delay - Some various minor improvements and cleanups MMC host: - dw_mmc: Add reset support - dw_mmc: Return -EILSEQ for EBE and SBE error - dw_mmc: Some cleanups - dw_mmc-k3: Add UHS-I support Hisilicon Hikey - tmio: Add eMMC support - sh_mobile_sdhi: Add r8a7796 support - sunxi: Don't use sample clocks for sun4i/sun5i - sunxi: Add support for A64 mmc controller - sunxi: Some cleanups and improvements - sdhci: Support for sending commands during data transfer - sdhci: Do not allow tuning procedure to be interrupted - sdhci-pci: Enable SD/SDIO on Merrifield - sdhci-pci|acpi: Enable MMC_CAP_CMD_DURING_TFR - sdhci-pci: Some cleanups - sdhci-of-arasan: Set controller to test mode when no CD bit - sdhci-of-arasan: Some fixes for clocks and phys - sdhci-brcmstb: Don't use ADMA 64-bit when not supported - sdhci-tegra: Mark 64-bit DMA broken on Tegra124 - sdhci-esdhc-imx: Fixups related to data timeouts * tag 'mmc-v4.9' of git://git.linaro.org/people/ulf.hansson/mmc: (68 commits) mmc: dw_mmc: remove the deprecated "supports-highspeed" property mmc: dw_mmc: minor cleanup for dw_mci_adjust_fifoth mmc: dw_mmc: use macro to define ring buffer size mmc: dw_mmc: fix misleading error print if failing to do DMA transfer mmc: dw_mmc: avoid race condition of cpu and IDMAC mmc: dw_mmc: split out preparation of desc for IDMAC32 and IDMAC64 mmc: core: don't try to switch block size for dual rate mode mmc: sdhci-of-arasan: Set controller to test mode when no CD bit dt: sdhci-of-arasan: Add device tree option xlnx, fails-without-test-cd mmc: tmio: add eMMC support mmc: rtsx_usb: use new macro for R1 without CRC mmc: rtsx_pci: use new macro for R1 without CRC mmc: add define for R1 
response without CRC mmc: card: do away with indirection pointer mmc: sdhci-acpi: Set MMC_CAP_CMD_DURING_TFR for Intel eMMC controllers mmc: sdhci-pci: Set MMC_CAP_CMD_DURING_TFR for Intel eMMC controllers mmc: sdhci: Support cap_cmd_during_tfr requests mmc: mmc_test: Add tests for sending commands during transfer mmc: core: Add support for sending commands during data transfer mmc: sdhci-brcmstb: Fix incorrect capability ...
-rw-r--r--Documentation/devicetree/bindings/mmc/arasan,sdhci.txt3
-rw-r--r--Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.txt (renamed from Documentation/devicetree/bindings/mmc/brcm,bcm7425-sdhci.txt)4
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc.txt15
-rw-r--r--Documentation/devicetree/bindings/mmc/sunxi-mmc.txt7
-rw-r--r--Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt4
-rw-r--r--Documentation/devicetree/bindings/mmc/tmio_mmc.txt1
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi8
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi8
-rw-r--r--arch/arm/boot/dts/sun8i-a23-a33.dtsi6
-rw-r--r--arch/arm/boot/dts/sun8i-h3.dtsi6
-rw-r--r--drivers/mmc/card/block.c30
-rw-r--r--drivers/mmc/card/block.h1
-rw-r--r--drivers/mmc/card/mmc_test.c308
-rw-r--r--drivers/mmc/card/queue.c4
-rw-r--r--drivers/mmc/card/queue.h2
-rw-r--r--drivers/mmc/core/core.c181
-rw-r--r--drivers/mmc/core/mmc.c9
-rw-r--r--drivers/mmc/core/pwrseq_simple.c9
-rw-r--r--drivers/mmc/core/sd.c37
-rw-r--r--drivers/mmc/core/sdio_io.c47
-rw-r--r--drivers/mmc/core/sdio_ops.c9
-rw-r--r--drivers/mmc/host/davinci_mmc.c6
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c6
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c6
-rw-r--r--drivers/mmc/host/dw_mmc.c427
-rw-r--r--drivers/mmc/host/moxart-mmc.c5
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c2
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c2
-rw-r--r--drivers/mmc/host/sdhci-acpi.c2
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c6
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c4
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c7
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c136
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c2
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c62
-rw-r--r--drivers/mmc/host/sdhci-pci.h1
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c7
-rw-r--r--drivers/mmc/host/sdhci-tegra.c27
-rw-r--r--drivers/mmc/host/sdhci.c23
-rw-r--r--drivers/mmc/host/sdhci.h3
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c17
-rw-r--r--drivers/mmc/host/sunxi-mmc.c265
-rw-r--r--drivers/mmc/host/tmio_mmc.h4
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c47
-rw-r--r--include/linux/mmc/card.h1
-rw-r--r--include/linux/mmc/core.h10
-rw-r--r--include/linux/mmc/dw_mmc.h2
-rw-r--r--include/linux/mmc/host.h5
49 files changed, 1305 insertions, 481 deletions
diff --git a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
index 3404afa9b938..49df630bd44f 100644
--- a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
@@ -36,6 +36,9 @@ Optional Properties:
36 - #clock-cells: If specified this should be the value <0>. With this property 36 - #clock-cells: If specified this should be the value <0>. With this property
37 in place we will export a clock representing the Card Clock. This clock 37 in place we will export a clock representing the Card Clock. This clock
38 is expected to be consumed by our PHY. You must also specify 38 is expected to be consumed by our PHY. You must also specify
 39 - xlnx,fails-without-test-cd: when present, the controller doesn't work when
 40 the CD line is not connected properly or is not connected at all.
 41 Test mode can be used to force the controller to function.
39 42
40Example: 43Example:
41 sdhci@e0100000 { 44 sdhci@e0100000 {
diff --git a/Documentation/devicetree/bindings/mmc/brcm,bcm7425-sdhci.txt b/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.txt
index 82847174c37d..733b64a4d8eb 100644
--- a/Documentation/devicetree/bindings/mmc/brcm,bcm7425-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.txt
@@ -8,7 +8,9 @@ on Device Tree properties to enable them for SoC/Board combinations
8that support them. 8that support them.
9 9
10Required properties: 10Required properties:
11- compatible: "brcm,bcm7425-sdhci" 11- compatible: should be one of the following
12 - "brcm,bcm7425-sdhci"
13 - "brcm,bcm7445-sdhci"
12 14
13Refer to clocks/clock-bindings.txt for generic clock consumer properties. 15Refer to clocks/clock-bindings.txt for generic clock consumer properties.
14 16
diff --git a/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt
index ce0e76749671..e25436861867 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt
@@ -16,6 +16,8 @@ Optional properties:
16 See ../clocks/clock-bindings.txt for details. 16 See ../clocks/clock-bindings.txt for details.
17- clock-names : Must include the following entry: 17- clock-names : Must include the following entry:
18 "ext_clock" (External clock provided to the card). 18 "ext_clock" (External clock provided to the card).
19- post-power-on-delay-ms : Delay in ms after powering the card and
20 de-asserting the reset-gpios (if any)
19 21
20Example: 22Example:
21 23
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
index 22d1e1f3f38b..8a377827695b 100644
--- a/Documentation/devicetree/bindings/mmc/mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc.txt
@@ -75,6 +75,17 @@ Optional SDIO properties:
75- wakeup-source: Enables wake up of host system on SDIO IRQ assertion 75- wakeup-source: Enables wake up of host system on SDIO IRQ assertion
76 (Legacy property supported: "enable-sdio-wakeup") 76 (Legacy property supported: "enable-sdio-wakeup")
77 77
78MMC power
79---------
80
81Controllers may implement power control from both the connected cards and
82the IO signaling (for example to change to high-speed 1.8V signalling). If
83the system supports this, then the following two properties should point
84to valid regulator nodes:
85
86- vqmmc-supply: supply node for IO line power
87- vmmc-supply: supply node for card's power
88
78 89
79MMC power sequences: 90MMC power sequences:
80-------------------- 91--------------------
@@ -102,11 +113,13 @@ Required host node properties when using function subnodes:
102- #size-cells: should be zero. 113- #size-cells: should be zero.
103 114
104Required function subnode properties: 115Required function subnode properties:
105- compatible: name of SDIO function following generic names recommended practice
106- reg: Must contain the SDIO function number of the function this subnode 116- reg: Must contain the SDIO function number of the function this subnode
107 describes. A value of 0 denotes the memory SD function, values from 117 describes. A value of 0 denotes the memory SD function, values from
108 1 to 7 denote the SDIO functions. 118 1 to 7 denote the SDIO functions.
109 119
120Optional function subnode properties:
121- compatible: name of SDIO function following generic names recommended practice
122
110 123
111Examples 124Examples
112-------- 125--------
diff --git a/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt b/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
index 4bf41d833804..55cdd804cdba 100644
--- a/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
@@ -8,7 +8,12 @@ as the speed of SD standard 3.0.
8Absolute maximum transfer rate is 200MB/s 8Absolute maximum transfer rate is 200MB/s
9 9
10Required properties: 10Required properties:
11 - compatible : "allwinner,sun4i-a10-mmc" or "allwinner,sun5i-a13-mmc" 11 - compatible : should be one of:
12 * "allwinner,sun4i-a10-mmc"
13 * "allwinner,sun5i-a13-mmc"
14 * "allwinner,sun7i-a20-mmc"
15 * "allwinner,sun9i-a80-mmc"
16 * "allwinner,sun50i-a64-mmc"
12 - reg : mmc controller base registers 17 - reg : mmc controller base registers
13 - clocks : a list with 4 phandle + clock specifier pairs 18 - clocks : a list with 4 phandle + clock specifier pairs
14 - clock-names : must contain "ahb", "mmc", "output" and "sample" 19 - clock-names : must contain "ahb", "mmc", "output" and "sample"
diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
index 8636f5ae97e5..4e00e859e885 100644
--- a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
@@ -39,6 +39,10 @@ Required Properties:
39 39
40Optional properties: 40Optional properties:
41 41
42* resets: phandle + reset specifier pair, intended to represent hardware
43 reset signal present internally in some host controller IC designs.
44 See Documentation/devicetree/bindings/reset/reset.txt for details.
45
42* clocks: from common clock binding: handle to biu and ciu clocks for the 46* clocks: from common clock binding: handle to biu and ciu clocks for the
43 bus interface unit clock and the card interface unit clock. 47 bus interface unit clock and the card interface unit clock.
44 48
diff --git a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
index 0f610d4b5b00..13df9c2399c3 100644
--- a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
@@ -23,6 +23,7 @@ Required properties:
23 "renesas,sdhi-r8a7793" - SDHI IP on R8A7793 SoC 23 "renesas,sdhi-r8a7793" - SDHI IP on R8A7793 SoC
24 "renesas,sdhi-r8a7794" - SDHI IP on R8A7794 SoC 24 "renesas,sdhi-r8a7794" - SDHI IP on R8A7794 SoC
25 "renesas,sdhi-r8a7795" - SDHI IP on R8A7795 SoC 25 "renesas,sdhi-r8a7795" - SDHI IP on R8A7795 SoC
26 "renesas,sdhi-r8a7796" - SDHI IP on R8A7796 SoC
26 27
27Optional properties: 28Optional properties:
28- toshiba,mmc-wrprotect-disable: write-protect detection is unavailable 29- toshiba,mmc-wrprotect-disable: write-protect detection is unavailable
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 1867af24ff52..0d24f107ede0 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -469,7 +469,7 @@
469 }; 469 };
470 470
471 mmc0: mmc@01c0f000 { 471 mmc0: mmc@01c0f000 {
472 compatible = "allwinner,sun5i-a13-mmc"; 472 compatible = "allwinner,sun7i-a20-mmc";
473 reg = <0x01c0f000 0x1000>; 473 reg = <0x01c0f000 0x1000>;
474 clocks = <&ahb1_gates 8>, 474 clocks = <&ahb1_gates 8>,
475 <&mmc0_clk 0>, 475 <&mmc0_clk 0>,
@@ -488,7 +488,7 @@
488 }; 488 };
489 489
490 mmc1: mmc@01c10000 { 490 mmc1: mmc@01c10000 {
491 compatible = "allwinner,sun5i-a13-mmc"; 491 compatible = "allwinner,sun7i-a20-mmc";
492 reg = <0x01c10000 0x1000>; 492 reg = <0x01c10000 0x1000>;
493 clocks = <&ahb1_gates 9>, 493 clocks = <&ahb1_gates 9>,
494 <&mmc1_clk 0>, 494 <&mmc1_clk 0>,
@@ -507,7 +507,7 @@
507 }; 507 };
508 508
509 mmc2: mmc@01c11000 { 509 mmc2: mmc@01c11000 {
510 compatible = "allwinner,sun5i-a13-mmc"; 510 compatible = "allwinner,sun7i-a20-mmc";
511 reg = <0x01c11000 0x1000>; 511 reg = <0x01c11000 0x1000>;
512 clocks = <&ahb1_gates 10>, 512 clocks = <&ahb1_gates 10>,
513 <&mmc2_clk 0>, 513 <&mmc2_clk 0>,
@@ -526,7 +526,7 @@
526 }; 526 };
527 527
528 mmc3: mmc@01c12000 { 528 mmc3: mmc@01c12000 {
529 compatible = "allwinner,sun5i-a13-mmc"; 529 compatible = "allwinner,sun7i-a20-mmc";
530 reg = <0x01c12000 0x1000>; 530 reg = <0x01c12000 0x1000>;
531 clocks = <&ahb1_gates 11>, 531 clocks = <&ahb1_gates 11>,
532 <&mmc3_clk 0>, 532 <&mmc3_clk 0>,
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index bd0c47660243..94cf5a1c7172 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -905,7 +905,7 @@
905 }; 905 };
906 906
907 mmc0: mmc@01c0f000 { 907 mmc0: mmc@01c0f000 {
908 compatible = "allwinner,sun5i-a13-mmc"; 908 compatible = "allwinner,sun7i-a20-mmc";
909 reg = <0x01c0f000 0x1000>; 909 reg = <0x01c0f000 0x1000>;
910 clocks = <&ahb_gates 8>, 910 clocks = <&ahb_gates 8>,
911 <&mmc0_clk 0>, 911 <&mmc0_clk 0>,
@@ -922,7 +922,7 @@
922 }; 922 };
923 923
924 mmc1: mmc@01c10000 { 924 mmc1: mmc@01c10000 {
925 compatible = "allwinner,sun5i-a13-mmc"; 925 compatible = "allwinner,sun7i-a20-mmc";
926 reg = <0x01c10000 0x1000>; 926 reg = <0x01c10000 0x1000>;
927 clocks = <&ahb_gates 9>, 927 clocks = <&ahb_gates 9>,
928 <&mmc1_clk 0>, 928 <&mmc1_clk 0>,
@@ -939,7 +939,7 @@
939 }; 939 };
940 940
941 mmc2: mmc@01c11000 { 941 mmc2: mmc@01c11000 {
942 compatible = "allwinner,sun5i-a13-mmc"; 942 compatible = "allwinner,sun7i-a20-mmc";
943 reg = <0x01c11000 0x1000>; 943 reg = <0x01c11000 0x1000>;
944 clocks = <&ahb_gates 10>, 944 clocks = <&ahb_gates 10>,
945 <&mmc2_clk 0>, 945 <&mmc2_clk 0>,
@@ -956,7 +956,7 @@
956 }; 956 };
957 957
958 mmc3: mmc@01c12000 { 958 mmc3: mmc@01c12000 {
959 compatible = "allwinner,sun5i-a13-mmc"; 959 compatible = "allwinner,sun7i-a20-mmc";
960 reg = <0x01c12000 0x1000>; 960 reg = <0x01c12000 0x1000>;
961 clocks = <&ahb_gates 11>, 961 clocks = <&ahb_gates 11>,
962 <&mmc3_clk 0>, 962 <&mmc3_clk 0>,
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index 7e05e09e61c7..e3b196e08ccf 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -266,7 +266,7 @@
266 }; 266 };
267 267
268 mmc0: mmc@01c0f000 { 268 mmc0: mmc@01c0f000 {
269 compatible = "allwinner,sun5i-a13-mmc"; 269 compatible = "allwinner,sun7i-a20-mmc";
270 reg = <0x01c0f000 0x1000>; 270 reg = <0x01c0f000 0x1000>;
271 clocks = <&ahb1_gates 8>, 271 clocks = <&ahb1_gates 8>,
272 <&mmc0_clk 0>, 272 <&mmc0_clk 0>,
@@ -285,7 +285,7 @@
285 }; 285 };
286 286
287 mmc1: mmc@01c10000 { 287 mmc1: mmc@01c10000 {
288 compatible = "allwinner,sun5i-a13-mmc"; 288 compatible = "allwinner,sun7i-a20-mmc";
289 reg = <0x01c10000 0x1000>; 289 reg = <0x01c10000 0x1000>;
290 clocks = <&ahb1_gates 9>, 290 clocks = <&ahb1_gates 9>,
291 <&mmc1_clk 0>, 291 <&mmc1_clk 0>,
@@ -304,7 +304,7 @@
304 }; 304 };
305 305
306 mmc2: mmc@01c11000 { 306 mmc2: mmc@01c11000 {
307 compatible = "allwinner,sun5i-a13-mmc"; 307 compatible = "allwinner,sun7i-a20-mmc";
308 reg = <0x01c11000 0x1000>; 308 reg = <0x01c11000 0x1000>;
309 clocks = <&ahb1_gates 10>, 309 clocks = <&ahb1_gates 10>,
310 <&mmc2_clk 0>, 310 <&mmc2_clk 0>,
diff --git a/arch/arm/boot/dts/sun8i-h3.dtsi b/arch/arm/boot/dts/sun8i-h3.dtsi
index fdf9fdbda267..8a95e3613488 100644
--- a/arch/arm/boot/dts/sun8i-h3.dtsi
+++ b/arch/arm/boot/dts/sun8i-h3.dtsi
@@ -150,7 +150,7 @@
150 }; 150 };
151 151
152 mmc0: mmc@01c0f000 { 152 mmc0: mmc@01c0f000 {
153 compatible = "allwinner,sun5i-a13-mmc"; 153 compatible = "allwinner,sun7i-a20-mmc";
154 reg = <0x01c0f000 0x1000>; 154 reg = <0x01c0f000 0x1000>;
155 clocks = <&ccu CLK_BUS_MMC0>, 155 clocks = <&ccu CLK_BUS_MMC0>,
156 <&ccu CLK_MMC0>, 156 <&ccu CLK_MMC0>,
@@ -169,7 +169,7 @@
169 }; 169 };
170 170
171 mmc1: mmc@01c10000 { 171 mmc1: mmc@01c10000 {
172 compatible = "allwinner,sun5i-a13-mmc"; 172 compatible = "allwinner,sun7i-a20-mmc";
173 reg = <0x01c10000 0x1000>; 173 reg = <0x01c10000 0x1000>;
174 clocks = <&ccu CLK_BUS_MMC1>, 174 clocks = <&ccu CLK_BUS_MMC1>,
175 <&ccu CLK_MMC1>, 175 <&ccu CLK_MMC1>,
@@ -188,7 +188,7 @@
188 }; 188 };
189 189
190 mmc2: mmc@01c11000 { 190 mmc2: mmc@01c11000 {
191 compatible = "allwinner,sun5i-a13-mmc"; 191 compatible = "allwinner,sun7i-a20-mmc";
192 reg = <0x01c11000 0x1000>; 192 reg = <0x01c11000 0x1000>;
193 clocks = <&ccu CLK_BUS_MMC2>, 193 clocks = <&ccu CLK_BUS_MMC2>,
194 <&ccu CLK_MMC2>, 194 <&ccu CLK_MMC2>,
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 2206d4477dbb..c3335112e68c 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -142,8 +142,6 @@ static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
142{ 142{
143 struct mmc_packed *packed = mqrq->packed; 143 struct mmc_packed *packed = mqrq->packed;
144 144
145 BUG_ON(!packed);
146
147 mqrq->cmd_type = MMC_PACKED_NONE; 145 mqrq->cmd_type = MMC_PACKED_NONE;
148 packed->nr_entries = MMC_PACKED_NR_ZERO; 146 packed->nr_entries = MMC_PACKED_NR_ZERO;
149 packed->idx_failure = MMC_PACKED_NR_IDX; 147 packed->idx_failure = MMC_PACKED_NR_IDX;
@@ -1443,8 +1441,6 @@ static int mmc_blk_packed_err_check(struct mmc_card *card,
1443 int err, check, status; 1441 int err, check, status;
1444 u8 *ext_csd; 1442 u8 *ext_csd;
1445 1443
1446 BUG_ON(!packed);
1447
1448 packed->retries--; 1444 packed->retries--;
1449 check = mmc_blk_err_check(card, areq); 1445 check = mmc_blk_err_check(card, areq);
1450 err = get_card_status(card, &status, 0); 1446 err = get_card_status(card, &status, 0);
@@ -1673,6 +1669,18 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
1673 u8 max_packed_rw = 0; 1669 u8 max_packed_rw = 0;
1674 u8 reqs = 0; 1670 u8 reqs = 0;
1675 1671
1672 /*
1673 * We don't need to check packed for any further
1674 * operation of packed stuff as we set MMC_PACKED_NONE
 1675 * and return zero for reqs if getting null packed. Also
1676 * we clean the flag of MMC_BLK_PACKED_CMD to avoid doing
1677 * it again when removing blk req.
1678 */
1679 if (!mqrq->packed) {
1680 md->flags &= (~MMC_BLK_PACKED_CMD);
1681 goto no_packed;
1682 }
1683
1676 if (!(md->flags & MMC_BLK_PACKED_CMD)) 1684 if (!(md->flags & MMC_BLK_PACKED_CMD))
1677 goto no_packed; 1685 goto no_packed;
1678 1686
@@ -1782,8 +1790,6 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
1782 u8 hdr_blocks; 1790 u8 hdr_blocks;
1783 u8 i = 1; 1791 u8 i = 1;
1784 1792
1785 BUG_ON(!packed);
1786
1787 mqrq->cmd_type = MMC_PACKED_WRITE; 1793 mqrq->cmd_type = MMC_PACKED_WRITE;
1788 packed->blocks = 0; 1794 packed->blocks = 0;
1789 packed->idx_failure = MMC_PACKED_NR_IDX; 1795 packed->idx_failure = MMC_PACKED_NR_IDX;
@@ -1887,8 +1893,6 @@ static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
1887 int idx = packed->idx_failure, i = 0; 1893 int idx = packed->idx_failure, i = 0;
1888 int ret = 0; 1894 int ret = 0;
1889 1895
1890 BUG_ON(!packed);
1891
1892 while (!list_empty(&packed->list)) { 1896 while (!list_empty(&packed->list)) {
1893 prq = list_entry_rq(packed->list.next); 1897 prq = list_entry_rq(packed->list.next);
1894 if (idx == i) { 1898 if (idx == i) {
@@ -1917,8 +1921,6 @@ static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
1917 struct request *prq; 1921 struct request *prq;
1918 struct mmc_packed *packed = mq_rq->packed; 1922 struct mmc_packed *packed = mq_rq->packed;
1919 1923
1920 BUG_ON(!packed);
1921
1922 while (!list_empty(&packed->list)) { 1924 while (!list_empty(&packed->list)) {
1923 prq = list_entry_rq(packed->list.next); 1925 prq = list_entry_rq(packed->list.next);
1924 list_del_init(&prq->queuelist); 1926 list_del_init(&prq->queuelist);
@@ -1935,8 +1937,6 @@ static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
1935 struct request_queue *q = mq->queue; 1937 struct request_queue *q = mq->queue;
1936 struct mmc_packed *packed = mq_rq->packed; 1938 struct mmc_packed *packed = mq_rq->packed;
1937 1939
1938 BUG_ON(!packed);
1939
1940 while (!list_empty(&packed->list)) { 1940 while (!list_empty(&packed->list)) {
1941 prq = list_entry_rq(packed->list.prev); 1941 prq = list_entry_rq(packed->list.prev);
1942 if (prq->queuelist.prev != &packed->list) { 1942 if (prq->queuelist.prev != &packed->list) {
@@ -2144,7 +2144,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
2144 return 0; 2144 return 0;
2145} 2145}
2146 2146
2147static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) 2147int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2148{ 2148{
2149 int ret; 2149 int ret;
2150 struct mmc_blk_data *md = mq->data; 2150 struct mmc_blk_data *md = mq->data;
@@ -2265,7 +2265,6 @@ again:
2265 if (ret) 2265 if (ret)
2266 goto err_putdisk; 2266 goto err_putdisk;
2267 2267
2268 md->queue.issue_fn = mmc_blk_issue_rq;
2269 md->queue.data = md; 2268 md->queue.data = md;
2270 2269
2271 md->disk->major = MMC_BLOCK_MAJOR; 2270 md->disk->major = MMC_BLOCK_MAJOR;
@@ -2303,7 +2302,8 @@ again:
2303 set_capacity(md->disk, size); 2302 set_capacity(md->disk, size);
2304 2303
2305 if (mmc_host_cmd23(card->host)) { 2304 if (mmc_host_cmd23(card->host)) {
2306 if (mmc_card_mmc(card) || 2305 if ((mmc_card_mmc(card) &&
2306 card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
2307 (mmc_card_sd(card) && 2307 (mmc_card_sd(card) &&
2308 card->scr.cmds & SD_SCR_CMD23_SUPPORT)) 2308 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
2309 md->flags |= MMC_BLK_CMD23; 2309 md->flags |= MMC_BLK_CMD23;
diff --git a/drivers/mmc/card/block.h b/drivers/mmc/card/block.h
new file mode 100644
index 000000000000..cdabb2ee74be
--- /dev/null
+++ b/drivers/mmc/card/block.h
@@ -0,0 +1 @@
int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req);
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index c032eef45762..5a8dc5a76e0d 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -184,6 +184,29 @@ static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
184 return mmc_set_blocklen(test->card, size); 184 return mmc_set_blocklen(test->card, size);
185} 185}
186 186
187static bool mmc_test_card_cmd23(struct mmc_card *card)
188{
189 return mmc_card_mmc(card) ||
190 (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
191}
192
193static void mmc_test_prepare_sbc(struct mmc_test_card *test,
194 struct mmc_request *mrq, unsigned int blocks)
195{
196 struct mmc_card *card = test->card;
197
198 if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
199 !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
200 (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
201 mrq->sbc = NULL;
202 return;
203 }
204
205 mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
206 mrq->sbc->arg = blocks;
207 mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
208}
209
187/* 210/*
188 * Fill in the mmc_request structure given a set of transfer parameters. 211 * Fill in the mmc_request structure given a set of transfer parameters.
189 */ 212 */
@@ -221,6 +244,8 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
221 mrq->data->sg = sg; 244 mrq->data->sg = sg;
222 mrq->data->sg_len = sg_len; 245 mrq->data->sg_len = sg_len;
223 246
247 mmc_test_prepare_sbc(test, mrq, blocks);
248
224 mmc_set_data_timeout(mrq->data, test->card); 249 mmc_set_data_timeout(mrq->data, test->card);
225} 250}
226 251
@@ -693,6 +718,8 @@ static int mmc_test_check_result(struct mmc_test_card *test,
693 718
694 ret = 0; 719 ret = 0;
695 720
721 if (mrq->sbc && mrq->sbc->error)
722 ret = mrq->sbc->error;
696 if (!ret && mrq->cmd->error) 723 if (!ret && mrq->cmd->error)
697 ret = mrq->cmd->error; 724 ret = mrq->cmd->error;
698 if (!ret && mrq->data->error) 725 if (!ret && mrq->data->error)
@@ -2278,6 +2305,245 @@ static int mmc_test_reset(struct mmc_test_card *test)
2278 return RESULT_FAIL; 2305 return RESULT_FAIL;
2279} 2306}
2280 2307
2308struct mmc_test_req {
2309 struct mmc_request mrq;
2310 struct mmc_command sbc;
2311 struct mmc_command cmd;
2312 struct mmc_command stop;
2313 struct mmc_command status;
2314 struct mmc_data data;
2315};
2316
2317static struct mmc_test_req *mmc_test_req_alloc(void)
2318{
2319 struct mmc_test_req *rq = kzalloc(sizeof(*rq), GFP_KERNEL);
2320
2321 if (rq) {
2322 rq->mrq.cmd = &rq->cmd;
2323 rq->mrq.data = &rq->data;
2324 rq->mrq.stop = &rq->stop;
2325 }
2326
2327 return rq;
2328}
2329
2330static int mmc_test_send_status(struct mmc_test_card *test,
2331 struct mmc_command *cmd)
2332{
2333 memset(cmd, 0, sizeof(*cmd));
2334
2335 cmd->opcode = MMC_SEND_STATUS;
2336 if (!mmc_host_is_spi(test->card->host))
2337 cmd->arg = test->card->rca << 16;
2338 cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
2339
2340 return mmc_wait_for_cmd(test->card->host, cmd, 0);
2341}
2342
2343static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
2344 unsigned int dev_addr, int use_sbc,
2345 int repeat_cmd, int write, int use_areq)
2346{
2347 struct mmc_test_req *rq = mmc_test_req_alloc();
2348 struct mmc_host *host = test->card->host;
2349 struct mmc_test_area *t = &test->area;
2350 struct mmc_async_req areq;
2351 struct mmc_request *mrq;
2352 unsigned long timeout;
2353 bool expired = false;
2354 int ret = 0, cmd_ret;
2355 u32 status = 0;
2356 int count = 0;
2357
2358 if (!rq)
2359 return -ENOMEM;
2360
2361 mrq = &rq->mrq;
2362 if (use_sbc)
2363 mrq->sbc = &rq->sbc;
2364 mrq->cap_cmd_during_tfr = true;
2365
2366 areq.mrq = mrq;
2367 areq.err_check = mmc_test_check_result_async;
2368
2369 mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
2370 512, write);
2371
2372 if (use_sbc && t->blocks > 1 && !mrq->sbc) {
2373 ret = mmc_host_cmd23(host) ?
2374 RESULT_UNSUP_CARD :
2375 RESULT_UNSUP_HOST;
2376 goto out_free;
2377 }
2378
2379 /* Start ongoing data request */
2380 if (use_areq) {
2381 mmc_start_req(host, &areq, &ret);
2382 if (ret)
2383 goto out_free;
2384 } else {
2385 mmc_wait_for_req(host, mrq);
2386 }
2387
2388 timeout = jiffies + msecs_to_jiffies(3000);
2389 do {
2390 count += 1;
2391
2392 /* Send status command while data transfer in progress */
2393 cmd_ret = mmc_test_send_status(test, &rq->status);
2394 if (cmd_ret)
2395 break;
2396
2397 status = rq->status.resp[0];
2398 if (status & R1_ERROR) {
2399 cmd_ret = -EIO;
2400 break;
2401 }
2402
2403 if (mmc_is_req_done(host, mrq))
2404 break;
2405
2406 expired = time_after(jiffies, timeout);
2407 if (expired) {
2408 pr_info("%s: timeout waiting for Tran state status %#x\n",
2409 mmc_hostname(host), status);
2410 cmd_ret = -ETIMEDOUT;
2411 break;
2412 }
2413 } while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);
2414
2415 /* Wait for data request to complete */
2416 if (use_areq)
2417 mmc_start_req(host, NULL, &ret);
2418 else
2419 mmc_wait_for_req_done(test->card->host, mrq);
2420
2421 /*
2422 * For cap_cmd_during_tfr request, upper layer must send stop if
2423 * required.
2424 */
2425 if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) {
2426 if (ret)
2427 mmc_wait_for_cmd(host, mrq->data->stop, 0);
2428 else
2429 ret = mmc_wait_for_cmd(host, mrq->data->stop, 0);
2430 }
2431
2432 if (ret)
2433 goto out_free;
2434
2435 if (cmd_ret) {
2436 pr_info("%s: Send Status failed: status %#x, error %d\n",
2437 mmc_hostname(test->card->host), status, cmd_ret);
2438 }
2439
2440 ret = mmc_test_check_result(test, mrq);
2441 if (ret)
2442 goto out_free;
2443
2444 ret = mmc_test_wait_busy(test);
2445 if (ret)
2446 goto out_free;
2447
2448 if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
2449 pr_info("%s: %d commands completed during transfer of %u blocks\n",
2450 mmc_hostname(test->card->host), count, t->blocks);
2451
2452 if (cmd_ret)
2453 ret = cmd_ret;
2454out_free:
2455 kfree(rq);
2456
2457 return ret;
2458}
2459
2460static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
2461 unsigned long sz, int use_sbc, int write,
2462 int use_areq)
2463{
2464 struct mmc_test_area *t = &test->area;
2465 int ret;
2466
2467 if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
2468 return RESULT_UNSUP_HOST;
2469
2470 ret = mmc_test_area_map(test, sz, 0, 0);
2471 if (ret)
2472 return ret;
2473
2474 ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
2475 use_areq);
2476 if (ret)
2477 return ret;
2478
2479 return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
2480 use_areq);
2481}
2482
2483static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc,
2484 int write, int use_areq)
2485{
2486 struct mmc_test_area *t = &test->area;
2487 unsigned long sz;
2488 int ret;
2489
2490 for (sz = 512; sz <= t->max_tfr; sz += 512) {
2491 ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write,
2492 use_areq);
2493 if (ret)
2494 return ret;
2495 }
2496 return 0;
2497}
2498
2499/*
2500 * Commands during read - no Set Block Count (CMD23).
2501 */
2502static int mmc_test_cmds_during_read(struct mmc_test_card *test)
2503{
2504 return mmc_test_cmds_during_tfr(test, 0, 0, 0);
2505}
2506
2507/*
2508 * Commands during write - no Set Block Count (CMD23).
2509 */
2510static int mmc_test_cmds_during_write(struct mmc_test_card *test)
2511{
2512 return mmc_test_cmds_during_tfr(test, 0, 1, 0);
2513}
2514
2515/*
2516 * Commands during read - use Set Block Count (CMD23).
2517 */
2518static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
2519{
2520 return mmc_test_cmds_during_tfr(test, 1, 0, 0);
2521}
2522
2523/*
2524 * Commands during write - use Set Block Count (CMD23).
2525 */
2526static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
2527{
2528 return mmc_test_cmds_during_tfr(test, 1, 1, 0);
2529}
2530
2531/*
2532 * Commands during non-blocking read - use Set Block Count (CMD23).
2533 */
2534static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
2535{
2536 return mmc_test_cmds_during_tfr(test, 1, 0, 1);
2537}
2538
2539/*
2540 * Commands during non-blocking write - use Set Block Count (CMD23).
2541 */
2542static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
2543{
2544 return mmc_test_cmds_during_tfr(test, 1, 1, 1);
2545}
2546
2281static const struct mmc_test_case mmc_test_cases[] = { 2547static const struct mmc_test_case mmc_test_cases[] = {
2282 { 2548 {
2283 .name = "Basic write (no data verification)", 2549 .name = "Basic write (no data verification)",
@@ -2605,6 +2871,48 @@ static const struct mmc_test_case mmc_test_cases[] = {
2605 .name = "Reset test", 2871 .name = "Reset test",
2606 .run = mmc_test_reset, 2872 .run = mmc_test_reset,
2607 }, 2873 },
2874
2875 {
2876 .name = "Commands during read - no Set Block Count (CMD23)",
2877 .prepare = mmc_test_area_prepare,
2878 .run = mmc_test_cmds_during_read,
2879 .cleanup = mmc_test_area_cleanup,
2880 },
2881
2882 {
2883 .name = "Commands during write - no Set Block Count (CMD23)",
2884 .prepare = mmc_test_area_prepare,
2885 .run = mmc_test_cmds_during_write,
2886 .cleanup = mmc_test_area_cleanup,
2887 },
2888
2889 {
2890 .name = "Commands during read - use Set Block Count (CMD23)",
2891 .prepare = mmc_test_area_prepare,
2892 .run = mmc_test_cmds_during_read_cmd23,
2893 .cleanup = mmc_test_area_cleanup,
2894 },
2895
2896 {
2897 .name = "Commands during write - use Set Block Count (CMD23)",
2898 .prepare = mmc_test_area_prepare,
2899 .run = mmc_test_cmds_during_write_cmd23,
2900 .cleanup = mmc_test_area_cleanup,
2901 },
2902
2903 {
2904 .name = "Commands during non-blocking read - use Set Block Count (CMD23)",
2905 .prepare = mmc_test_area_prepare,
2906 .run = mmc_test_cmds_during_read_cmd23_nonblock,
2907 .cleanup = mmc_test_area_cleanup,
2908 },
2909
2910 {
2911 .name = "Commands during non-blocking write - use Set Block Count (CMD23)",
2912 .prepare = mmc_test_area_prepare,
2913 .run = mmc_test_cmds_during_write_cmd23_nonblock,
2914 .cleanup = mmc_test_area_cleanup,
2915 },
2608}; 2916};
2609 2917
2610static DEFINE_MUTEX(mmc_test_lock); 2918static DEFINE_MUTEX(mmc_test_lock);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 708057261b38..8037f73a109a 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -19,7 +19,9 @@
19 19
20#include <linux/mmc/card.h> 20#include <linux/mmc/card.h>
21#include <linux/mmc/host.h> 21#include <linux/mmc/host.h>
22
22#include "queue.h" 23#include "queue.h"
24#include "block.h"
23 25
24#define MMC_QUEUE_BOUNCESZ 65536 26#define MMC_QUEUE_BOUNCESZ 65536
25 27
@@ -68,7 +70,7 @@ static int mmc_queue_thread(void *d)
68 bool req_is_special = mmc_req_is_special(req); 70 bool req_is_special = mmc_req_is_special(req);
69 71
70 set_current_state(TASK_RUNNING); 72 set_current_state(TASK_RUNNING);
71 mq->issue_fn(mq, req); 73 mmc_blk_issue_rq(mq, req);
72 cond_resched(); 74 cond_resched();
73 if (mq->flags & MMC_QUEUE_NEW_REQUEST) { 75 if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
74 mq->flags &= ~MMC_QUEUE_NEW_REQUEST; 76 mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index fee5e1271465..3c15a75bae86 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -57,8 +57,6 @@ struct mmc_queue {
57 unsigned int flags; 57 unsigned int flags;
58#define MMC_QUEUE_SUSPENDED (1 << 0) 58#define MMC_QUEUE_SUSPENDED (1 << 0)
59#define MMC_QUEUE_NEW_REQUEST (1 << 1) 59#define MMC_QUEUE_NEW_REQUEST (1 << 1)
60
61 int (*issue_fn)(struct mmc_queue *, struct request *);
62 void *data; 60 void *data;
63 struct request_queue *queue; 61 struct request_queue *queue;
64 struct mmc_queue_req mqrq[2]; 62 struct mmc_queue_req mqrq[2];
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index e55cde6d436d..2553d903a82b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -58,6 +58,9 @@
58 */ 58 */
59#define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */ 59#define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */
60 60
61/* The max erase timeout, used when host->max_busy_timeout isn't specified */
62#define MMC_ERASE_TIMEOUT_MS (60 * 1000) /* 60 s */
63
61static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; 64static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
62 65
63/* 66/*
@@ -117,6 +120,24 @@ static inline void mmc_should_fail_request(struct mmc_host *host,
117 120
118#endif /* CONFIG_FAIL_MMC_REQUEST */ 121#endif /* CONFIG_FAIL_MMC_REQUEST */
119 122
123static inline void mmc_complete_cmd(struct mmc_request *mrq)
124{
125 if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
126 complete_all(&mrq->cmd_completion);
127}
128
129void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
130{
131 if (!mrq->cap_cmd_during_tfr)
132 return;
133
134 mmc_complete_cmd(mrq);
135
136 pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
137 mmc_hostname(host), mrq->cmd->opcode);
138}
139EXPORT_SYMBOL(mmc_command_done);
140
120/** 141/**
121 * mmc_request_done - finish processing an MMC request 142 * mmc_request_done - finish processing an MMC request
122 * @host: MMC host which completed request 143 * @host: MMC host which completed request
@@ -143,6 +164,11 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
143 cmd->retries = 0; 164 cmd->retries = 0;
144 } 165 }
145 166
167 if (host->ongoing_mrq == mrq)
168 host->ongoing_mrq = NULL;
169
170 mmc_complete_cmd(mrq);
171
146 trace_mmc_request_done(host, mrq); 172 trace_mmc_request_done(host, mrq);
147 173
148 if (err && cmd->retries && !mmc_card_removed(host->card)) { 174 if (err && cmd->retries && !mmc_card_removed(host->card)) {
@@ -155,7 +181,8 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
155 } else { 181 } else {
156 mmc_should_fail_request(host, mrq); 182 mmc_should_fail_request(host, mrq);
157 183
158 led_trigger_event(host->led, LED_OFF); 184 if (!host->ongoing_mrq)
185 led_trigger_event(host->led, LED_OFF);
159 186
160 if (mrq->sbc) { 187 if (mrq->sbc) {
161 pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n", 188 pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
@@ -220,6 +247,15 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
220 } 247 }
221 } 248 }
222 249
250 if (mrq->cap_cmd_during_tfr) {
251 host->ongoing_mrq = mrq;
252 /*
253 * Retry path could come through here without having waiting on
254 * cmd_completion, so ensure it is reinitialised.
255 */
256 reinit_completion(&mrq->cmd_completion);
257 }
258
223 trace_mmc_request_start(host, mrq); 259 trace_mmc_request_start(host, mrq);
224 260
225 host->ops->request(host, mrq); 261 host->ops->request(host, mrq);
@@ -386,6 +422,18 @@ static void mmc_wait_done(struct mmc_request *mrq)
386 complete(&mrq->completion); 422 complete(&mrq->completion);
387} 423}
388 424
425static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
426{
427 struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);
428
429 /*
430 * If there is an ongoing transfer, wait for the command line to become
431 * available.
432 */
433 if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
434 wait_for_completion(&ongoing_mrq->cmd_completion);
435}
436
389/* 437/*
390 *__mmc_start_data_req() - starts data request 438 *__mmc_start_data_req() - starts data request
391 * @host: MMC host to start the request 439 * @host: MMC host to start the request
@@ -393,17 +441,24 @@ static void mmc_wait_done(struct mmc_request *mrq)
393 * 441 *
394 * Sets the done callback to be called when request is completed by the card. 442 * Sets the done callback to be called when request is completed by the card.
395 * Starts data mmc request execution 443 * Starts data mmc request execution
444 * If an ongoing transfer is already in progress, wait for the command line
445 * to become available before sending another command.
396 */ 446 */
397static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq) 447static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
398{ 448{
399 int err; 449 int err;
400 450
451 mmc_wait_ongoing_tfr_cmd(host);
452
401 mrq->done = mmc_wait_data_done; 453 mrq->done = mmc_wait_data_done;
402 mrq->host = host; 454 mrq->host = host;
403 455
456 init_completion(&mrq->cmd_completion);
457
404 err = mmc_start_request(host, mrq); 458 err = mmc_start_request(host, mrq);
405 if (err) { 459 if (err) {
406 mrq->cmd->error = err; 460 mrq->cmd->error = err;
461 mmc_complete_cmd(mrq);
407 mmc_wait_data_done(mrq); 462 mmc_wait_data_done(mrq);
408 } 463 }
409 464
@@ -414,12 +469,17 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
414{ 469{
415 int err; 470 int err;
416 471
472 mmc_wait_ongoing_tfr_cmd(host);
473
417 init_completion(&mrq->completion); 474 init_completion(&mrq->completion);
418 mrq->done = mmc_wait_done; 475 mrq->done = mmc_wait_done;
419 476
477 init_completion(&mrq->cmd_completion);
478
420 err = mmc_start_request(host, mrq); 479 err = mmc_start_request(host, mrq);
421 if (err) { 480 if (err) {
422 mrq->cmd->error = err; 481 mrq->cmd->error = err;
482 mmc_complete_cmd(mrq);
423 complete(&mrq->completion); 483 complete(&mrq->completion);
424 } 484 }
425 485
@@ -483,8 +543,7 @@ static int mmc_wait_for_data_req_done(struct mmc_host *host,
483 return err; 543 return err;
484} 544}
485 545
486static void mmc_wait_for_req_done(struct mmc_host *host, 546void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
487 struct mmc_request *mrq)
488{ 547{
489 struct mmc_command *cmd; 548 struct mmc_command *cmd;
490 549
@@ -525,6 +584,28 @@ static void mmc_wait_for_req_done(struct mmc_host *host,
525 584
526 mmc_retune_release(host); 585 mmc_retune_release(host);
527} 586}
587EXPORT_SYMBOL(mmc_wait_for_req_done);
588
589/**
590 * mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
591 * @host: MMC host
592 * @mrq: MMC request
593 *
594 * mmc_is_req_done() is used with requests that have
595 * mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
596 * starting a request and before waiting for it to complete. That is,
597 * either in between calls to mmc_start_req(), or after mmc_wait_for_req()
598 * and before mmc_wait_for_req_done(). If it is called at other times the
599 * result is not meaningful.
600 */
601bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
602{
603 if (host->areq)
604 return host->context_info.is_done_rcv;
605 else
606 return completion_done(&mrq->completion);
607}
608EXPORT_SYMBOL(mmc_is_req_done);
528 609
529/** 610/**
530 * mmc_pre_req - Prepare for a new request 611 * mmc_pre_req - Prepare for a new request
@@ -645,13 +726,18 @@ EXPORT_SYMBOL(mmc_start_req);
645 * @mrq: MMC request to start 726 * @mrq: MMC request to start
646 * 727 *
647 * Start a new MMC custom command request for a host, and wait 728 * Start a new MMC custom command request for a host, and wait
648 * for the command to complete. Does not attempt to parse the 729 * for the command to complete. In the case of 'cap_cmd_during_tfr'
649 * response. 730 * requests, the transfer is ongoing and the caller can issue further
731 * commands that do not use the data lines, and then wait by calling
732 * mmc_wait_for_req_done().
733 * Does not attempt to parse the response.
650 */ 734 */
651void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) 735void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
652{ 736{
653 __mmc_start_req(host, mrq); 737 __mmc_start_req(host, mrq);
654 mmc_wait_for_req_done(host, mrq); 738
739 if (!mrq->cap_cmd_during_tfr)
740 mmc_wait_for_req_done(host, mrq);
655} 741}
656EXPORT_SYMBOL(mmc_wait_for_req); 742EXPORT_SYMBOL(mmc_wait_for_req);
657 743
@@ -2202,6 +2288,54 @@ out:
2202 return err; 2288 return err;
2203} 2289}
2204 2290
2291static unsigned int mmc_align_erase_size(struct mmc_card *card,
2292 unsigned int *from,
2293 unsigned int *to,
2294 unsigned int nr)
2295{
2296 unsigned int from_new = *from, nr_new = nr, rem;
2297
2298 /*
2299 * When the 'card->erase_size' is power of 2, we can use round_up/down()
2300 * to align the erase size efficiently.
2301 */
2302 if (is_power_of_2(card->erase_size)) {
2303 unsigned int temp = from_new;
2304
2305 from_new = round_up(temp, card->erase_size);
2306 rem = from_new - temp;
2307
2308 if (nr_new > rem)
2309 nr_new -= rem;
2310 else
2311 return 0;
2312
2313 nr_new = round_down(nr_new, card->erase_size);
2314 } else {
2315 rem = from_new % card->erase_size;
2316 if (rem) {
2317 rem = card->erase_size - rem;
2318 from_new += rem;
2319 if (nr_new > rem)
2320 nr_new -= rem;
2321 else
2322 return 0;
2323 }
2324
2325 rem = nr_new % card->erase_size;
2326 if (rem)
2327 nr_new -= rem;
2328 }
2329
2330 if (nr_new == 0)
2331 return 0;
2332
2333 *to = from_new + nr_new;
2334 *from = from_new;
2335
2336 return nr_new;
2337}
2338
2205/** 2339/**
2206 * mmc_erase - erase sectors. 2340 * mmc_erase - erase sectors.
2207 * @card: card to erase 2341 * @card: card to erase
@@ -2240,26 +2374,12 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
2240 return -EINVAL; 2374 return -EINVAL;
2241 } 2375 }
2242 2376
2243 if (arg == MMC_ERASE_ARG) { 2377 if (arg == MMC_ERASE_ARG)
2244 rem = from % card->erase_size; 2378 nr = mmc_align_erase_size(card, &from, &to, nr);
2245 if (rem) {
2246 rem = card->erase_size - rem;
2247 from += rem;
2248 if (nr > rem)
2249 nr -= rem;
2250 else
2251 return 0;
2252 }
2253 rem = nr % card->erase_size;
2254 if (rem)
2255 nr -= rem;
2256 }
2257 2379
2258 if (nr == 0) 2380 if (nr == 0)
2259 return 0; 2381 return 0;
2260 2382
2261 to = from + nr;
2262
2263 if (to <= from) 2383 if (to <= from)
2264 return -EINVAL; 2384 return -EINVAL;
2265 2385
@@ -2352,6 +2472,8 @@ static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
2352 struct mmc_host *host = card->host; 2472 struct mmc_host *host = card->host;
2353 unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout; 2473 unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
2354 unsigned int last_timeout = 0; 2474 unsigned int last_timeout = 0;
2475 unsigned int max_busy_timeout = host->max_busy_timeout ?
2476 host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;
2355 2477
2356 if (card->erase_shift) { 2478 if (card->erase_shift) {
2357 max_qty = UINT_MAX >> card->erase_shift; 2479 max_qty = UINT_MAX >> card->erase_shift;
@@ -2374,15 +2496,15 @@ static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
2374 * matter what size of 'host->max_busy_timeout', but if the 2496 * matter what size of 'host->max_busy_timeout', but if the
2375 * 'host->max_busy_timeout' is large enough for more discard sectors, 2497 * 'host->max_busy_timeout' is large enough for more discard sectors,
2376 * then we can continue to increase the max discard sectors until we 2498 * then we can continue to increase the max discard sectors until we
2377 * get a balance value. 2499 * get a balance value. In cases when the 'host->max_busy_timeout'
2500 * isn't specified, use the default max erase timeout.
2378 */ 2501 */
2379 do { 2502 do {
2380 y = 0; 2503 y = 0;
2381 for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) { 2504 for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
2382 timeout = mmc_erase_timeout(card, arg, qty + x); 2505 timeout = mmc_erase_timeout(card, arg, qty + x);
2383 2506
2384 if (qty + x > min_qty && 2507 if (qty + x > min_qty && timeout > max_busy_timeout)
2385 timeout > host->max_busy_timeout)
2386 break; 2508 break;
2387 2509
2388 if (timeout < last_timeout) 2510 if (timeout < last_timeout)
@@ -2427,9 +2549,6 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card)
2427 struct mmc_host *host = card->host; 2549 struct mmc_host *host = card->host;
2428 unsigned int max_discard, max_trim; 2550 unsigned int max_discard, max_trim;
2429 2551
2430 if (!host->max_busy_timeout)
2431 return UINT_MAX;
2432
2433 /* 2552 /*
2434 * Without erase_group_def set, MMC erase timeout depends on clock 2553 * Without erase_group_def set, MMC erase timeout depends on clock
2435 * frequence which can change. In that case, the best choice is 2554 * frequence which can change. In that case, the best choice is
@@ -2447,7 +2566,8 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card)
2447 max_discard = 0; 2566 max_discard = 0;
2448 } 2567 }
2449 pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n", 2568 pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2450 mmc_hostname(host), max_discard, host->max_busy_timeout); 2569 mmc_hostname(host), max_discard, host->max_busy_timeout ?
2570 host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
2451 return max_discard; 2571 return max_discard;
2452} 2572}
2453EXPORT_SYMBOL(mmc_calc_max_discard); 2573EXPORT_SYMBOL(mmc_calc_max_discard);
@@ -2456,7 +2576,8 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2456{ 2576{
2457 struct mmc_command cmd = {0}; 2577 struct mmc_command cmd = {0};
2458 2578
2459 if (mmc_card_blockaddr(card) || mmc_card_ddr52(card)) 2579 if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
2580 mmc_card_hs400(card) || mmc_card_hs400es(card))
2460 return 0; 2581 return 0;
2461 2582
2462 cmd.opcode = MMC_SET_BLOCKLEN; 2583 cmd.opcode = MMC_SET_BLOCKLEN;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index f2d185cf8a8b..3486bc7fbb64 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1029,6 +1029,10 @@ static int mmc_select_hs(struct mmc_card *card)
1029 err = mmc_switch_status(card); 1029 err = mmc_switch_status(card);
1030 } 1030 }
1031 1031
1032 if (err)
1033 pr_warn("%s: switch to high-speed failed, err:%d\n",
1034 mmc_hostname(card->host), err);
1035
1032 return err; 1036 return err;
1033} 1037}
1034 1038
@@ -1265,11 +1269,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
1265 1269
1266 /* Switch card to HS mode */ 1270 /* Switch card to HS mode */
1267 err = mmc_select_hs(card); 1271 err = mmc_select_hs(card);
1268 if (err) { 1272 if (err)
1269 pr_err("%s: switch to high-speed failed, err:%d\n",
1270 mmc_hostname(host), err);
1271 goto out_err; 1273 goto out_err;
1272 }
1273 1274
1274 err = mmc_switch_status(card); 1275 err = mmc_switch_status(card);
1275 if (err) 1276 if (err)
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index 450d907c6e6c..1304160de168 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -16,6 +16,8 @@
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/gpio/consumer.h> 18#include <linux/gpio/consumer.h>
19#include <linux/delay.h>
20#include <linux/property.h>
19 21
20#include <linux/mmc/host.h> 22#include <linux/mmc/host.h>
21 23
@@ -24,6 +26,7 @@
24struct mmc_pwrseq_simple { 26struct mmc_pwrseq_simple {
25 struct mmc_pwrseq pwrseq; 27 struct mmc_pwrseq pwrseq;
26 bool clk_enabled; 28 bool clk_enabled;
29 u32 post_power_on_delay_ms;
27 struct clk *ext_clk; 30 struct clk *ext_clk;
28 struct gpio_descs *reset_gpios; 31 struct gpio_descs *reset_gpios;
29}; 32};
@@ -64,6 +67,9 @@ static void mmc_pwrseq_simple_post_power_on(struct mmc_host *host)
64 struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq); 67 struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
65 68
66 mmc_pwrseq_simple_set_gpios_value(pwrseq, 0); 69 mmc_pwrseq_simple_set_gpios_value(pwrseq, 0);
70
71 if (pwrseq->post_power_on_delay_ms)
72 msleep(pwrseq->post_power_on_delay_ms);
67} 73}
68 74
69static void mmc_pwrseq_simple_power_off(struct mmc_host *host) 75static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
@@ -111,6 +117,9 @@ static int mmc_pwrseq_simple_probe(struct platform_device *pdev)
111 return PTR_ERR(pwrseq->reset_gpios); 117 return PTR_ERR(pwrseq->reset_gpios);
112 } 118 }
113 119
120 device_property_read_u32(dev, "post-power-on-delay-ms",
121 &pwrseq->post_power_on_delay_ms);
122
114 pwrseq->pwrseq.dev = dev; 123 pwrseq->pwrseq.dev = dev;
115 pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops; 124 pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
116 pwrseq->pwrseq.owner = THIS_MODULE; 125 pwrseq->pwrseq.owner = THIS_MODULE;
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 0123936241b0..73c762a28dfe 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -223,8 +223,7 @@ static int mmc_decode_scr(struct mmc_card *card)
223static int mmc_read_ssr(struct mmc_card *card) 223static int mmc_read_ssr(struct mmc_card *card)
224{ 224{
225 unsigned int au, es, et, eo; 225 unsigned int au, es, et, eo;
226 int err, i; 226 int i;
227 u32 *ssr;
228 227
229 if (!(card->csd.cmdclass & CCC_APP_SPEC)) { 228 if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
230 pr_warn("%s: card lacks mandatory SD Status function\n", 229 pr_warn("%s: card lacks mandatory SD Status function\n",
@@ -232,33 +231,27 @@ static int mmc_read_ssr(struct mmc_card *card)
232 return 0; 231 return 0;
233 } 232 }
234 233
235 ssr = kmalloc(64, GFP_KERNEL); 234 if (mmc_app_sd_status(card, card->raw_ssr)) {
236 if (!ssr)
237 return -ENOMEM;
238
239 err = mmc_app_sd_status(card, ssr);
240 if (err) {
241 pr_warn("%s: problem reading SD Status register\n", 235 pr_warn("%s: problem reading SD Status register\n",
242 mmc_hostname(card->host)); 236 mmc_hostname(card->host));
243 err = 0; 237 return 0;
244 goto out;
245 } 238 }
246 239
247 for (i = 0; i < 16; i++) 240 for (i = 0; i < 16; i++)
248 ssr[i] = be32_to_cpu(ssr[i]); 241 card->raw_ssr[i] = be32_to_cpu(card->raw_ssr[i]);
249 242
250 /* 243 /*
251 * UNSTUFF_BITS only works with four u32s so we have to offset the 244 * UNSTUFF_BITS only works with four u32s so we have to offset the
252 * bitfield positions accordingly. 245 * bitfield positions accordingly.
253 */ 246 */
254 au = UNSTUFF_BITS(ssr, 428 - 384, 4); 247 au = UNSTUFF_BITS(card->raw_ssr, 428 - 384, 4);
255 if (au) { 248 if (au) {
256 if (au <= 9 || card->scr.sda_spec3) { 249 if (au <= 9 || card->scr.sda_spec3) {
257 card->ssr.au = sd_au_size[au]; 250 card->ssr.au = sd_au_size[au];
258 es = UNSTUFF_BITS(ssr, 408 - 384, 16); 251 es = UNSTUFF_BITS(card->raw_ssr, 408 - 384, 16);
259 et = UNSTUFF_BITS(ssr, 402 - 384, 6); 252 et = UNSTUFF_BITS(card->raw_ssr, 402 - 384, 6);
260 if (es && et) { 253 if (es && et) {
261 eo = UNSTUFF_BITS(ssr, 400 - 384, 2); 254 eo = UNSTUFF_BITS(card->raw_ssr, 400 - 384, 2);
262 card->ssr.erase_timeout = (et * 1000) / es; 255 card->ssr.erase_timeout = (et * 1000) / es;
263 card->ssr.erase_offset = eo * 1000; 256 card->ssr.erase_offset = eo * 1000;
264 } 257 }
@@ -267,9 +260,8 @@ static int mmc_read_ssr(struct mmc_card *card)
267 mmc_hostname(card->host)); 260 mmc_hostname(card->host));
268 } 261 }
269 } 262 }
270out: 263
271 kfree(ssr); 264 return 0;
272 return err;
273} 265}
274 266
275/* 267/*
@@ -666,6 +658,14 @@ MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
666MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1], 658MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
667 card->raw_csd[2], card->raw_csd[3]); 659 card->raw_csd[2], card->raw_csd[3]);
668MMC_DEV_ATTR(scr, "%08x%08x\n", card->raw_scr[0], card->raw_scr[1]); 660MMC_DEV_ATTR(scr, "%08x%08x\n", card->raw_scr[0], card->raw_scr[1]);
661MMC_DEV_ATTR(ssr,
662 "%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x\n",
663 card->raw_ssr[0], card->raw_ssr[1], card->raw_ssr[2],
664 card->raw_ssr[3], card->raw_ssr[4], card->raw_ssr[5],
665 card->raw_ssr[6], card->raw_ssr[7], card->raw_ssr[8],
666 card->raw_ssr[9], card->raw_ssr[10], card->raw_ssr[11],
667 card->raw_ssr[12], card->raw_ssr[13], card->raw_ssr[14],
668 card->raw_ssr[15]);
669MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year); 669MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
670MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9); 670MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
671MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9); 671MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
@@ -698,6 +698,7 @@ static struct attribute *sd_std_attrs[] = {
698 &dev_attr_cid.attr, 698 &dev_attr_cid.attr,
699 &dev_attr_csd.attr, 699 &dev_attr_csd.attr,
700 &dev_attr_scr.attr, 700 &dev_attr_scr.attr,
701 &dev_attr_ssr.attr,
701 &dev_attr_date.attr, 702 &dev_attr_date.attr,
702 &dev_attr_erase_size.attr, 703 &dev_attr_erase_size.attr,
703 &dev_attr_preferred_erase_size.attr, 704 &dev_attr_preferred_erase_size.attr,
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 78cb4d5d9d58..406e5f037e32 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -26,8 +26,8 @@
26 */ 26 */
27void sdio_claim_host(struct sdio_func *func) 27void sdio_claim_host(struct sdio_func *func)
28{ 28{
29 BUG_ON(!func); 29 if (WARN_ON(!func))
30 BUG_ON(!func->card); 30 return;
31 31
32 mmc_claim_host(func->card->host); 32 mmc_claim_host(func->card->host);
33} 33}
@@ -42,8 +42,8 @@ EXPORT_SYMBOL_GPL(sdio_claim_host);
42 */ 42 */
43void sdio_release_host(struct sdio_func *func) 43void sdio_release_host(struct sdio_func *func)
44{ 44{
45 BUG_ON(!func); 45 if (WARN_ON(!func))
46 BUG_ON(!func->card); 46 return;
47 47
48 mmc_release_host(func->card->host); 48 mmc_release_host(func->card->host);
49} 49}
@@ -62,8 +62,8 @@ int sdio_enable_func(struct sdio_func *func)
62 unsigned char reg; 62 unsigned char reg;
63 unsigned long timeout; 63 unsigned long timeout;
64 64
65 BUG_ON(!func); 65 if (!func)
66 BUG_ON(!func->card); 66 return -EINVAL;
67 67
68 pr_debug("SDIO: Enabling device %s...\n", sdio_func_id(func)); 68 pr_debug("SDIO: Enabling device %s...\n", sdio_func_id(func));
69 69
@@ -112,8 +112,8 @@ int sdio_disable_func(struct sdio_func *func)
112 int ret; 112 int ret;
113 unsigned char reg; 113 unsigned char reg;
114 114
115 BUG_ON(!func); 115 if (!func)
116 BUG_ON(!func->card); 116 return -EINVAL;
117 117
118 pr_debug("SDIO: Disabling device %s...\n", sdio_func_id(func)); 118 pr_debug("SDIO: Disabling device %s...\n", sdio_func_id(func));
119 119
@@ -307,6 +307,9 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
307 unsigned max_blocks; 307 unsigned max_blocks;
308 int ret; 308 int ret;
309 309
310 if (!func || (func->num > 7))
311 return -EINVAL;
312
310 /* Do the bulk of the transfer using block mode (if supported). */ 313 /* Do the bulk of the transfer using block mode (if supported). */
311 if (func->card->cccr.multi_block && (size > sdio_max_byte_size(func))) { 314 if (func->card->cccr.multi_block && (size > sdio_max_byte_size(func))) {
312 /* Blocks per command is limited by host count, host transfer 315 /* Blocks per command is limited by host count, host transfer
@@ -367,7 +370,10 @@ u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret)
367 int ret; 370 int ret;
368 u8 val; 371 u8 val;
369 372
370 BUG_ON(!func); 373 if (!func) {
374 *err_ret = -EINVAL;
375 return 0xFF;
376 }
371 377
372 if (err_ret) 378 if (err_ret)
373 *err_ret = 0; 379 *err_ret = 0;
@@ -398,7 +404,10 @@ void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret)
398{ 404{
399 int ret; 405 int ret;
400 406
401 BUG_ON(!func); 407 if (!func) {
408 *err_ret = -EINVAL;
409 return;
410 }
402 411
403 ret = mmc_io_rw_direct(func->card, 1, func->num, addr, b, NULL); 412 ret = mmc_io_rw_direct(func->card, 1, func->num, addr, b, NULL);
404 if (err_ret) 413 if (err_ret)
@@ -623,7 +632,10 @@ unsigned char sdio_f0_readb(struct sdio_func *func, unsigned int addr,
623 int ret; 632 int ret;
624 unsigned char val; 633 unsigned char val;
625 634
626 BUG_ON(!func); 635 if (!func) {
636 *err_ret = -EINVAL;
637 return 0xFF;
638 }
627 639
628 if (err_ret) 640 if (err_ret)
629 *err_ret = 0; 641 *err_ret = 0;
@@ -658,7 +670,10 @@ void sdio_f0_writeb(struct sdio_func *func, unsigned char b, unsigned int addr,
658{ 670{
659 int ret; 671 int ret;
660 672
661 BUG_ON(!func); 673 if (!func) {
674 *err_ret = -EINVAL;
675 return;
676 }
662 677
663 if ((addr < 0xF0 || addr > 0xFF) && (!mmc_card_lenient_fn0(func->card))) { 678 if ((addr < 0xF0 || addr > 0xFF) && (!mmc_card_lenient_fn0(func->card))) {
664 if (err_ret) 679 if (err_ret)
@@ -684,8 +699,8 @@ EXPORT_SYMBOL_GPL(sdio_f0_writeb);
684 */ 699 */
685mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func) 700mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func)
686{ 701{
687 BUG_ON(!func); 702 if (!func)
688 BUG_ON(!func->card); 703 return 0;
689 704
690 return func->card->host->pm_caps; 705 return func->card->host->pm_caps;
691} 706}
@@ -707,8 +722,8 @@ int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags)
707{ 722{
708 struct mmc_host *host; 723 struct mmc_host *host;
709 724
710 BUG_ON(!func); 725 if (!func)
711 BUG_ON(!func->card); 726 return -EINVAL;
712 727
713 host = func->card->host; 728 host = func->card->host;
714 729
diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c
index 34f6e8015306..90fe5545c677 100644
--- a/drivers/mmc/core/sdio_ops.c
+++ b/drivers/mmc/core/sdio_ops.c
@@ -24,8 +24,6 @@ int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
24 struct mmc_command cmd = {0}; 24 struct mmc_command cmd = {0};
25 int i, err = 0; 25 int i, err = 0;
26 26
27 BUG_ON(!host);
28
29 cmd.opcode = SD_IO_SEND_OP_COND; 27 cmd.opcode = SD_IO_SEND_OP_COND;
30 cmd.arg = ocr; 28 cmd.arg = ocr;
31 cmd.flags = MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR; 29 cmd.flags = MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR;
@@ -71,8 +69,8 @@ static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,
71 struct mmc_command cmd = {0}; 69 struct mmc_command cmd = {0};
72 int err; 70 int err;
73 71
74 BUG_ON(!host); 72 if (fn > 7)
75 BUG_ON(fn > 7); 73 return -EINVAL;
76 74
77 /* sanity check */ 75 /* sanity check */
78 if (addr & ~0x1FFFF) 76 if (addr & ~0x1FFFF)
@@ -114,7 +112,6 @@ static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,
114int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn, 112int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
115 unsigned addr, u8 in, u8 *out) 113 unsigned addr, u8 in, u8 *out)
116{ 114{
117 BUG_ON(!card);
118 return mmc_io_rw_direct_host(card->host, write, fn, addr, in, out); 115 return mmc_io_rw_direct_host(card->host, write, fn, addr, in, out);
119} 116}
120 117
@@ -129,8 +126,6 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
129 unsigned int nents, left_size, i; 126 unsigned int nents, left_size, i;
130 unsigned int seg_size = card->host->max_seg_size; 127 unsigned int seg_size = card->host->max_seg_size;
131 128
132 BUG_ON(!card);
133 BUG_ON(fn > 7);
134 WARN_ON(blksz == 0); 129 WARN_ON(blksz == 0);
135 130
136 /* sanity check */ 131 /* sanity check */
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index a56373c75983..8fa478c3b0db 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1216,9 +1216,11 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
1216 } 1216 }
1217 1217
1218 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1218 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1219 irq = platform_get_irq(pdev, 0); 1219 if (!r)
1220 if (!r || irq == NO_IRQ)
1221 return -ENODEV; 1220 return -ENODEV;
1221 irq = platform_get_irq(pdev, 0);
1222 if (irq < 0)
1223 return irq;
1222 1224
1223 mem_size = resource_size(r); 1225 mem_size = resource_size(r);
1224 mem = devm_request_mem_region(&pdev->dev, r->start, mem_size, 1226 mem = devm_request_mem_region(&pdev->dev, r->start, mem_size,
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index da0ef1765735..7ab3d749b5ae 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -225,8 +225,12 @@ static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing)
225 * Not supported to configure register 225 * Not supported to configure register
226 * related to HS400 226 * related to HS400
227 */ 227 */
228 if (priv->ctrl_type < DW_MCI_TYPE_EXYNOS5420) 228 if (priv->ctrl_type < DW_MCI_TYPE_EXYNOS5420) {
229 if (timing == MMC_TIMING_MMC_HS400)
230 dev_warn(host->dev,
231 "cannot configure HS400, unsupported chipset\n");
229 return; 232 return;
233 }
230 234
231 dqs = priv->saved_dqs_en; 235 dqs = priv->saved_dqs_en;
232 strobe = priv->saved_strobe_ctrl; 236 strobe = priv->saved_strobe_ctrl;
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 8e9d886bfcda..624789496dce 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -131,11 +131,17 @@ static void dw_mci_hi6220_set_ios(struct dw_mci *host, struct mmc_ios *ios)
131 host->bus_hz = clk_get_rate(host->biu_clk); 131 host->bus_hz = clk_get_rate(host->biu_clk);
132} 132}
133 133
134static int dw_mci_hi6220_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
135{
136 return 0;
137}
138
134static const struct dw_mci_drv_data hi6220_data = { 139static const struct dw_mci_drv_data hi6220_data = {
135 .caps = dw_mci_hi6220_caps, 140 .caps = dw_mci_hi6220_caps,
136 .switch_voltage = dw_mci_hi6220_switch_voltage, 141 .switch_voltage = dw_mci_hi6220_switch_voltage,
137 .set_ios = dw_mci_hi6220_set_ios, 142 .set_ios = dw_mci_hi6220_set_ios,
138 .parse_dt = dw_mci_hi6220_parse_dt, 143 .parse_dt = dw_mci_hi6220_parse_dt,
144 .execute_tuning = dw_mci_hi6220_execute_tuning,
139}; 145};
140 146
141static const struct of_device_id dw_mci_k3_match[] = { 147static const struct of_device_id dw_mci_k3_match[] = {
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 767af2026f8b..4fcbc4012ed0 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -61,6 +61,8 @@
61 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \ 61 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
62 SDMMC_IDMAC_INT_TI) 62 SDMMC_IDMAC_INT_TI)
63 63
64#define DESC_RING_BUF_SZ PAGE_SIZE
65
64struct idmac_desc_64addr { 66struct idmac_desc_64addr {
65 u32 des0; /* Control Descriptor */ 67 u32 des0; /* Control Descriptor */
66 68
@@ -467,136 +469,6 @@ static void dw_mci_dmac_complete_dma(void *arg)
467 } 469 }
468} 470}
469 471
470static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
471 unsigned int sg_len)
472{
473 unsigned int desc_len;
474 int i;
475
476 if (host->dma_64bit_address == 1) {
477 struct idmac_desc_64addr *desc_first, *desc_last, *desc;
478
479 desc_first = desc_last = desc = host->sg_cpu;
480
481 for (i = 0; i < sg_len; i++) {
482 unsigned int length = sg_dma_len(&data->sg[i]);
483
484 u64 mem_addr = sg_dma_address(&data->sg[i]);
485
486 for ( ; length ; desc++) {
487 desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
488 length : DW_MCI_DESC_DATA_LENGTH;
489
490 length -= desc_len;
491
492 /*
493 * Set the OWN bit and disable interrupts
494 * for this descriptor
495 */
496 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
497 IDMAC_DES0_CH;
498
499 /* Buffer length */
500 IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
501
502 /* Physical address to DMA to/from */
503 desc->des4 = mem_addr & 0xffffffff;
504 desc->des5 = mem_addr >> 32;
505
506 /* Update physical address for the next desc */
507 mem_addr += desc_len;
508
509 /* Save pointer to the last descriptor */
510 desc_last = desc;
511 }
512 }
513
514 /* Set first descriptor */
515 desc_first->des0 |= IDMAC_DES0_FD;
516
517 /* Set last descriptor */
518 desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
519 desc_last->des0 |= IDMAC_DES0_LD;
520
521 } else {
522 struct idmac_desc *desc_first, *desc_last, *desc;
523
524 desc_first = desc_last = desc = host->sg_cpu;
525
526 for (i = 0; i < sg_len; i++) {
527 unsigned int length = sg_dma_len(&data->sg[i]);
528
529 u32 mem_addr = sg_dma_address(&data->sg[i]);
530
531 for ( ; length ; desc++) {
532 desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
533 length : DW_MCI_DESC_DATA_LENGTH;
534
535 length -= desc_len;
536
537 /*
538 * Set the OWN bit and disable interrupts
539 * for this descriptor
540 */
541 desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
542 IDMAC_DES0_DIC |
543 IDMAC_DES0_CH);
544
545 /* Buffer length */
546 IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
547
548 /* Physical address to DMA to/from */
549 desc->des2 = cpu_to_le32(mem_addr);
550
551 /* Update physical address for the next desc */
552 mem_addr += desc_len;
553
554 /* Save pointer to the last descriptor */
555 desc_last = desc;
556 }
557 }
558
559 /* Set first descriptor */
560 desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
561
562 /* Set last descriptor */
563 desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
564 IDMAC_DES0_DIC));
565 desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
566 }
567
568 wmb(); /* drain writebuffer */
569}
570
571static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
572{
573 u32 temp;
574
575 dw_mci_translate_sglist(host, host->data, sg_len);
576
577 /* Make sure to reset DMA in case we did PIO before this */
578 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
579 dw_mci_idmac_reset(host);
580
581 /* Select IDMAC interface */
582 temp = mci_readl(host, CTRL);
583 temp |= SDMMC_CTRL_USE_IDMAC;
584 mci_writel(host, CTRL, temp);
585
586 /* drain writebuffer */
587 wmb();
588
589 /* Enable the IDMAC */
590 temp = mci_readl(host, BMOD);
591 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
592 mci_writel(host, BMOD, temp);
593
594 /* Start it running */
595 mci_writel(host, PLDMND, 1);
596
597 return 0;
598}
599
600static int dw_mci_idmac_init(struct dw_mci *host) 472static int dw_mci_idmac_init(struct dw_mci *host)
601{ 473{
602 int i; 474 int i;
@@ -604,7 +476,8 @@ static int dw_mci_idmac_init(struct dw_mci *host)
604 if (host->dma_64bit_address == 1) { 476 if (host->dma_64bit_address == 1) {
605 struct idmac_desc_64addr *p; 477 struct idmac_desc_64addr *p;
606 /* Number of descriptors in the ring buffer */ 478 /* Number of descriptors in the ring buffer */
607 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr); 479 host->ring_size =
480 DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);
608 481
609 /* Forward link the descriptor list */ 482 /* Forward link the descriptor list */
610 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; 483 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
@@ -630,7 +503,8 @@ static int dw_mci_idmac_init(struct dw_mci *host)
630 } else { 503 } else {
631 struct idmac_desc *p; 504 struct idmac_desc *p;
632 /* Number of descriptors in the ring buffer */ 505 /* Number of descriptors in the ring buffer */
633 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); 506 host->ring_size =
507 DESC_RING_BUF_SZ / sizeof(struct idmac_desc);
634 508
635 /* Forward link the descriptor list */ 509 /* Forward link the descriptor list */
636 for (i = 0, p = host->sg_cpu; 510 for (i = 0, p = host->sg_cpu;
@@ -671,6 +545,195 @@ static int dw_mci_idmac_init(struct dw_mci *host)
671 return 0; 545 return 0;
672} 546}
673 547
548static inline int dw_mci_prepare_desc64(struct dw_mci *host,
549 struct mmc_data *data,
550 unsigned int sg_len)
551{
552 unsigned int desc_len;
553 struct idmac_desc_64addr *desc_first, *desc_last, *desc;
554 unsigned long timeout;
555 int i;
556
557 desc_first = desc_last = desc = host->sg_cpu;
558
559 for (i = 0; i < sg_len; i++) {
560 unsigned int length = sg_dma_len(&data->sg[i]);
561
562 u64 mem_addr = sg_dma_address(&data->sg[i]);
563
564 for ( ; length ; desc++) {
565 desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
566 length : DW_MCI_DESC_DATA_LENGTH;
567
568 length -= desc_len;
569
570 /*
571 * Wait for the former clear OWN bit operation
572 * of IDMAC to make sure that this descriptor
573 * isn't still owned by IDMAC as IDMAC's write
574 * ops and CPU's read ops are asynchronous.
575 */
576 timeout = jiffies + msecs_to_jiffies(100);
577 while (readl(&desc->des0) & IDMAC_DES0_OWN) {
578 if (time_after(jiffies, timeout))
579 goto err_own_bit;
580 udelay(10);
581 }
582
583 /*
584 * Set the OWN bit and disable interrupts
585 * for this descriptor
586 */
587 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
588 IDMAC_DES0_CH;
589
590 /* Buffer length */
591 IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
592
593 /* Physical address to DMA to/from */
594 desc->des4 = mem_addr & 0xffffffff;
595 desc->des5 = mem_addr >> 32;
596
597 /* Update physical address for the next desc */
598 mem_addr += desc_len;
599
600 /* Save pointer to the last descriptor */
601 desc_last = desc;
602 }
603 }
604
605 /* Set first descriptor */
606 desc_first->des0 |= IDMAC_DES0_FD;
607
608 /* Set last descriptor */
609 desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
610 desc_last->des0 |= IDMAC_DES0_LD;
611
612 return 0;
613err_own_bit:
614 /* restore the descriptor chain as it's polluted */
615 dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
616 memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
617 dw_mci_idmac_init(host);
618 return -EINVAL;
619}
620
621
622static inline int dw_mci_prepare_desc32(struct dw_mci *host,
623 struct mmc_data *data,
624 unsigned int sg_len)
625{
626 unsigned int desc_len;
627 struct idmac_desc *desc_first, *desc_last, *desc;
628 unsigned long timeout;
629 int i;
630
631 desc_first = desc_last = desc = host->sg_cpu;
632
633 for (i = 0; i < sg_len; i++) {
634 unsigned int length = sg_dma_len(&data->sg[i]);
635
636 u32 mem_addr = sg_dma_address(&data->sg[i]);
637
638 for ( ; length ; desc++) {
639 desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
640 length : DW_MCI_DESC_DATA_LENGTH;
641
642 length -= desc_len;
643
644 /*
645 * Wait for the former clear OWN bit operation
646 * of IDMAC to make sure that this descriptor
647 * isn't still owned by IDMAC as IDMAC's write
648 * ops and CPU's read ops are asynchronous.
649 */
650 timeout = jiffies + msecs_to_jiffies(100);
651 while (readl(&desc->des0) &
652 cpu_to_le32(IDMAC_DES0_OWN)) {
653 if (time_after(jiffies, timeout))
654 goto err_own_bit;
655 udelay(10);
656 }
657
658 /*
659 * Set the OWN bit and disable interrupts
660 * for this descriptor
661 */
662 desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
663 IDMAC_DES0_DIC |
664 IDMAC_DES0_CH);
665
666 /* Buffer length */
667 IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
668
669 /* Physical address to DMA to/from */
670 desc->des2 = cpu_to_le32(mem_addr);
671
672 /* Update physical address for the next desc */
673 mem_addr += desc_len;
674
675 /* Save pointer to the last descriptor */
676 desc_last = desc;
677 }
678 }
679
680 /* Set first descriptor */
681 desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
682
683 /* Set last descriptor */
684 desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
685 IDMAC_DES0_DIC));
686 desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
687
688 return 0;
689err_own_bit:
690 /* restore the descriptor chain as it's polluted */
691 dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
692 memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
693 dw_mci_idmac_init(host);
694 return -EINVAL;
695}
696
697static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
698{
699 u32 temp;
700 int ret;
701
702 if (host->dma_64bit_address == 1)
703 ret = dw_mci_prepare_desc64(host, host->data, sg_len);
704 else
705 ret = dw_mci_prepare_desc32(host, host->data, sg_len);
706
707 if (ret)
708 goto out;
709
710 /* drain writebuffer */
711 wmb();
712
713 /* Make sure to reset DMA in case we did PIO before this */
714 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
715 dw_mci_idmac_reset(host);
716
717 /* Select IDMAC interface */
718 temp = mci_readl(host, CTRL);
719 temp |= SDMMC_CTRL_USE_IDMAC;
720 mci_writel(host, CTRL, temp);
721
722 /* drain writebuffer */
723 wmb();
724
725 /* Enable the IDMAC */
726 temp = mci_readl(host, BMOD);
727 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
728 mci_writel(host, BMOD, temp);
729
730 /* Start it running */
731 mci_writel(host, PLDMND, 1);
732
733out:
734 return ret;
735}
736
674static const struct dw_mci_dma_ops dw_mci_idmac_ops = { 737static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
675 .init = dw_mci_idmac_init, 738 .init = dw_mci_idmac_init,
676 .start = dw_mci_idmac_start_dma, 739 .start = dw_mci_idmac_start_dma,
@@ -876,11 +939,8 @@ static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
876 * MSIZE is '1', 939 * MSIZE is '1',
877 * if blksz is not a multiple of the FIFO width 940 * if blksz is not a multiple of the FIFO width
878 */ 941 */
879 if (blksz % fifo_width) { 942 if (blksz % fifo_width)
880 msize = 0;
881 rx_wmark = 1;
882 goto done; 943 goto done;
883 }
884 944
885 do { 945 do {
886 if (!((blksz_depth % mszs[idx]) || 946 if (!((blksz_depth % mszs[idx]) ||
@@ -998,8 +1058,10 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
998 spin_unlock_irqrestore(&host->irq_lock, irqflags); 1058 spin_unlock_irqrestore(&host->irq_lock, irqflags);
999 1059
1000 if (host->dma_ops->start(host, sg_len)) { 1060 if (host->dma_ops->start(host, sg_len)) {
1001 /* We can't do DMA */ 1061 /* We can't do DMA, try PIO for this one */
1002 dev_err(host->dev, "%s: failed to start DMA.\n", __func__); 1062 dev_dbg(host->dev,
1063 "%s: fall back to PIO mode for current transfer\n",
1064 __func__);
1003 return -ENODEV; 1065 return -ENODEV;
1004 } 1066 }
1005 1067
@@ -1695,11 +1757,11 @@ static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1695 data->error = -ETIMEDOUT; 1757 data->error = -ETIMEDOUT;
1696 } else if (host->dir_status == 1758 } else if (host->dir_status ==
1697 DW_MCI_RECV_STATUS) { 1759 DW_MCI_RECV_STATUS) {
1698 data->error = -EIO; 1760 data->error = -EILSEQ;
1699 } 1761 }
1700 } else { 1762 } else {
1701 /* SDMMC_INT_SBE is included */ 1763 /* SDMMC_INT_SBE is included */
1702 data->error = -EIO; 1764 data->error = -EILSEQ;
1703 } 1765 }
1704 1766
1705 dev_dbg(host->dev, "data error, status 0x%08x\n", status); 1767 dev_dbg(host->dev, "data error, status 0x%08x\n", status);
@@ -2527,47 +2589,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2527 return IRQ_HANDLED; 2589 return IRQ_HANDLED;
2528} 2590}
2529 2591
2530#ifdef CONFIG_OF
2531/* given a slot, find out the device node representing that slot */
2532static struct device_node *dw_mci_of_find_slot_node(struct dw_mci_slot *slot)
2533{
2534 struct device *dev = slot->mmc->parent;
2535 struct device_node *np;
2536 const __be32 *addr;
2537 int len;
2538
2539 if (!dev || !dev->of_node)
2540 return NULL;
2541
2542 for_each_child_of_node(dev->of_node, np) {
2543 addr = of_get_property(np, "reg", &len);
2544 if (!addr || (len < sizeof(int)))
2545 continue;
2546 if (be32_to_cpup(addr) == slot->id)
2547 return np;
2548 }
2549 return NULL;
2550}
2551
2552static void dw_mci_slot_of_parse(struct dw_mci_slot *slot)
2553{
2554 struct device_node *np = dw_mci_of_find_slot_node(slot);
2555
2556 if (!np)
2557 return;
2558
2559 if (of_property_read_bool(np, "disable-wp")) {
2560 slot->mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
2561 dev_warn(slot->mmc->parent,
2562 "Slot quirk 'disable-wp' is deprecated\n");
2563 }
2564}
2565#else /* CONFIG_OF */
2566static void dw_mci_slot_of_parse(struct dw_mci_slot *slot)
2567{
2568}
2569#endif /* CONFIG_OF */
2570
2571static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) 2592static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2572{ 2593{
2573 struct mmc_host *mmc; 2594 struct mmc_host *mmc;
@@ -2630,8 +2651,6 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2630 if (host->pdata->caps2) 2651 if (host->pdata->caps2)
2631 mmc->caps2 = host->pdata->caps2; 2652 mmc->caps2 = host->pdata->caps2;
2632 2653
2633 dw_mci_slot_of_parse(slot);
2634
2635 ret = mmc_of_parse(mmc); 2654 ret = mmc_of_parse(mmc);
2636 if (ret) 2655 if (ret)
2637 goto err_host_allocated; 2656 goto err_host_allocated;
@@ -2736,7 +2755,8 @@ static void dw_mci_init_dma(struct dw_mci *host)
2736 } 2755 }
2737 2756
2738 /* Alloc memory for sg translation */ 2757 /* Alloc memory for sg translation */
2739 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE, 2758 host->sg_cpu = dmam_alloc_coherent(host->dev,
2759 DESC_RING_BUF_SZ,
2740 &host->sg_dma, GFP_KERNEL); 2760 &host->sg_dma, GFP_KERNEL);
2741 if (!host->sg_cpu) { 2761 if (!host->sg_cpu) {
2742 dev_err(host->dev, 2762 dev_err(host->dev,
@@ -2919,6 +2939,13 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2919 if (!pdata) 2939 if (!pdata)
2920 return ERR_PTR(-ENOMEM); 2940 return ERR_PTR(-ENOMEM);
2921 2941
2942 /* find reset controller when exist */
2943 pdata->rstc = devm_reset_control_get_optional(dev, NULL);
2944 if (IS_ERR(pdata->rstc)) {
2945 if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
2946 return ERR_PTR(-EPROBE_DEFER);
2947 }
2948
2922 /* find out number of slots supported */ 2949 /* find out number of slots supported */
2923 of_property_read_u32(np, "num-slots", &pdata->num_slots); 2950 of_property_read_u32(np, "num-slots", &pdata->num_slots);
2924 2951
@@ -2937,11 +2964,6 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2937 return ERR_PTR(ret); 2964 return ERR_PTR(ret);
2938 } 2965 }
2939 2966
2940 if (of_find_property(np, "supports-highspeed", NULL)) {
2941 dev_info(dev, "supports-highspeed property is deprecated.\n");
2942 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2943 }
2944
2945 return pdata; 2967 return pdata;
2946} 2968}
2947 2969
@@ -2990,7 +3012,9 @@ int dw_mci_probe(struct dw_mci *host)
2990 3012
2991 if (!host->pdata) { 3013 if (!host->pdata) {
2992 host->pdata = dw_mci_parse_dt(host); 3014 host->pdata = dw_mci_parse_dt(host);
2993 if (IS_ERR(host->pdata)) { 3015 if (PTR_ERR(host->pdata) == -EPROBE_DEFER) {
3016 return -EPROBE_DEFER;
3017 } else if (IS_ERR(host->pdata)) {
2994 dev_err(host->dev, "platform data not available\n"); 3018 dev_err(host->dev, "platform data not available\n");
2995 return -EINVAL; 3019 return -EINVAL;
2996 } 3020 }
@@ -3044,6 +3068,12 @@ int dw_mci_probe(struct dw_mci *host)
3044 } 3068 }
3045 } 3069 }
3046 3070
3071 if (!IS_ERR(host->pdata->rstc)) {
3072 reset_control_assert(host->pdata->rstc);
3073 usleep_range(10, 50);
3074 reset_control_deassert(host->pdata->rstc);
3075 }
3076
3047 setup_timer(&host->cmd11_timer, 3077 setup_timer(&host->cmd11_timer,
3048 dw_mci_cmd11_timer, (unsigned long)host); 3078 dw_mci_cmd11_timer, (unsigned long)host);
3049 3079
@@ -3193,13 +3223,14 @@ err_dmaunmap:
3193 if (host->use_dma && host->dma_ops->exit) 3223 if (host->use_dma && host->dma_ops->exit)
3194 host->dma_ops->exit(host); 3224 host->dma_ops->exit(host);
3195 3225
3226 if (!IS_ERR(host->pdata->rstc))
3227 reset_control_assert(host->pdata->rstc);
3228
3196err_clk_ciu: 3229err_clk_ciu:
3197 if (!IS_ERR(host->ciu_clk)) 3230 clk_disable_unprepare(host->ciu_clk);
3198 clk_disable_unprepare(host->ciu_clk);
3199 3231
3200err_clk_biu: 3232err_clk_biu:
3201 if (!IS_ERR(host->biu_clk)) 3233 clk_disable_unprepare(host->biu_clk);
3202 clk_disable_unprepare(host->biu_clk);
3203 3234
3204 return ret; 3235 return ret;
3205} 3236}
@@ -3225,11 +3256,11 @@ void dw_mci_remove(struct dw_mci *host)
3225 if (host->use_dma && host->dma_ops->exit) 3256 if (host->use_dma && host->dma_ops->exit)
3226 host->dma_ops->exit(host); 3257 host->dma_ops->exit(host);
3227 3258
3228 if (!IS_ERR(host->ciu_clk)) 3259 if (!IS_ERR(host->pdata->rstc))
3229 clk_disable_unprepare(host->ciu_clk); 3260 reset_control_assert(host->pdata->rstc);
3230 3261
3231 if (!IS_ERR(host->biu_clk)) 3262 clk_disable_unprepare(host->ciu_clk);
3232 clk_disable_unprepare(host->biu_clk); 3263 clk_disable_unprepare(host->biu_clk);
3233} 3264}
3234EXPORT_SYMBOL(dw_mci_remove); 3265EXPORT_SYMBOL(dw_mci_remove);
3235 3266
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index 79905ce895ad..bbad309679cf 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -257,7 +257,7 @@ static void moxart_dma_complete(void *param)
257static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host) 257static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
258{ 258{
259 u32 len, dir_data, dir_slave; 259 u32 len, dir_data, dir_slave;
260 unsigned long dma_time; 260 long dma_time;
261 struct dma_async_tx_descriptor *desc = NULL; 261 struct dma_async_tx_descriptor *desc = NULL;
262 struct dma_chan *dma_chan; 262 struct dma_chan *dma_chan;
263 263
@@ -397,7 +397,8 @@ static void moxart_prepare_data(struct moxart_host *host)
397static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq) 397static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq)
398{ 398{
399 struct moxart_host *host = mmc_priv(mmc); 399 struct moxart_host *host = mmc_priv(mmc);
400 unsigned long pio_time, flags; 400 long pio_time;
401 unsigned long flags;
401 u32 status; 402 u32 status;
402 403
403 spin_lock_irqsave(&host->lock, flags); 404 spin_lock_irqsave(&host->lock, flags);
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 396c9b7e4121..3ccaa1415f33 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -126,7 +126,7 @@ static int sd_response_type(struct mmc_command *cmd)
126 return SD_RSP_TYPE_R0; 126 return SD_RSP_TYPE_R0;
127 case MMC_RSP_R1: 127 case MMC_RSP_R1:
128 return SD_RSP_TYPE_R1; 128 return SD_RSP_TYPE_R1;
129 case MMC_RSP_R1 & ~MMC_RSP_CRC: 129 case MMC_RSP_R1_NO_CRC:
130 return SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7; 130 return SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7;
131 case MMC_RSP_R1B: 131 case MMC_RSP_R1B:
132 return SD_RSP_TYPE_R1b; 132 return SD_RSP_TYPE_R1b;
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 6c71fc9f76c7..4106295527b9 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -324,7 +324,7 @@ static void sd_send_cmd_get_rsp(struct rtsx_usb_sdmmc *host,
324 case MMC_RSP_R1: 324 case MMC_RSP_R1:
325 rsp_type = SD_RSP_TYPE_R1; 325 rsp_type = SD_RSP_TYPE_R1;
326 break; 326 break;
327 case MMC_RSP_R1 & ~MMC_RSP_CRC: 327 case MMC_RSP_R1_NO_CRC:
328 rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7; 328 rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7;
329 break; 329 break;
330 case MMC_RSP_R1B: 330 case MMC_RSP_R1B:
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 8fe0756c8e1e..81d4dc034793 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -275,7 +275,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
275 .chip = &sdhci_acpi_chip_int, 275 .chip = &sdhci_acpi_chip_int,
276 .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | 276 .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
277 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | 277 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
278 MMC_CAP_WAIT_WHILE_BUSY, 278 MMC_CAP_CMD_DURING_TFR | MMC_CAP_WAIT_WHILE_BUSY,
279 .caps2 = MMC_CAP2_HC_ERASE_SZ, 279 .caps2 = MMC_CAP2_HC_ERASE_SZ,
280 .flags = SDHCI_ACPI_RUNTIME_PM, 280 .flags = SDHCI_ACPI_RUNTIME_PM,
281 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 281 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c
index e5c634bdfdd9..51dd2fd65000 100644
--- a/drivers/mmc/host/sdhci-bcm-kona.c
+++ b/drivers/mmc/host/sdhci-bcm-kona.c
@@ -253,12 +253,14 @@ static int sdhci_bcm_kona_probe(struct platform_device *pdev)
253 goto err_pltfm_free; 253 goto err_pltfm_free;
254 } 254 }
255 255
256 if (clk_set_rate(pltfm_priv->clk, host->mmc->f_max) != 0) { 256 ret = clk_set_rate(pltfm_priv->clk, host->mmc->f_max);
257 if (ret) {
257 dev_err(dev, "Failed to set rate core clock\n"); 258 dev_err(dev, "Failed to set rate core clock\n");
258 goto err_pltfm_free; 259 goto err_pltfm_free;
259 } 260 }
260 261
261 if (clk_prepare_enable(pltfm_priv->clk) != 0) { 262 ret = clk_prepare_enable(pltfm_priv->clk);
263 if (ret) {
262 dev_err(dev, "Failed to enable core clock\n"); 264 dev_err(dev, "Failed to enable core clock\n");
263 goto err_pltfm_free; 265 goto err_pltfm_free;
264 } 266 }
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index cce10fe3e19e..159f6f64c68e 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -98,6 +98,8 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
98 * properties through mmc_of_parse(). 98 * properties through mmc_of_parse().
99 */ 99 */
100 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); 100 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
101 if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm7425-sdhci"))
102 host->caps &= ~SDHCI_CAN_64BIT;
101 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); 103 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
102 host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 | 104 host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
103 SDHCI_SUPPORT_DDR50); 105 SDHCI_SUPPORT_DDR50);
@@ -121,6 +123,7 @@ err_clk:
121 123
122static const struct of_device_id sdhci_brcm_of_match[] = { 124static const struct of_device_id sdhci_brcm_of_match[] = {
123 { .compatible = "brcm,bcm7425-sdhci" }, 125 { .compatible = "brcm,bcm7425-sdhci" },
126 { .compatible = "brcm,bcm7445-sdhci" },
124 {}, 127 {},
125}; 128};
126MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match); 129MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
@@ -128,7 +131,6 @@ MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
128static struct platform_driver sdhci_brcmstb_driver = { 131static struct platform_driver sdhci_brcmstb_driver = {
129 .driver = { 132 .driver = {
130 .name = "sdhci-brcmstb", 133 .name = "sdhci-brcmstb",
131 .owner = THIS_MODULE,
132 .pm = &sdhci_brcmstb_pmops, 134 .pm = &sdhci_brcmstb_pmops,
133 .of_match_table = of_match_ptr(sdhci_brcm_of_match), 135 .of_match_table = of_match_ptr(sdhci_brcm_of_match),
134 }, 136 },
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 99e0b334f9df..1f54fd8755c8 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -31,6 +31,7 @@
31#include "sdhci-pltfm.h" 31#include "sdhci-pltfm.h"
32#include "sdhci-esdhc.h" 32#include "sdhci-esdhc.h"
33 33
34#define ESDHC_SYS_CTRL_DTOCV_MASK 0x0f
34#define ESDHC_CTRL_D3CD 0x08 35#define ESDHC_CTRL_D3CD 0x08
35#define ESDHC_BURST_LEN_EN_INCR (1 << 27) 36#define ESDHC_BURST_LEN_EN_INCR (1 << 27)
36/* VENDOR SPEC register */ 37/* VENDOR SPEC register */
@@ -928,7 +929,8 @@ static unsigned int esdhc_get_max_timeout_count(struct sdhci_host *host)
928 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 929 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
929 struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); 930 struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
930 931
931 return esdhc_is_usdhc(imx_data) ? 1 << 28 : 1 << 27; 932 /* Doc Errata: the uSDHC actual maximum timeout count is 1 << 29 */
933 return esdhc_is_usdhc(imx_data) ? 1 << 29 : 1 << 27;
932} 934}
933 935
934static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) 936static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
@@ -937,7 +939,8 @@ static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
937 struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); 939 struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
938 940
939 /* use maximum timeout counter */ 941 /* use maximum timeout counter */
940 sdhci_writeb(host, esdhc_is_usdhc(imx_data) ? 0xF : 0xE, 942 esdhc_clrset_le(host, ESDHC_SYS_CTRL_DTOCV_MASK,
943 esdhc_is_usdhc(imx_data) ? 0xF : 0xE,
941 SDHCI_TIMEOUT_CONTROL); 944 SDHCI_TIMEOUT_CONTROL);
942} 945}
943 946
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index e0f193f7e3e5..da8e40af6f85 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -26,6 +26,7 @@
26#include <linux/phy/phy.h> 26#include <linux/phy/phy.h>
27#include <linux/regmap.h> 27#include <linux/regmap.h>
28#include "sdhci-pltfm.h" 28#include "sdhci-pltfm.h"
29#include <linux/of.h>
29 30
30#define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c 31#define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c
31#define SDHCI_ARASAN_VENDOR_REGISTER 0x78 32#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
@@ -35,6 +36,8 @@
35#define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT) 36#define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT)
36#define CLK_CTRL_TIMEOUT_MIN_EXP 13 37#define CLK_CTRL_TIMEOUT_MIN_EXP 13
37 38
39#define PHY_CLK_TOO_SLOW_HZ 400000
40
38/* 41/*
39 * On some SoCs the syscon area has a feature where the upper 16-bits of 42 * On some SoCs the syscon area has a feature where the upper 16-bits of
40 * each 32-bit register act as a write mask for the lower 16-bits. This allows 43 * each 32-bit register act as a write mask for the lower 16-bits. This allows
@@ -65,10 +68,12 @@ struct sdhci_arasan_soc_ctl_field {
65 * accessible via the syscon API. 68 * accessible via the syscon API.
66 * 69 *
67 * @baseclkfreq: Where to find corecfg_baseclkfreq 70 * @baseclkfreq: Where to find corecfg_baseclkfreq
71 * @clockmultiplier: Where to find corecfg_clockmultiplier
68 * @hiword_update: If true, use HIWORD_UPDATE to access the syscon 72 * @hiword_update: If true, use HIWORD_UPDATE to access the syscon
69 */ 73 */
70struct sdhci_arasan_soc_ctl_map { 74struct sdhci_arasan_soc_ctl_map {
71 struct sdhci_arasan_soc_ctl_field baseclkfreq; 75 struct sdhci_arasan_soc_ctl_field baseclkfreq;
76 struct sdhci_arasan_soc_ctl_field clockmultiplier;
72 bool hiword_update; 77 bool hiword_update;
73}; 78};
74 79
@@ -77,6 +82,7 @@ struct sdhci_arasan_soc_ctl_map {
77 * @host: Pointer to the main SDHCI host structure. 82 * @host: Pointer to the main SDHCI host structure.
78 * @clk_ahb: Pointer to the AHB clock 83 * @clk_ahb: Pointer to the AHB clock
79 * @phy: Pointer to the generic phy 84 * @phy: Pointer to the generic phy
85 * @is_phy_on: True if the PHY is on; false if not.
80 * @sdcardclk_hw: Struct for the clock we might provide to a PHY. 86 * @sdcardclk_hw: Struct for the clock we might provide to a PHY.
81 * @sdcardclk: Pointer to normal 'struct clock' for sdcardclk_hw. 87 * @sdcardclk: Pointer to normal 'struct clock' for sdcardclk_hw.
82 * @soc_ctl_base: Pointer to regmap for syscon for soc_ctl registers. 88 * @soc_ctl_base: Pointer to regmap for syscon for soc_ctl registers.
@@ -86,16 +92,22 @@ struct sdhci_arasan_data {
86 struct sdhci_host *host; 92 struct sdhci_host *host;
87 struct clk *clk_ahb; 93 struct clk *clk_ahb;
88 struct phy *phy; 94 struct phy *phy;
95 bool is_phy_on;
89 96
90 struct clk_hw sdcardclk_hw; 97 struct clk_hw sdcardclk_hw;
91 struct clk *sdcardclk; 98 struct clk *sdcardclk;
92 99
93 struct regmap *soc_ctl_base; 100 struct regmap *soc_ctl_base;
94 const struct sdhci_arasan_soc_ctl_map *soc_ctl_map; 101 const struct sdhci_arasan_soc_ctl_map *soc_ctl_map;
102 unsigned int quirks; /* Arasan deviations from spec */
103
104/* Controller does not have CD wired and will not function normally without */
105#define SDHCI_ARASAN_QUIRK_FORCE_CDTEST BIT(0)
95}; 106};
96 107
97static const struct sdhci_arasan_soc_ctl_map rk3399_soc_ctl_map = { 108static const struct sdhci_arasan_soc_ctl_map rk3399_soc_ctl_map = {
98 .baseclkfreq = { .reg = 0xf000, .width = 8, .shift = 8 }, 109 .baseclkfreq = { .reg = 0xf000, .width = 8, .shift = 8 },
110 .clockmultiplier = { .reg = 0xf02c, .width = 8, .shift = 0},
99 .hiword_update = true, 111 .hiword_update = true,
100}; 112};
101 113
@@ -170,13 +182,47 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
170 struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host); 182 struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
171 bool ctrl_phy = false; 183 bool ctrl_phy = false;
172 184
173 if (clock > MMC_HIGH_52_MAX_DTR && (!IS_ERR(sdhci_arasan->phy))) 185 if (!IS_ERR(sdhci_arasan->phy)) {
174 ctrl_phy = true; 186 if (!sdhci_arasan->is_phy_on && clock <= PHY_CLK_TOO_SLOW_HZ) {
187 /*
188 * If PHY off, set clock to max speed and power PHY on.
189 *
190 * Although PHY docs apparently suggest power cycling
191 * when changing the clock the PHY doesn't like to be
192 * powered on while at low speeds like those used in ID
193 * mode. Even worse is powering the PHY on while the
194 * clock is off.
195 *
196 * To workaround the PHY limitations, the best we can
197 * do is to power it on at a faster speed and then slam
198 * through low speeds without power cycling.
199 */
200 sdhci_set_clock(host, host->max_clk);
201 spin_unlock_irq(&host->lock);
202 phy_power_on(sdhci_arasan->phy);
203 spin_lock_irq(&host->lock);
204 sdhci_arasan->is_phy_on = true;
205
206 /*
207 * We'll now fall through to the below case with
208 * ctrl_phy = false (so we won't turn off/on). The
209 * sdhci_set_clock() will set the real clock.
210 */
211 } else if (clock > PHY_CLK_TOO_SLOW_HZ) {
212 /*
213 * At higher clock speeds the PHY is fine being power
214 * cycled and docs say you _should_ power cycle when
215 * changing clock speeds.
216 */
217 ctrl_phy = true;
218 }
219 }
175 220
176 if (ctrl_phy) { 221 if (ctrl_phy && sdhci_arasan->is_phy_on) {
177 spin_unlock_irq(&host->lock); 222 spin_unlock_irq(&host->lock);
178 phy_power_off(sdhci_arasan->phy); 223 phy_power_off(sdhci_arasan->phy);
179 spin_lock_irq(&host->lock); 224 spin_lock_irq(&host->lock);
225 sdhci_arasan->is_phy_on = false;
180 } 226 }
181 227
182 sdhci_set_clock(host, clock); 228 sdhci_set_clock(host, clock);
@@ -185,6 +231,7 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
185 spin_unlock_irq(&host->lock); 231 spin_unlock_irq(&host->lock);
186 phy_power_on(sdhci_arasan->phy); 232 phy_power_on(sdhci_arasan->phy);
187 spin_lock_irq(&host->lock); 233 spin_lock_irq(&host->lock);
234 sdhci_arasan->is_phy_on = true;
188 } 235 }
189} 236}
190 237
@@ -203,12 +250,27 @@ static void sdhci_arasan_hs400_enhanced_strobe(struct mmc_host *mmc,
203 writel(vendor, host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER); 250 writel(vendor, host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER);
204} 251}
205 252
253void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
254{
255 u8 ctrl;
256 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
257 struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
258
259 sdhci_reset(host, mask);
260
261 if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_FORCE_CDTEST) {
262 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
263 ctrl |= SDHCI_CTRL_CDTEST_INS | SDHCI_CTRL_CDTEST_EN;
264 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
265 }
266}
267
206static struct sdhci_ops sdhci_arasan_ops = { 268static struct sdhci_ops sdhci_arasan_ops = {
207 .set_clock = sdhci_arasan_set_clock, 269 .set_clock = sdhci_arasan_set_clock,
208 .get_max_clock = sdhci_pltfm_clk_get_max_clock, 270 .get_max_clock = sdhci_pltfm_clk_get_max_clock,
209 .get_timeout_clock = sdhci_arasan_get_timeout_clock, 271 .get_timeout_clock = sdhci_arasan_get_timeout_clock,
210 .set_bus_width = sdhci_set_bus_width, 272 .set_bus_width = sdhci_set_bus_width,
211 .reset = sdhci_reset, 273 .reset = sdhci_arasan_reset,
212 .set_uhs_signaling = sdhci_set_uhs_signaling, 274 .set_uhs_signaling = sdhci_set_uhs_signaling,
213}; 275};
214 276
@@ -239,13 +301,14 @@ static int sdhci_arasan_suspend(struct device *dev)
239 if (ret) 301 if (ret)
240 return ret; 302 return ret;
241 303
242 if (!IS_ERR(sdhci_arasan->phy)) { 304 if (!IS_ERR(sdhci_arasan->phy) && sdhci_arasan->is_phy_on) {
243 ret = phy_power_off(sdhci_arasan->phy); 305 ret = phy_power_off(sdhci_arasan->phy);
244 if (ret) { 306 if (ret) {
245 dev_err(dev, "Cannot power off phy.\n"); 307 dev_err(dev, "Cannot power off phy.\n");
246 sdhci_resume_host(host); 308 sdhci_resume_host(host);
247 return ret; 309 return ret;
248 } 310 }
311 sdhci_arasan->is_phy_on = false;
249 } 312 }
250 313
251 clk_disable(pltfm_host->clk); 314 clk_disable(pltfm_host->clk);
@@ -281,12 +344,13 @@ static int sdhci_arasan_resume(struct device *dev)
281 return ret; 344 return ret;
282 } 345 }
283 346
284 if (!IS_ERR(sdhci_arasan->phy)) { 347 if (!IS_ERR(sdhci_arasan->phy) && host->mmc->actual_clock) {
285 ret = phy_power_on(sdhci_arasan->phy); 348 ret = phy_power_on(sdhci_arasan->phy);
286 if (ret) { 349 if (ret) {
287 dev_err(dev, "Cannot power on phy.\n"); 350 dev_err(dev, "Cannot power on phy.\n");
288 return ret; 351 return ret;
289 } 352 }
353 sdhci_arasan->is_phy_on = true;
290 } 354 }
291 355
292 return sdhci_resume_host(host); 356 return sdhci_resume_host(host);
@@ -338,6 +402,45 @@ static const struct clk_ops arasan_sdcardclk_ops = {
338}; 402};
339 403
340/** 404/**
405 * sdhci_arasan_update_clockmultiplier - Set corecfg_clockmultiplier
406 *
407 * The corecfg_clockmultiplier is supposed to contain clock multiplier
408 * value of programmable clock generator.
409 *
410 * NOTES:
411 * - Many existing devices don't seem to do this and work fine. To keep
412 * compatibility for old hardware where the device tree doesn't provide a
413 * register map, this function is a noop if a soc_ctl_map hasn't been provided
414 * for this platform.
415 * - The value of corecfg_clockmultiplier should sync with that of corresponding
416 * value reading from sdhci_capability_register. So this function is called
417 * once at probe time and never called again.
418 *
419 * @host: The sdhci_host
420 */
421static void sdhci_arasan_update_clockmultiplier(struct sdhci_host *host,
422 u32 value)
423{
424 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
425 struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
426 const struct sdhci_arasan_soc_ctl_map *soc_ctl_map =
427 sdhci_arasan->soc_ctl_map;
428
429 /* Having a map is optional */
430 if (!soc_ctl_map)
431 return;
432
433 /* If we have a map, we expect to have a syscon */
434 if (!sdhci_arasan->soc_ctl_base) {
435 pr_warn("%s: Have regmap, but no soc-ctl-syscon\n",
436 mmc_hostname(host->mmc));
437 return;
438 }
439
440 sdhci_arasan_syscon_write(host, &soc_ctl_map->clockmultiplier, value);
441}
442
443/**
341 * sdhci_arasan_update_baseclkfreq - Set corecfg_baseclkfreq 444 * sdhci_arasan_update_baseclkfreq - Set corecfg_baseclkfreq
342 * 445 *
343 * The corecfg_baseclkfreq is supposed to contain the MHz of clk_xin. This 446 * The corecfg_baseclkfreq is supposed to contain the MHz of clk_xin. This
@@ -462,6 +565,7 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
462 struct sdhci_host *host; 565 struct sdhci_host *host;
463 struct sdhci_pltfm_host *pltfm_host; 566 struct sdhci_pltfm_host *pltfm_host;
464 struct sdhci_arasan_data *sdhci_arasan; 567 struct sdhci_arasan_data *sdhci_arasan;
568 struct device_node *np = pdev->dev.of_node;
465 569
466 host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata, 570 host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata,
467 sizeof(*sdhci_arasan)); 571 sizeof(*sdhci_arasan));
@@ -516,8 +620,16 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
516 } 620 }
517 621
518 sdhci_get_of_property(pdev); 622 sdhci_get_of_property(pdev);
623
624 if (of_property_read_bool(np, "xlnx,fails-without-test-cd"))
625 sdhci_arasan->quirks |= SDHCI_ARASAN_QUIRK_FORCE_CDTEST;
626
519 pltfm_host->clk = clk_xin; 627 pltfm_host->clk = clk_xin;
520 628
629 if (of_device_is_compatible(pdev->dev.of_node,
630 "rockchip,rk3399-sdhci-5.1"))
631 sdhci_arasan_update_clockmultiplier(host, 0x0);
632
521 sdhci_arasan_update_baseclkfreq(host); 633 sdhci_arasan_update_baseclkfreq(host);
522 634
523 ret = sdhci_arasan_register_sdclk(sdhci_arasan, clk_xin, &pdev->dev); 635 ret = sdhci_arasan_register_sdclk(sdhci_arasan, clk_xin, &pdev->dev);
@@ -547,12 +659,6 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
547 goto unreg_clk; 659 goto unreg_clk;
548 } 660 }
549 661
550 ret = phy_power_on(sdhci_arasan->phy);
551 if (ret < 0) {
552 dev_err(&pdev->dev, "phy_power_on err.\n");
553 goto err_phy_power;
554 }
555
556 host->mmc_host_ops.hs400_enhanced_strobe = 662 host->mmc_host_ops.hs400_enhanced_strobe =
557 sdhci_arasan_hs400_enhanced_strobe; 663 sdhci_arasan_hs400_enhanced_strobe;
558 } 664 }
@@ -565,9 +671,6 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
565 671
566err_add_host: 672err_add_host:
567 if (!IS_ERR(sdhci_arasan->phy)) 673 if (!IS_ERR(sdhci_arasan->phy))
568 phy_power_off(sdhci_arasan->phy);
569err_phy_power:
570 if (!IS_ERR(sdhci_arasan->phy))
571 phy_exit(sdhci_arasan->phy); 674 phy_exit(sdhci_arasan->phy);
572unreg_clk: 675unreg_clk:
573 sdhci_arasan_unregister_sdclk(&pdev->dev); 676 sdhci_arasan_unregister_sdclk(&pdev->dev);
@@ -589,7 +692,8 @@ static int sdhci_arasan_remove(struct platform_device *pdev)
589 struct clk *clk_ahb = sdhci_arasan->clk_ahb; 692 struct clk *clk_ahb = sdhci_arasan->clk_ahb;
590 693
591 if (!IS_ERR(sdhci_arasan->phy)) { 694 if (!IS_ERR(sdhci_arasan->phy)) {
592 phy_power_off(sdhci_arasan->phy); 695 if (sdhci_arasan->is_phy_on)
696 phy_power_off(sdhci_arasan->phy);
593 phy_exit(sdhci_arasan->phy); 697 phy_exit(sdhci_arasan->phy);
594 } 698 }
595 699
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 239be2fde242..fb71c866eacc 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -583,7 +583,7 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
583 583
584 np = pdev->dev.of_node; 584 np = pdev->dev.of_node;
585 585
586 if (of_get_property(np, "little-endian", NULL)) 586 if (of_property_read_bool(np, "little-endian"))
587 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata, 587 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
588 sizeof(struct sdhci_esdhc)); 588 sizeof(struct sdhci_esdhc));
589 else 589 else
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 897cfd24ca2e..72a1f1f5180a 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -156,7 +156,7 @@ static void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot)
156 if (!gpio_is_valid(gpio)) 156 if (!gpio_is_valid(gpio))
157 return; 157 return;
158 158
159 err = gpio_request(gpio, "sd_cd"); 159 err = devm_gpio_request(&slot->chip->pdev->dev, gpio, "sd_cd");
160 if (err < 0) 160 if (err < 0)
161 goto out; 161 goto out;
162 162
@@ -179,7 +179,7 @@ static void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot)
179 return; 179 return;
180 180
181out_free: 181out_free:
182 gpio_free(gpio); 182 devm_gpio_free(&slot->chip->pdev->dev, gpio);
183out: 183out:
184 dev_warn(&slot->chip->pdev->dev, "failed to setup card detect wake up\n"); 184 dev_warn(&slot->chip->pdev->dev, "failed to setup card detect wake up\n");
185} 185}
@@ -188,8 +188,6 @@ static void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot)
188{ 188{
189 if (slot->cd_irq >= 0) 189 if (slot->cd_irq >= 0)
190 free_irq(slot->cd_irq, slot); 190 free_irq(slot->cd_irq, slot);
191 if (gpio_is_valid(slot->cd_gpio))
192 gpio_free(slot->cd_gpio);
193} 191}
194 192
195#else 193#else
@@ -356,6 +354,7 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
356{ 354{
357 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | 355 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
358 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | 356 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
357 MMC_CAP_CMD_DURING_TFR |
359 MMC_CAP_WAIT_WHILE_BUSY; 358 MMC_CAP_WAIT_WHILE_BUSY;
360 slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ; 359 slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
361 slot->hw_reset = sdhci_pci_int_hw_reset; 360 slot->hw_reset = sdhci_pci_int_hw_reset;
@@ -421,17 +420,30 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
421/* Define Host controllers for Intel Merrifield platform */ 420/* Define Host controllers for Intel Merrifield platform */
422#define INTEL_MRFLD_EMMC_0 0 421#define INTEL_MRFLD_EMMC_0 0
423#define INTEL_MRFLD_EMMC_1 1 422#define INTEL_MRFLD_EMMC_1 1
423#define INTEL_MRFLD_SD 2
424#define INTEL_MRFLD_SDIO 3
424 425
425static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot) 426static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
426{ 427{
427 if ((PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFLD_EMMC_0) && 428 unsigned int func = PCI_FUNC(slot->chip->pdev->devfn);
428 (PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFLD_EMMC_1)) 429
429 /* SD support is not ready yet */ 430 switch (func) {
431 case INTEL_MRFLD_EMMC_0:
432 case INTEL_MRFLD_EMMC_1:
433 slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
434 MMC_CAP_8_BIT_DATA |
435 MMC_CAP_1_8V_DDR;
436 break;
437 case INTEL_MRFLD_SD:
438 slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
439 break;
440 case INTEL_MRFLD_SDIO:
441 slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
442 MMC_CAP_POWER_OFF_CARD;
443 break;
444 default:
430 return -ENODEV; 445 return -ENODEV;
431 446 }
432 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
433 MMC_CAP_1_8V_DDR;
434
435 return 0; 447 return 0;
436} 448}
437 449
@@ -1615,7 +1627,6 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
1615 1627
1616 slot->chip = chip; 1628 slot->chip = chip;
1617 slot->host = host; 1629 slot->host = host;
1618 slot->pci_bar = bar;
1619 slot->rst_n_gpio = -EINVAL; 1630 slot->rst_n_gpio = -EINVAL;
1620 slot->cd_gpio = -EINVAL; 1631 slot->cd_gpio = -EINVAL;
1621 slot->cd_idx = -1; 1632 slot->cd_idx = -1;
@@ -1643,27 +1654,22 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
1643 1654
1644 host->irq = pdev->irq; 1655 host->irq = pdev->irq;
1645 1656
1646 ret = pci_request_region(pdev, bar, mmc_hostname(host->mmc)); 1657 ret = pcim_iomap_regions(pdev, BIT(bar), mmc_hostname(host->mmc));
1647 if (ret) { 1658 if (ret) {
1648 dev_err(&pdev->dev, "cannot request region\n"); 1659 dev_err(&pdev->dev, "cannot request region\n");
1649 goto cleanup; 1660 goto cleanup;
1650 } 1661 }
1651 1662
1652 host->ioaddr = pci_ioremap_bar(pdev, bar); 1663 host->ioaddr = pcim_iomap_table(pdev)[bar];
1653 if (!host->ioaddr) {
1654 dev_err(&pdev->dev, "failed to remap registers\n");
1655 ret = -ENOMEM;
1656 goto release;
1657 }
1658 1664
1659 if (chip->fixes && chip->fixes->probe_slot) { 1665 if (chip->fixes && chip->fixes->probe_slot) {
1660 ret = chip->fixes->probe_slot(slot); 1666 ret = chip->fixes->probe_slot(slot);
1661 if (ret) 1667 if (ret)
1662 goto unmap; 1668 goto cleanup;
1663 } 1669 }
1664 1670
1665 if (gpio_is_valid(slot->rst_n_gpio)) { 1671 if (gpio_is_valid(slot->rst_n_gpio)) {
1666 if (!gpio_request(slot->rst_n_gpio, "eMMC_reset")) { 1672 if (!devm_gpio_request(&pdev->dev, slot->rst_n_gpio, "eMMC_reset")) {
1667 gpio_direction_output(slot->rst_n_gpio, 1); 1673 gpio_direction_output(slot->rst_n_gpio, 1);
1668 slot->host->mmc->caps |= MMC_CAP_HW_RESET; 1674 slot->host->mmc->caps |= MMC_CAP_HW_RESET;
1669 slot->hw_reset = sdhci_pci_gpio_hw_reset; 1675 slot->hw_reset = sdhci_pci_gpio_hw_reset;
@@ -1702,18 +1708,9 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
1702 return slot; 1708 return slot;
1703 1709
1704remove: 1710remove:
1705 if (gpio_is_valid(slot->rst_n_gpio))
1706 gpio_free(slot->rst_n_gpio);
1707
1708 if (chip->fixes && chip->fixes->remove_slot) 1711 if (chip->fixes && chip->fixes->remove_slot)
1709 chip->fixes->remove_slot(slot, 0); 1712 chip->fixes->remove_slot(slot, 0);
1710 1713
1711unmap:
1712 iounmap(host->ioaddr);
1713
1714release:
1715 pci_release_region(pdev, bar);
1716
1717cleanup: 1714cleanup:
1718 if (slot->data && slot->data->cleanup) 1715 if (slot->data && slot->data->cleanup)
1719 slot->data->cleanup(slot->data); 1716 slot->data->cleanup(slot->data);
@@ -1738,17 +1735,12 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
1738 1735
1739 sdhci_remove_host(slot->host, dead); 1736 sdhci_remove_host(slot->host, dead);
1740 1737
1741 if (gpio_is_valid(slot->rst_n_gpio))
1742 gpio_free(slot->rst_n_gpio);
1743
1744 if (slot->chip->fixes && slot->chip->fixes->remove_slot) 1738 if (slot->chip->fixes && slot->chip->fixes->remove_slot)
1745 slot->chip->fixes->remove_slot(slot, dead); 1739 slot->chip->fixes->remove_slot(slot, dead);
1746 1740
1747 if (slot->data && slot->data->cleanup) 1741 if (slot->data && slot->data->cleanup)
1748 slot->data->cleanup(slot->data); 1742 slot->data->cleanup(slot->data);
1749 1743
1750 pci_release_region(slot->chip->pdev, slot->pci_bar);
1751
1752 sdhci_free_host(slot->host); 1744 sdhci_free_host(slot->host);
1753} 1745}
1754 1746
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 7e0788712e1a..9c7c08b93223 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -72,7 +72,6 @@ struct sdhci_pci_slot {
72 struct sdhci_host *host; 72 struct sdhci_host *host;
73 struct sdhci_pci_data *data; 73 struct sdhci_pci_data *data;
74 74
75 int pci_bar;
76 int rst_n_gpio; 75 int rst_n_gpio;
77 int cd_gpio; 76 int cd_gpio;
78 int cd_irq; 77 int cd_irq;
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 1d17dcfc3ffb..ad49bfaf5bf8 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -156,13 +156,6 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
156 host->quirks2 = pdata->quirks2; 156 host->quirks2 = pdata->quirks2;
157 } 157 }
158 158
159 /*
160 * Some platforms need to probe the controller to be able to
161 * determine which caps should be used.
162 */
163 if (host->ops && host->ops->platform_init)
164 host->ops->platform_init(host);
165
166 platform_set_drvdata(pdev, host); 159 platform_set_drvdata(pdev, host);
167 160
168 return host; 161 return host;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 1e93dc4e303e..20b6ff5b4af1 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -391,6 +391,31 @@ static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
391 .pdata = &sdhci_tegra114_pdata, 391 .pdata = &sdhci_tegra114_pdata,
392}; 392};
393 393
394static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
395 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
396 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
397 SDHCI_QUIRK_SINGLE_POWER_WRITE |
398 SDHCI_QUIRK_NO_HISPD_BIT |
399 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
400 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
401 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
402 /*
403 * The TRM states that the SD/MMC controller found on
404 * Tegra124 can address 34 bits (the maximum supported by
405 * the Tegra memory controller), but tests show that DMA
406 * to or from above 4 GiB doesn't work. This is possibly
407 * caused by missing programming, though it's not obvious
408 * what sequence is required. Mark 64-bit DMA broken for
409 * now to fix this for existing users (e.g. Nyan boards).
410 */
411 SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
412 .ops = &tegra114_sdhci_ops,
413};
414
415static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
416 .pdata = &sdhci_tegra124_pdata,
417};
418
394static const struct sdhci_pltfm_data sdhci_tegra210_pdata = { 419static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
395 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 420 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
396 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | 421 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
@@ -408,7 +433,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
408 433
409static const struct of_device_id sdhci_tegra_dt_match[] = { 434static const struct of_device_id sdhci_tegra_dt_match[] = {
410 { .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 }, 435 { .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
411 { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 }, 436 { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
412 { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 }, 437 { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
413 { .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 }, 438 { .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
414 { .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 }, 439 { .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index cd65d474afa2..48055666c655 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -888,7 +888,8 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
888static inline bool sdhci_auto_cmd12(struct sdhci_host *host, 888static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
889 struct mmc_request *mrq) 889 struct mmc_request *mrq)
890{ 890{
891 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12); 891 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
892 !mrq->cap_cmd_during_tfr;
892} 893}
893 894
894static void sdhci_set_transfer_mode(struct sdhci_host *host, 895static void sdhci_set_transfer_mode(struct sdhci_host *host,
@@ -1031,9 +1032,18 @@ static void sdhci_finish_data(struct sdhci_host *host)
1031 sdhci_do_reset(host, SDHCI_RESET_DATA); 1032 sdhci_do_reset(host, SDHCI_RESET_DATA);
1032 } 1033 }
1033 1034
1034 /* Avoid triggering warning in sdhci_send_command() */ 1035 /*
1035 host->cmd = NULL; 1036 * 'cap_cmd_during_tfr' request must not use the command line
1036 sdhci_send_command(host, data->stop); 1037 * after mmc_command_done() has been called. It is upper layer's
1038 * responsibility to send the stop command if required.
1039 */
1040 if (data->mrq->cap_cmd_during_tfr) {
1041 sdhci_finish_mrq(host, data->mrq);
1042 } else {
1043 /* Avoid triggering warning in sdhci_send_command() */
1044 host->cmd = NULL;
1045 sdhci_send_command(host, data->stop);
1046 }
1037 } else { 1047 } else {
1038 sdhci_finish_mrq(host, data->mrq); 1048 sdhci_finish_mrq(host, data->mrq);
1039 } 1049 }
@@ -1165,6 +1175,9 @@ static void sdhci_finish_command(struct sdhci_host *host)
1165 } 1175 }
1166 } 1176 }
1167 1177
1178 if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
1179 mmc_command_done(host->mmc, cmd->mrq);
1180
1168 /* 1181 /*
1169 * The host can send and interrupt when the busy state has 1182 * The host can send and interrupt when the busy state has
1170 * ended, allowing us to wait without wasting CPU cycles. 1183 * ended, allowing us to wait without wasting CPU cycles.
@@ -2062,7 +2075,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2062 2075
2063 spin_unlock_irqrestore(&host->lock, flags); 2076 spin_unlock_irqrestore(&host->lock, flags);
2064 /* Wait for Buffer Read Ready interrupt */ 2077 /* Wait for Buffer Read Ready interrupt */
2065 wait_event_interruptible_timeout(host->buf_ready_int, 2078 wait_event_timeout(host->buf_ready_int,
2066 (host->tuning_done == 1), 2079 (host->tuning_done == 1),
2067 msecs_to_jiffies(50)); 2080 msecs_to_jiffies(50));
2068 spin_lock_irqsave(&host->lock, flags); 2081 spin_lock_irqsave(&host->lock, flags);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 0411c9f36461..c722cd23205c 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -84,6 +84,8 @@
84#define SDHCI_CTRL_ADMA32 0x10 84#define SDHCI_CTRL_ADMA32 0x10
85#define SDHCI_CTRL_ADMA64 0x18 85#define SDHCI_CTRL_ADMA64 0x18
86#define SDHCI_CTRL_8BITBUS 0x20 86#define SDHCI_CTRL_8BITBUS 0x20
87#define SDHCI_CTRL_CDTEST_INS 0x40
88#define SDHCI_CTRL_CDTEST_EN 0x80
87 89
88#define SDHCI_POWER_CONTROL 0x29 90#define SDHCI_POWER_CONTROL 0x29
89#define SDHCI_POWER_ON 0x01 91#define SDHCI_POWER_ON 0x01
@@ -555,7 +557,6 @@ struct sdhci_ops {
555 void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); 557 void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
556 void (*hw_reset)(struct sdhci_host *host); 558 void (*hw_reset)(struct sdhci_host *host);
557 void (*adma_workaround)(struct sdhci_host *host, u32 intmask); 559 void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
558 void (*platform_init)(struct sdhci_host *host);
559 void (*card_event)(struct sdhci_host *host); 560 void (*card_event)(struct sdhci_host *host);
560 void (*voltage_switch)(struct sdhci_host *host); 561 void (*voltage_switch)(struct sdhci_host *host);
561 int (*select_drive_strength)(struct sdhci_host *host, 562 int (*select_drive_strength)(struct sdhci_host *host,
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index c3b651bf89cb..49edff7fee49 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -94,6 +94,7 @@ static const struct of_device_id sh_mobile_sdhi_of_match[] = {
94 { .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, }, 94 { .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, },
95 { .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, }, 95 { .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
96 { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, }, 96 { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
97 { .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
97 {}, 98 {},
98}; 99};
99MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match); 100MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
@@ -213,6 +214,13 @@ static void sh_mobile_sdhi_clk_disable(struct tmio_mmc_host *host)
213 clk_disable_unprepare(priv->clk); 214 clk_disable_unprepare(priv->clk);
214} 215}
215 216
217static int sh_mobile_sdhi_card_busy(struct mmc_host *mmc)
218{
219 struct tmio_mmc_host *host = mmc_priv(mmc);
220
221 return !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_DAT0);
222}
223
216static int sh_mobile_sdhi_start_signal_voltage_switch(struct mmc_host *mmc, 224static int sh_mobile_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
217 struct mmc_ios *ios) 225 struct mmc_ios *ios)
218{ 226{
@@ -369,7 +377,14 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
369 host->clk_update = sh_mobile_sdhi_clk_update; 377 host->clk_update = sh_mobile_sdhi_clk_update;
370 host->clk_disable = sh_mobile_sdhi_clk_disable; 378 host->clk_disable = sh_mobile_sdhi_clk_disable;
371 host->multi_io_quirk = sh_mobile_sdhi_multi_io_quirk; 379 host->multi_io_quirk = sh_mobile_sdhi_multi_io_quirk;
372 host->start_signal_voltage_switch = sh_mobile_sdhi_start_signal_voltage_switch; 380
381 /* SDR speeds are only available on Gen2+ */
382 if (mmc_data->flags & TMIO_MMC_MIN_RCAR2) {
383 /* card_busy caused issues on r8a73a4 (pre-Gen2) CD-less SDHI */
384 host->card_busy = sh_mobile_sdhi_card_busy;
385 host->start_signal_voltage_switch =
386 sh_mobile_sdhi_start_signal_voltage_switch;
387 }
373 388
374 /* Orginally registers were 16 bit apart, could be 32 or 64 nowadays */ 389 /* Orginally registers were 16 bit apart, could be 32 or 64 nowadays */
375 if (!host->bus_shift && resource_size(res) > 0x100) /* old way to determine the shift */ 390 if (!host->bus_shift && resource_size(res) > 0x100) /* old way to determine the shift */
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 2ee4c21ec55e..c0a5c676d0e8 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -72,6 +72,13 @@
72#define SDXC_REG_CHDA (0x90) 72#define SDXC_REG_CHDA (0x90)
73#define SDXC_REG_CBDA (0x94) 73#define SDXC_REG_CBDA (0x94)
74 74
75/* New registers introduced in A64 */
76#define SDXC_REG_A12A 0x058 /* SMC Auto Command 12 Register */
77#define SDXC_REG_SD_NTSR 0x05C /* SMC New Timing Set Register */
78#define SDXC_REG_DRV_DL 0x140 /* Drive Delay Control Register */
79#define SDXC_REG_SAMP_DL_REG 0x144 /* SMC sample delay control */
80#define SDXC_REG_DS_DL_REG 0x148 /* SMC data strobe delay control */
81
75#define mmc_readl(host, reg) \ 82#define mmc_readl(host, reg) \
76 readl((host)->reg_base + SDXC_##reg) 83 readl((host)->reg_base + SDXC_##reg)
77#define mmc_writel(host, reg, value) \ 84#define mmc_writel(host, reg, value) \
@@ -217,21 +224,41 @@
217#define SDXC_CLK_50M_DDR 3 224#define SDXC_CLK_50M_DDR 3
218#define SDXC_CLK_50M_DDR_8BIT 4 225#define SDXC_CLK_50M_DDR_8BIT 4
219 226
227#define SDXC_2X_TIMING_MODE BIT(31)
228
229#define SDXC_CAL_START BIT(15)
230#define SDXC_CAL_DONE BIT(14)
231#define SDXC_CAL_DL_SHIFT 8
232#define SDXC_CAL_DL_SW_EN BIT(7)
233#define SDXC_CAL_DL_SW_SHIFT 0
234#define SDXC_CAL_DL_MASK 0x3f
235
236#define SDXC_CAL_TIMEOUT 3 /* in seconds, 3s is enough*/
237
220struct sunxi_mmc_clk_delay { 238struct sunxi_mmc_clk_delay {
221 u32 output; 239 u32 output;
222 u32 sample; 240 u32 sample;
223}; 241};
224 242
225struct sunxi_idma_des { 243struct sunxi_idma_des {
226 u32 config; 244 __le32 config;
227 u32 buf_size; 245 __le32 buf_size;
228 u32 buf_addr_ptr1; 246 __le32 buf_addr_ptr1;
229 u32 buf_addr_ptr2; 247 __le32 buf_addr_ptr2;
248};
249
250struct sunxi_mmc_cfg {
251 u32 idma_des_size_bits;
252 const struct sunxi_mmc_clk_delay *clk_delays;
253
254 /* does the IP block support autocalibration? */
255 bool can_calibrate;
230}; 256};
231 257
232struct sunxi_mmc_host { 258struct sunxi_mmc_host {
233 struct mmc_host *mmc; 259 struct mmc_host *mmc;
234 struct reset_control *reset; 260 struct reset_control *reset;
261 const struct sunxi_mmc_cfg *cfg;
235 262
236 /* IO mapping base */ 263 /* IO mapping base */
237 void __iomem *reg_base; 264 void __iomem *reg_base;
@@ -241,7 +268,6 @@ struct sunxi_mmc_host {
241 struct clk *clk_mmc; 268 struct clk *clk_mmc;
242 struct clk *clk_sample; 269 struct clk *clk_sample;
243 struct clk *clk_output; 270 struct clk *clk_output;
244 const struct sunxi_mmc_clk_delay *clk_delays;
245 271
246 /* irq */ 272 /* irq */
247 spinlock_t lock; 273 spinlock_t lock;
@@ -250,7 +276,6 @@ struct sunxi_mmc_host {
250 u32 sdio_imask; 276 u32 sdio_imask;
251 277
252 /* dma */ 278 /* dma */
253 u32 idma_des_size_bits;
254 dma_addr_t sg_dma; 279 dma_addr_t sg_dma;
255 void *sg_cpu; 280 void *sg_cpu;
256 bool wait_dma; 281 bool wait_dma;
@@ -322,25 +347,28 @@ static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host,
322{ 347{
323 struct sunxi_idma_des *pdes = (struct sunxi_idma_des *)host->sg_cpu; 348 struct sunxi_idma_des *pdes = (struct sunxi_idma_des *)host->sg_cpu;
324 dma_addr_t next_desc = host->sg_dma; 349 dma_addr_t next_desc = host->sg_dma;
325 int i, max_len = (1 << host->idma_des_size_bits); 350 int i, max_len = (1 << host->cfg->idma_des_size_bits);
326 351
327 for (i = 0; i < data->sg_len; i++) { 352 for (i = 0; i < data->sg_len; i++) {
328 pdes[i].config = SDXC_IDMAC_DES0_CH | SDXC_IDMAC_DES0_OWN | 353 pdes[i].config = cpu_to_le32(SDXC_IDMAC_DES0_CH |
329 SDXC_IDMAC_DES0_DIC; 354 SDXC_IDMAC_DES0_OWN |
355 SDXC_IDMAC_DES0_DIC);
330 356
331 if (data->sg[i].length == max_len) 357 if (data->sg[i].length == max_len)
332 pdes[i].buf_size = 0; /* 0 == max_len */ 358 pdes[i].buf_size = 0; /* 0 == max_len */
333 else 359 else
334 pdes[i].buf_size = data->sg[i].length; 360 pdes[i].buf_size = cpu_to_le32(data->sg[i].length);
335 361
336 next_desc += sizeof(struct sunxi_idma_des); 362 next_desc += sizeof(struct sunxi_idma_des);
337 pdes[i].buf_addr_ptr1 = sg_dma_address(&data->sg[i]); 363 pdes[i].buf_addr_ptr1 =
338 pdes[i].buf_addr_ptr2 = (u32)next_desc; 364 cpu_to_le32(sg_dma_address(&data->sg[i]));
365 pdes[i].buf_addr_ptr2 = cpu_to_le32((u32)next_desc);
339 } 366 }
340 367
341 pdes[0].config |= SDXC_IDMAC_DES0_FD; 368 pdes[0].config |= cpu_to_le32(SDXC_IDMAC_DES0_FD);
342 pdes[i - 1].config |= SDXC_IDMAC_DES0_LD | SDXC_IDMAC_DES0_ER; 369 pdes[i - 1].config |= cpu_to_le32(SDXC_IDMAC_DES0_LD |
343 pdes[i - 1].config &= ~SDXC_IDMAC_DES0_DIC; 370 SDXC_IDMAC_DES0_ER);
371 pdes[i - 1].config &= cpu_to_le32(~SDXC_IDMAC_DES0_DIC);
344 pdes[i - 1].buf_addr_ptr2 = 0; 372 pdes[i - 1].buf_addr_ptr2 = 0;
345 373
346 /* 374 /*
@@ -653,11 +681,84 @@ static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en)
653 return 0; 681 return 0;
654} 682}
655 683
684static int sunxi_mmc_calibrate(struct sunxi_mmc_host *host, int reg_off)
685{
686 u32 reg = readl(host->reg_base + reg_off);
687 u32 delay;
688 unsigned long timeout;
689
690 if (!host->cfg->can_calibrate)
691 return 0;
692
693 reg &= ~(SDXC_CAL_DL_MASK << SDXC_CAL_DL_SW_SHIFT);
694 reg &= ~SDXC_CAL_DL_SW_EN;
695
696 writel(reg | SDXC_CAL_START, host->reg_base + reg_off);
697
698 dev_dbg(mmc_dev(host->mmc), "calibration started\n");
699
700 timeout = jiffies + HZ * SDXC_CAL_TIMEOUT;
701
702 while (!((reg = readl(host->reg_base + reg_off)) & SDXC_CAL_DONE)) {
703 if (time_before(jiffies, timeout))
704 cpu_relax();
705 else {
706 reg &= ~SDXC_CAL_START;
707 writel(reg, host->reg_base + reg_off);
708
709 return -ETIMEDOUT;
710 }
711 }
712
713 delay = (reg >> SDXC_CAL_DL_SHIFT) & SDXC_CAL_DL_MASK;
714
715 reg &= ~SDXC_CAL_START;
716 reg |= (delay << SDXC_CAL_DL_SW_SHIFT) | SDXC_CAL_DL_SW_EN;
717
718 writel(reg, host->reg_base + reg_off);
719
720 dev_dbg(mmc_dev(host->mmc), "calibration ended, reg is 0x%x\n", reg);
721
722 return 0;
723}
724
725static int sunxi_mmc_clk_set_phase(struct sunxi_mmc_host *host,
726 struct mmc_ios *ios, u32 rate)
727{
728 int index;
729
730 if (!host->cfg->clk_delays)
731 return 0;
732
733 /* determine delays */
734 if (rate <= 400000) {
735 index = SDXC_CLK_400K;
736 } else if (rate <= 25000000) {
737 index = SDXC_CLK_25M;
738 } else if (rate <= 52000000) {
739 if (ios->timing != MMC_TIMING_UHS_DDR50 &&
740 ios->timing != MMC_TIMING_MMC_DDR52) {
741 index = SDXC_CLK_50M;
742 } else if (ios->bus_width == MMC_BUS_WIDTH_8) {
743 index = SDXC_CLK_50M_DDR_8BIT;
744 } else {
745 index = SDXC_CLK_50M_DDR;
746 }
747 } else {
748 return -EINVAL;
749 }
750
751 clk_set_phase(host->clk_sample, host->cfg->clk_delays[index].sample);
752 clk_set_phase(host->clk_output, host->cfg->clk_delays[index].output);
753
754 return 0;
755}
756
656static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, 757static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
657 struct mmc_ios *ios) 758 struct mmc_ios *ios)
658{ 759{
659 u32 rate, oclk_dly, rval, sclk_dly; 760 long rate;
660 u32 clock = ios->clock; 761 u32 rval, clock = ios->clock;
661 int ret; 762 int ret;
662 763
663 /* 8 bit DDR requires a higher module clock */ 764 /* 8 bit DDR requires a higher module clock */
@@ -666,13 +767,18 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
666 clock <<= 1; 767 clock <<= 1;
667 768
668 rate = clk_round_rate(host->clk_mmc, clock); 769 rate = clk_round_rate(host->clk_mmc, clock);
669 dev_dbg(mmc_dev(host->mmc), "setting clk to %d, rounded %d\n", 770 if (rate < 0) {
771 dev_err(mmc_dev(host->mmc), "error rounding clk to %d: %ld\n",
772 clock, rate);
773 return rate;
774 }
775 dev_dbg(mmc_dev(host->mmc), "setting clk to %d, rounded %ld\n",
670 clock, rate); 776 clock, rate);
671 777
672 /* setting clock rate */ 778 /* setting clock rate */
673 ret = clk_set_rate(host->clk_mmc, rate); 779 ret = clk_set_rate(host->clk_mmc, rate);
674 if (ret) { 780 if (ret) {
675 dev_err(mmc_dev(host->mmc), "error setting clk to %d: %d\n", 781 dev_err(mmc_dev(host->mmc), "error setting clk to %ld: %d\n",
676 rate, ret); 782 rate, ret);
677 return ret; 783 return ret;
678 } 784 }
@@ -692,31 +798,15 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
692 } 798 }
693 mmc_writel(host, REG_CLKCR, rval); 799 mmc_writel(host, REG_CLKCR, rval);
694 800
695 /* determine delays */ 801 ret = sunxi_mmc_clk_set_phase(host, ios, rate);
696 if (rate <= 400000) { 802 if (ret)
697 oclk_dly = host->clk_delays[SDXC_CLK_400K].output; 803 return ret;
698 sclk_dly = host->clk_delays[SDXC_CLK_400K].sample; 804
699 } else if (rate <= 25000000) { 805 ret = sunxi_mmc_calibrate(host, SDXC_REG_SAMP_DL_REG);
700 oclk_dly = host->clk_delays[SDXC_CLK_25M].output; 806 if (ret)
701 sclk_dly = host->clk_delays[SDXC_CLK_25M].sample; 807 return ret;
702 } else if (rate <= 52000000) {
703 if (ios->timing != MMC_TIMING_UHS_DDR50 &&
704 ios->timing != MMC_TIMING_MMC_DDR52) {
705 oclk_dly = host->clk_delays[SDXC_CLK_50M].output;
706 sclk_dly = host->clk_delays[SDXC_CLK_50M].sample;
707 } else if (ios->bus_width == MMC_BUS_WIDTH_8) {
708 oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR_8BIT].output;
709 sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR_8BIT].sample;
710 } else {
711 oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output;
712 sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample;
713 }
714 } else {
715 return -EINVAL;
716 }
717 808
718 clk_set_phase(host->clk_sample, sclk_dly); 809 /* TODO: enable calibrate on sdc2 SDXC_REG_DS_DL_REG of A64 */
719 clk_set_phase(host->clk_output, oclk_dly);
720 810
721 return sunxi_mmc_oclk_onoff(host, 1); 811 return sunxi_mmc_oclk_onoff(host, 1);
722} 812}
@@ -938,14 +1028,6 @@ static int sunxi_mmc_card_busy(struct mmc_host *mmc)
938 return !!(mmc_readl(host, REG_STAS) & SDXC_CARD_DATA_BUSY); 1028 return !!(mmc_readl(host, REG_STAS) & SDXC_CARD_DATA_BUSY);
939} 1029}
940 1030
941static const struct of_device_id sunxi_mmc_of_match[] = {
942 { .compatible = "allwinner,sun4i-a10-mmc", },
943 { .compatible = "allwinner,sun5i-a13-mmc", },
944 { .compatible = "allwinner,sun9i-a80-mmc", },
945 { /* sentinel */ }
946};
947MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
948
949static struct mmc_host_ops sunxi_mmc_ops = { 1031static struct mmc_host_ops sunxi_mmc_ops = {
950 .request = sunxi_mmc_request, 1032 .request = sunxi_mmc_request,
951 .set_ios = sunxi_mmc_set_ios, 1033 .set_ios = sunxi_mmc_set_ios,
@@ -974,21 +1056,54 @@ static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
974 [SDXC_CLK_50M_DDR_8BIT] = { .output = 72, .sample = 72 }, 1056 [SDXC_CLK_50M_DDR_8BIT] = { .output = 72, .sample = 72 },
975}; 1057};
976 1058
1059static const struct sunxi_mmc_cfg sun4i_a10_cfg = {
1060 .idma_des_size_bits = 13,
1061 .clk_delays = NULL,
1062 .can_calibrate = false,
1063};
1064
1065static const struct sunxi_mmc_cfg sun5i_a13_cfg = {
1066 .idma_des_size_bits = 16,
1067 .clk_delays = NULL,
1068 .can_calibrate = false,
1069};
1070
1071static const struct sunxi_mmc_cfg sun7i_a20_cfg = {
1072 .idma_des_size_bits = 16,
1073 .clk_delays = sunxi_mmc_clk_delays,
1074 .can_calibrate = false,
1075};
1076
1077static const struct sunxi_mmc_cfg sun9i_a80_cfg = {
1078 .idma_des_size_bits = 16,
1079 .clk_delays = sun9i_mmc_clk_delays,
1080 .can_calibrate = false,
1081};
1082
1083static const struct sunxi_mmc_cfg sun50i_a64_cfg = {
1084 .idma_des_size_bits = 16,
1085 .clk_delays = NULL,
1086 .can_calibrate = true,
1087};
1088
1089static const struct of_device_id sunxi_mmc_of_match[] = {
1090 { .compatible = "allwinner,sun4i-a10-mmc", .data = &sun4i_a10_cfg },
1091 { .compatible = "allwinner,sun5i-a13-mmc", .data = &sun5i_a13_cfg },
1092 { .compatible = "allwinner,sun7i-a20-mmc", .data = &sun7i_a20_cfg },
1093 { .compatible = "allwinner,sun9i-a80-mmc", .data = &sun9i_a80_cfg },
1094 { .compatible = "allwinner,sun50i-a64-mmc", .data = &sun50i_a64_cfg },
1095 { /* sentinel */ }
1096};
1097MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
1098
977static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, 1099static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
978 struct platform_device *pdev) 1100 struct platform_device *pdev)
979{ 1101{
980 struct device_node *np = pdev->dev.of_node;
981 int ret; 1102 int ret;
982 1103
983 if (of_device_is_compatible(np, "allwinner,sun4i-a10-mmc")) 1104 host->cfg = of_device_get_match_data(&pdev->dev);
984 host->idma_des_size_bits = 13; 1105 if (!host->cfg)
985 else 1106 return -EINVAL;
986 host->idma_des_size_bits = 16;
987
988 if (of_device_is_compatible(np, "allwinner,sun9i-a80-mmc"))
989 host->clk_delays = sun9i_mmc_clk_delays;
990 else
991 host->clk_delays = sunxi_mmc_clk_delays;
992 1107
993 ret = mmc_regulator_get_supply(host->mmc); 1108 ret = mmc_regulator_get_supply(host->mmc);
994 if (ret) { 1109 if (ret) {
@@ -1014,16 +1129,18 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
1014 return PTR_ERR(host->clk_mmc); 1129 return PTR_ERR(host->clk_mmc);
1015 } 1130 }
1016 1131
1017 host->clk_output = devm_clk_get(&pdev->dev, "output"); 1132 if (host->cfg->clk_delays) {
1018 if (IS_ERR(host->clk_output)) { 1133 host->clk_output = devm_clk_get(&pdev->dev, "output");
1019 dev_err(&pdev->dev, "Could not get output clock\n"); 1134 if (IS_ERR(host->clk_output)) {
1020 return PTR_ERR(host->clk_output); 1135 dev_err(&pdev->dev, "Could not get output clock\n");
1021 } 1136 return PTR_ERR(host->clk_output);
1137 }
1022 1138
1023 host->clk_sample = devm_clk_get(&pdev->dev, "sample"); 1139 host->clk_sample = devm_clk_get(&pdev->dev, "sample");
1024 if (IS_ERR(host->clk_sample)) { 1140 if (IS_ERR(host->clk_sample)) {
1025 dev_err(&pdev->dev, "Could not get sample clock\n"); 1141 dev_err(&pdev->dev, "Could not get sample clock\n");
1026 return PTR_ERR(host->clk_sample); 1142 return PTR_ERR(host->clk_sample);
1143 }
1027 } 1144 }
1028 1145
1029 host->reset = devm_reset_control_get_optional(&pdev->dev, "ahb"); 1146 host->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
@@ -1120,15 +1237,17 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
1120 mmc->max_blk_count = 8192; 1237 mmc->max_blk_count = 8192;
1121 mmc->max_blk_size = 4096; 1238 mmc->max_blk_size = 4096;
1122 mmc->max_segs = PAGE_SIZE / sizeof(struct sunxi_idma_des); 1239 mmc->max_segs = PAGE_SIZE / sizeof(struct sunxi_idma_des);
1123 mmc->max_seg_size = (1 << host->idma_des_size_bits); 1240 mmc->max_seg_size = (1 << host->cfg->idma_des_size_bits);
1124 mmc->max_req_size = mmc->max_seg_size * mmc->max_segs; 1241 mmc->max_req_size = mmc->max_seg_size * mmc->max_segs;
1125 /* 400kHz ~ 52MHz */ 1242 /* 400kHz ~ 52MHz */
1126 mmc->f_min = 400000; 1243 mmc->f_min = 400000;
1127 mmc->f_max = 52000000; 1244 mmc->f_max = 52000000;
1128 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | 1245 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
1129 MMC_CAP_1_8V_DDR |
1130 MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; 1246 MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
1131 1247
1248 if (host->cfg->clk_delays)
1249 mmc->caps |= MMC_CAP_1_8V_DDR;
1250
1132 ret = mmc_of_parse(mmc); 1251 ret = mmc_of_parse(mmc);
1133 if (ret) 1252 if (ret)
1134 goto error_free_dma; 1253 goto error_free_dma;
@@ -1160,6 +1279,8 @@ static int sunxi_mmc_remove(struct platform_device *pdev)
1160 if (!IS_ERR(host->reset)) 1279 if (!IS_ERR(host->reset))
1161 reset_control_assert(host->reset); 1280 reset_control_assert(host->reset);
1162 1281
1282 clk_disable_unprepare(host->clk_sample);
1283 clk_disable_unprepare(host->clk_output);
1163 clk_disable_unprepare(host->clk_mmc); 1284 clk_disable_unprepare(host->clk_mmc);
1164 clk_disable_unprepare(host->clk_ahb); 1285 clk_disable_unprepare(host->clk_ahb);
1165 1286
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 7f63ec05bdf4..8e126afd988c 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -79,6 +79,9 @@
79#define CLK_CTL_DIV_MASK 0xff 79#define CLK_CTL_DIV_MASK 0xff
80#define CLK_CTL_SCLKEN BIT(8) 80#define CLK_CTL_SCLKEN BIT(8)
81 81
82#define CARD_OPT_WIDTH8 BIT(13)
83#define CARD_OPT_WIDTH BIT(15)
84
82#define TMIO_BBS 512 /* Boot block size */ 85#define TMIO_BBS 512 /* Boot block size */
83 86
84/* Definitions for values the CTRL_SDIO_STATUS register can take. */ 87/* Definitions for values the CTRL_SDIO_STATUS register can take. */
@@ -158,6 +161,7 @@ struct tmio_mmc_host {
158 void (*clk_disable)(struct tmio_mmc_host *host); 161 void (*clk_disable)(struct tmio_mmc_host *host);
159 int (*multi_io_quirk)(struct mmc_card *card, 162 int (*multi_io_quirk)(struct mmc_card *card,
160 unsigned int direction, int blk_size); 163 unsigned int direction, int blk_size);
164 int (*card_busy)(struct mmc_host *mmc);
161 int (*start_signal_voltage_switch)(struct mmc_host *mmc, 165 int (*start_signal_voltage_switch)(struct mmc_host *mmc,
162 struct mmc_ios *ios); 166 struct mmc_ios *ios);
163}; 167};
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 92467efc4e2c..700567603107 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -336,7 +336,9 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command
336 336
337 switch (mmc_resp_type(cmd)) { 337 switch (mmc_resp_type(cmd)) {
338 case MMC_RSP_NONE: c |= RESP_NONE; break; 338 case MMC_RSP_NONE: c |= RESP_NONE; break;
339 case MMC_RSP_R1: c |= RESP_R1; break; 339 case MMC_RSP_R1:
340 case MMC_RSP_R1_NO_CRC:
341 c |= RESP_R1; break;
340 case MMC_RSP_R1B: c |= RESP_R1B; break; 342 case MMC_RSP_R1B: c |= RESP_R1B; break;
341 case MMC_RSP_R2: c |= RESP_R2; break; 343 case MMC_RSP_R2: c |= RESP_R2; break;
342 case MMC_RSP_R3: c |= RESP_R3; break; 344 case MMC_RSP_R3: c |= RESP_R3; break;
@@ -730,12 +732,13 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
730 pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n", 732 pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
731 data->blksz, data->blocks); 733 data->blksz, data->blocks);
732 734
733 /* Some hardware cannot perform 2 byte requests in 4 bit mode */ 735 /* Some hardware cannot perform 2 byte requests in 4/8 bit mode */
734 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) { 736 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4 ||
737 host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
735 int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES; 738 int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;
736 739
737 if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) { 740 if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
738 pr_err("%s: %d byte block unsupported in 4 bit mode\n", 741 pr_err("%s: %d byte block unsupported in 4/8 bit mode\n",
739 mmc_hostname(host->mmc), data->blksz); 742 mmc_hostname(host->mmc), data->blksz);
740 return -EINVAL; 743 return -EINVAL;
741 } 744 }
@@ -857,14 +860,16 @@ static void tmio_mmc_power_off(struct tmio_mmc_host *host)
857static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host, 860static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
858 unsigned char bus_width) 861 unsigned char bus_width)
859{ 862{
860 switch (bus_width) { 863 u16 reg = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT)
861 case MMC_BUS_WIDTH_1: 864 & ~(CARD_OPT_WIDTH | CARD_OPT_WIDTH8);
862 sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); 865
863 break; 866 /* reg now applies to MMC_BUS_WIDTH_4 */
864 case MMC_BUS_WIDTH_4: 867 if (bus_width == MMC_BUS_WIDTH_1)
865 sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); 868 reg |= CARD_OPT_WIDTH;
866 break; 869 else if (bus_width == MMC_BUS_WIDTH_8)
867 } 870 reg |= CARD_OPT_WIDTH8;
871
872 sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
868} 873}
869 874
870/* Set MMC clock / power. 875/* Set MMC clock / power.
@@ -960,20 +965,12 @@ static int tmio_multi_io_quirk(struct mmc_card *card,
960 return blk_size; 965 return blk_size;
961} 966}
962 967
963static int tmio_mmc_card_busy(struct mmc_host *mmc)
964{
965 struct tmio_mmc_host *host = mmc_priv(mmc);
966
967 return !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_DAT0);
968}
969
970static struct mmc_host_ops tmio_mmc_ops = { 968static struct mmc_host_ops tmio_mmc_ops = {
971 .request = tmio_mmc_request, 969 .request = tmio_mmc_request,
972 .set_ios = tmio_mmc_set_ios, 970 .set_ios = tmio_mmc_set_ios,
973 .get_ro = tmio_mmc_get_ro, 971 .get_ro = tmio_mmc_get_ro,
974 .get_cd = mmc_gpio_get_cd, 972 .get_cd = mmc_gpio_get_cd,
975 .enable_sdio_irq = tmio_mmc_enable_sdio_irq, 973 .enable_sdio_irq = tmio_mmc_enable_sdio_irq,
976 .card_busy = tmio_mmc_card_busy,
977 .multi_io_quirk = tmio_multi_io_quirk, 974 .multi_io_quirk = tmio_multi_io_quirk,
978}; 975};
979 976
@@ -1072,6 +1069,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
1072 goto host_free; 1069 goto host_free;
1073 } 1070 }
1074 1071
1072 tmio_mmc_ops.card_busy = _host->card_busy;
1075 tmio_mmc_ops.start_signal_voltage_switch = _host->start_signal_voltage_switch; 1073 tmio_mmc_ops.start_signal_voltage_switch = _host->start_signal_voltage_switch;
1076 mmc->ops = &tmio_mmc_ops; 1074 mmc->ops = &tmio_mmc_ops;
1077 1075
@@ -1089,6 +1087,15 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
1089 !mmc_card_is_removable(mmc) || 1087 !mmc_card_is_removable(mmc) ||
1090 mmc->slot.cd_irq >= 0); 1088 mmc->slot.cd_irq >= 0);
1091 1089
1090 /*
1091 * On Gen2+, eMMC with NONREMOVABLE currently fails because native
1092 * hotplug gets disabled. It seems RuntimePM related yet we need further
1093 * research. Since we are planning a PM overhaul anyway, let's enforce
1094 * for now the device being active by enabling native hotplug always.
1095 */
1096 if (pdata->flags & TMIO_MMC_MIN_RCAR2)
1097 _host->native_hotplug = true;
1098
1092 if (tmio_mmc_clk_enable(_host) < 0) { 1099 if (tmio_mmc_clk_enable(_host) < 0) {
1093 mmc->f_max = pdata->hclk; 1100 mmc->f_max = pdata->hclk;
1094 mmc->f_min = mmc->f_max / 512; 1101 mmc->f_min = mmc->f_max / 512;
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index d8673ca968ba..73fad83acbcb 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -292,6 +292,7 @@ struct mmc_card {
292 u32 raw_cid[4]; /* raw card CID */ 292 u32 raw_cid[4]; /* raw card CID */
293 u32 raw_csd[4]; /* raw card CSD */ 293 u32 raw_csd[4]; /* raw card CSD */
294 u32 raw_scr[2]; /* raw card SCR */ 294 u32 raw_scr[2]; /* raw card SCR */
295 u32 raw_ssr[16]; /* raw card SSR */
295 struct mmc_cid cid; /* card identification */ 296 struct mmc_cid cid; /* card identification */
296 struct mmc_csd csd; /* card specific */ 297 struct mmc_csd csd; /* card specific */
297 struct mmc_ext_csd ext_csd; /* mmc v4 extended card specific */ 298 struct mmc_ext_csd ext_csd; /* mmc v4 extended card specific */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index b01e77de1a74..2b953eb8ceae 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -55,6 +55,9 @@ struct mmc_command {
55#define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) 55#define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
56#define MMC_RSP_R7 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) 56#define MMC_RSP_R7 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
57 57
58/* Can be used by core to poll after switch to MMC HS mode */
59#define MMC_RSP_R1_NO_CRC (MMC_RSP_PRESENT|MMC_RSP_OPCODE)
60
58#define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE)) 61#define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE))
59 62
60/* 63/*
@@ -133,8 +136,12 @@ struct mmc_request {
133 struct mmc_command *stop; 136 struct mmc_command *stop;
134 137
135 struct completion completion; 138 struct completion completion;
139 struct completion cmd_completion;
136 void (*done)(struct mmc_request *);/* completion function */ 140 void (*done)(struct mmc_request *);/* completion function */
137 struct mmc_host *host; 141 struct mmc_host *host;
142
143 /* Allow other commands during this ongoing data transfer or busy wait */
144 bool cap_cmd_during_tfr;
138}; 145};
139 146
140struct mmc_card; 147struct mmc_card;
@@ -146,6 +153,9 @@ extern struct mmc_async_req *mmc_start_req(struct mmc_host *,
146 struct mmc_async_req *, int *); 153 struct mmc_async_req *, int *);
147extern int mmc_interrupt_hpi(struct mmc_card *); 154extern int mmc_interrupt_hpi(struct mmc_card *);
148extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *); 155extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *);
156extern void mmc_wait_for_req_done(struct mmc_host *host,
157 struct mmc_request *mrq);
158extern bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
149extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int); 159extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int);
150extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *); 160extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *);
151extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, 161extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 83b0edfce471..f5af2bd35e7f 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -17,6 +17,7 @@
17#include <linux/scatterlist.h> 17#include <linux/scatterlist.h>
18#include <linux/mmc/core.h> 18#include <linux/mmc/core.h>
19#include <linux/dmaengine.h> 19#include <linux/dmaengine.h>
20#include <linux/reset.h>
20 21
21#define MAX_MCI_SLOTS 2 22#define MAX_MCI_SLOTS 2
22 23
@@ -259,6 +260,7 @@ struct dw_mci_board {
259 /* delay in mS before detecting cards after interrupt */ 260 /* delay in mS before detecting cards after interrupt */
260 u32 detect_delay_ms; 261 u32 detect_delay_ms;
261 262
263 struct reset_control *rstc;
262 struct dw_mci_dma_ops *dma_ops; 264 struct dw_mci_dma_ops *dma_ops;
263 struct dma_pdata *data; 265 struct dma_pdata *data;
264}; 266};
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index aa4bfbf129e4..0b2439441cc8 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -281,6 +281,7 @@ struct mmc_host {
281#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ 281#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
282#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ 282#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
283#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ 283#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
284#define MMC_CAP_CMD_DURING_TFR (1 << 29) /* Commands during data transfer */
284#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */ 285#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */
285#define MMC_CAP_HW_RESET (1 << 31) /* Hardware reset */ 286#define MMC_CAP_HW_RESET (1 << 31) /* Hardware reset */
286 287
@@ -382,6 +383,9 @@ struct mmc_host {
382 struct mmc_async_req *areq; /* active async req */ 383 struct mmc_async_req *areq; /* active async req */
383 struct mmc_context_info context_info; /* async synchronization info */ 384 struct mmc_context_info context_info; /* async synchronization info */
384 385
386 /* Ongoing data transfer that allows commands during transfer */
387 struct mmc_request *ongoing_mrq;
388
385#ifdef CONFIG_FAIL_MMC_REQUEST 389#ifdef CONFIG_FAIL_MMC_REQUEST
386 struct fault_attr fail_mmc_request; 390 struct fault_attr fail_mmc_request;
387#endif 391#endif
@@ -418,6 +422,7 @@ int mmc_power_restore_host(struct mmc_host *host);
418 422
419void mmc_detect_change(struct mmc_host *, unsigned long delay); 423void mmc_detect_change(struct mmc_host *, unsigned long delay);
420void mmc_request_done(struct mmc_host *, struct mmc_request *); 424void mmc_request_done(struct mmc_host *, struct mmc_request *);
425void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq);
421 426
422static inline void mmc_signal_sdio_irq(struct mmc_host *host) 427static inline void mmc_signal_sdio_irq(struct mmc_host *host)
423{ 428{