aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBoris Brezillon <boris.brezillon@free-electrons.com>2018-01-29 03:58:36 -0500
committerBoris Brezillon <boris.brezillon@free-electrons.com>2018-01-29 03:58:36 -0500
commit571cb17b23eccc22f18c4fc0a0fc34cf0abca7ef (patch)
treec371417fc983700fc74fe35a017973cb8580eec2
parentc8f22b02a8bbc74525d17dd37d39bdf599e68a79 (diff)
parentf4c6cd1a7f2275d5bc0e494b21fff26f8dde80f0 (diff)
Merge tag 'nand/for-4.16' of git://git.infradead.org/linux-mtd into mtd/next
Pull NAND changes from Boris Brezillon: " Core changes: * Fix NAND_CMD_NONE handling in nand_command[_lp]() hooks * Introduce the ->exec_op() infrastructure * Rework NAND buffers handling * Fix ECC requirements for K9F4G08U0D * Fix nand_do_read_oob() to return the number of bitflips * Mark K9F1G08U0E as not supporting subpage writes Driver changes: * MTK: Rework the driver to support new IP versions * OMAP OneNAND: Full rework to use new APIs (libgpio, dmaengine) and fix DT support * Marvell: Add a new driver to replace the pxa3xx one "
-rw-r--r--Documentation/devicetree/bindings/mtd/gpmc-onenand.txt6
-rw-r--r--Documentation/devicetree/bindings/mtd/marvell-nand.txt123
-rw-r--r--Documentation/devicetree/bindings/mtd/mtk-nand.txt11
-rw-r--r--Documentation/devicetree/bindings/mtd/nand.txt1
-rw-r--r--MAINTAINERS22
-rw-r--r--arch/arm/boot/dts/omap2420-n8x0-common.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-igep.dtsi30
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts1
-rw-r--r--arch/arm/boot/dts/omap3-n950-n9.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3430-sdp.dts1
-rw-r--r--arch/arm/configs/mvebu_v7_defconfig2
-rw-r--r--arch/arm/mach-omap2/Makefile3
-rw-r--r--arch/arm/mach-omap2/gpmc-onenand.c409
-rw-r--r--arch/arm64/configs/defconfig2
-rw-r--r--drivers/memory/omap-gpmc.c163
-rw-r--r--drivers/mtd/nand/Kconfig17
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/atmel/nand-controller.c9
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c6
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c38
-rw-r--r--drivers/mtd/nand/cafe_nand.c52
-rw-r--r--drivers/mtd/nand/denali.c84
-rw-r--r--drivers/mtd/nand/denali.h4
-rw-r--r--drivers/mtd/nand/denali_pci.c4
-rw-r--r--drivers/mtd/nand/diskonchip.c4
-rw-r--r--drivers/mtd/nand/docg4.c21
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c10
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c13
-rw-r--r--drivers/mtd/nand/fsmc_nand.c9
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c111
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h46
-rw-r--r--drivers/mtd/nand/hisi504_nand.c9
-rw-r--r--drivers/mtd/nand/jz4740_nand.c16
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c7
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c33
-rw-r--r--drivers/mtd/nand/marvell_nand.c2896
-rw-r--r--drivers/mtd/nand/mtk_ecc.c126
-rw-r--r--drivers/mtd/nand/mtk_ecc.h3
-rw-r--r--drivers/mtd/nand/mtk_nand.c76
-rw-r--r--drivers/mtd/nand/nand_base.c2208
-rw-r--r--drivers/mtd/nand/nand_bbt.c2
-rw-r--r--drivers/mtd/nand/nand_hynix.c129
-rw-r--r--drivers/mtd/nand/nand_micron.c83
-rw-r--r--drivers/mtd/nand/nand_samsung.c19
-rw-r--r--drivers/mtd/nand/nand_timings.c21
-rw-r--r--drivers/mtd/nand/omap2.c28
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c14
-rw-r--r--drivers/mtd/nand/qcom_nandc.c30
-rw-r--r--drivers/mtd/nand/r852.c11
-rw-r--r--drivers/mtd/nand/sh_flctl.c6
-rw-r--r--drivers/mtd/nand/sm_common.h2
-rw-r--r--drivers/mtd/nand/sunxi_nand.c111
-rw-r--r--drivers/mtd/nand/tango_nand.c27
-rw-r--r--drivers/mtd/nand/tmio_nand.c5
-rw-r--r--drivers/mtd/nand/vf610_nfc.c6
-rw-r--r--drivers/mtd/onenand/Kconfig7
-rw-r--r--drivers/mtd/onenand/omap2.c577
-rw-r--r--drivers/mtd/onenand/samsung.c185
-rw-r--r--drivers/mtd/tests/nandbiterrs.c2
-rw-r--r--drivers/mtd/tests/oobtest.c21
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c5
-rw-r--r--include/linux/mtd/rawnand.h443
-rw-r--r--include/linux/omap-gpmc.h28
-rw-r--r--include/linux/platform_data/mtd-onenand-omap2.h34
64 files changed, 6462 insertions, 1913 deletions
diff --git a/Documentation/devicetree/bindings/mtd/gpmc-onenand.txt b/Documentation/devicetree/bindings/mtd/gpmc-onenand.txt
index b6e8bfd024f4..e9f01a963a0a 100644
--- a/Documentation/devicetree/bindings/mtd/gpmc-onenand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmc-onenand.txt
@@ -9,13 +9,14 @@ Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
9 9
10Required properties: 10Required properties:
11 11
12 - compatible: "ti,omap2-onenand"
12 - reg: The CS line the peripheral is connected to 13 - reg: The CS line the peripheral is connected to
13 - gpmc,device-width Width of the ONENAND device connected to the GPMC 14 - gpmc,device-width: Width of the ONENAND device connected to the GPMC
14 in bytes. Must be 1 or 2. 15 in bytes. Must be 1 or 2.
15 16
16Optional properties: 17Optional properties:
17 18
18 - dma-channel: DMA Channel index 19 - int-gpios: GPIO specifier for the INT pin.
19 20
20For inline partition table parsing (optional): 21For inline partition table parsing (optional):
21 22
@@ -35,6 +36,7 @@ Example for an OMAP3430 board:
35 #size-cells = <1>; 36 #size-cells = <1>;
36 37
37 onenand@0 { 38 onenand@0 {
39 compatible = "ti,omap2-onenand";
38 reg = <0 0 0>; /* CS0, offset 0 */ 40 reg = <0 0 0>; /* CS0, offset 0 */
39 gpmc,device-width = <2>; 41 gpmc,device-width = <2>;
40 42
diff --git a/Documentation/devicetree/bindings/mtd/marvell-nand.txt b/Documentation/devicetree/bindings/mtd/marvell-nand.txt
new file mode 100644
index 000000000000..c08fb477b3c6
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/marvell-nand.txt
@@ -0,0 +1,123 @@
1Marvell NAND Flash Controller (NFC)
2
3Required properties:
4- compatible: can be one of the following:
5 * "marvell,armada-8k-nand-controller"
6 * "marvell,armada370-nand-controller"
7 * "marvell,pxa3xx-nand-controller"
8 * "marvell,armada-8k-nand" (deprecated)
9 * "marvell,armada370-nand" (deprecated)
10 * "marvell,pxa3xx-nand" (deprecated)
11 Compatibles marked deprecated support only the old bindings described
12 at the bottom.
13- reg: NAND flash controller memory area.
14- #address-cells: shall be set to 1. Encode the NAND CS.
15- #size-cells: shall be set to 0.
16- interrupts: shall define the NAND controller interrupt.
17- clocks: shall reference the NAND controller clock.
18- marvell,system-controller: Set to retrieve the syscon node that handles
19 NAND controller related registers (only required with the
20 "marvell,armada-8k-nand[-controller]" compatibles).
21
22Optional properties:
23- label: see partition.txt. New platforms shall omit this property.
24- dmas: shall reference DMA channel associated to the NAND controller.
25 This property is only used with "marvell,pxa3xx-nand[-controller]"
26 compatible strings.
27- dma-names: shall be "rxtx".
28 This property is only used with "marvell,pxa3xx-nand[-controller]"
29 compatible strings.
30
31Optional children nodes:
32Children nodes represent the available NAND chips.
33
34Required properties:
35- reg: shall contain the native Chip Select ids (0-3).
36- nand-rb: see nand.txt (0-1).
37
38Optional properties:
39- marvell,nand-keep-config: orders the driver not to take the timings
40 from the core and leaving them completely untouched. Bootloader
41 timings will then be used.
42- label: MTD name.
43- nand-on-flash-bbt: see nand.txt.
44- nand-ecc-mode: see nand.txt. Will use hardware ECC if not specified.
45- nand-ecc-algo: see nand.txt. This property is essentially useful when
46 not using hardware ECC. Howerver, it may be added when using hardware
47 ECC for clarification but will be ignored by the driver because ECC
48 mode is chosen depending on the page size and the strength required by
49 the NAND chip. This value may be overwritten with nand-ecc-strength
50 property.
51- nand-ecc-strength: see nand.txt.
52- nand-ecc-step-size: see nand.txt. Marvell's NAND flash controller does
53 use fixed strength (1-bit for Hamming, 16-bit for BCH), so the actual
54 step size will shrink or grow in order to fit the required strength.
55 Step sizes are not completely random for all and follow certain
56 patterns described in AN-379, "Marvell SoC NFC ECC".
57
58See Documentation/devicetree/bindings/mtd/nand.txt for more details on
59generic bindings.
60
61
62Example:
63nand_controller: nand-controller@d0000 {
64 compatible = "marvell,armada370-nand-controller";
65 reg = <0xd0000 0x54>;
66 #address-cells = <1>;
67 #size-cells = <0>;
68 interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
69 clocks = <&coredivclk 0>;
70
71 nand@0 {
72 reg = <0>;
73 label = "main-storage";
74 nand-rb = <0>;
75 nand-ecc-mode = "hw";
76 marvell,nand-keep-config;
77 nand-on-flash-bbt;
78 nand-ecc-strength = <4>;
79 nand-ecc-step-size = <512>;
80
81 partitions {
82 compatible = "fixed-partitions";
83 #address-cells = <1>;
84 #size-cells = <1>;
85
86 partition@0 {
87 label = "Rootfs";
88 reg = <0x00000000 0x40000000>;
89 };
90 };
91 };
92};
93
94
95Note on legacy bindings: One can find, in not-updated device trees,
96bindings slightly different than described above with other properties
97described below as well as the partitions node at the root of a so
98called "nand" node (without clear controller/chip separation).
99
100Legacy properties:
101- marvell,nand-enable-arbiter: To enable the arbiter, all boards blindly
102 used it, this bit was set by the bootloader for many boards and even if
103 it is marked reserved in several datasheets, it might be needed to set
104 it (otherwise it is harmless) so whether or not this property is set,
105 the bit is selected by the driver.
106- num-cs: Number of chip-select lines to use, all boards blindly set 1
107 to this and for a reason, other values would have failed. The value of
108 this property is ignored.
109
110Example:
111
112 nand0: nand@43100000 {
113 compatible = "marvell,pxa3xx-nand";
114 reg = <0x43100000 90>;
115 interrupts = <45>;
116 dmas = <&pdma 97 0>;
117 dma-names = "rxtx";
118 #address-cells = <1>;
119 marvell,nand-keep-config;
120 marvell,nand-enable-arbiter;
121 num-cs = <1>;
122 /* Partitions (optional) */
123 };
diff --git a/Documentation/devicetree/bindings/mtd/mtk-nand.txt b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
index dbf9e054c11c..0025bc4c94a0 100644
--- a/Documentation/devicetree/bindings/mtd/mtk-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
@@ -12,8 +12,10 @@ tree nodes.
12 12
13The first part of NFC is NAND Controller Interface (NFI) HW. 13The first part of NFC is NAND Controller Interface (NFI) HW.
14Required NFI properties: 14Required NFI properties:
15- compatible: Should be one of "mediatek,mt2701-nfc", 15- compatible: Should be one of
16 "mediatek,mt2712-nfc". 16 "mediatek,mt2701-nfc",
17 "mediatek,mt2712-nfc",
18 "mediatek,mt7622-nfc".
17- reg: Base physical address and size of NFI. 19- reg: Base physical address and size of NFI.
18- interrupts: Interrupts of NFI. 20- interrupts: Interrupts of NFI.
19- clocks: NFI required clocks. 21- clocks: NFI required clocks.
@@ -142,7 +144,10 @@ Example:
142============== 144==============
143 145
144Required BCH properties: 146Required BCH properties:
145- compatible: Should be one of "mediatek,mt2701-ecc", "mediatek,mt2712-ecc". 147- compatible: Should be one of
148 "mediatek,mt2701-ecc",
149 "mediatek,mt2712-ecc",
150 "mediatek,mt7622-ecc".
146- reg: Base physical address and size of ECC. 151- reg: Base physical address and size of ECC.
147- interrupts: Interrupts of ECC. 152- interrupts: Interrupts of ECC.
148- clocks: ECC required clocks. 153- clocks: ECC required clocks.
diff --git a/Documentation/devicetree/bindings/mtd/nand.txt b/Documentation/devicetree/bindings/mtd/nand.txt
index 133f3813719c..8bb11d809429 100644
--- a/Documentation/devicetree/bindings/mtd/nand.txt
+++ b/Documentation/devicetree/bindings/mtd/nand.txt
@@ -43,6 +43,7 @@ Optional NAND chip properties:
43 This is particularly useful when only the in-band area is 43 This is particularly useful when only the in-band area is
44 used by the upper layers, and you want to make your NAND 44 used by the upper layers, and you want to make your NAND
45 as reliable as possible. 45 as reliable as possible.
46- nand-rb: shall contain the native Ready/Busy ids.
46 47
47The ECC strength and ECC step size properties define the correction capability 48The ECC strength and ECC step size properties define the correction capability
48of a controller. Together, they say a controller can correct "{strength} bit 49of a controller. Together, they say a controller can correct "{strength} bit
diff --git a/MAINTAINERS b/MAINTAINERS
index 10732ecfc937..48ae3a43ed0f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2382,13 +2382,6 @@ F: Documentation/devicetree/bindings/input/atmel,maxtouch.txt
2382F: drivers/input/touchscreen/atmel_mxt_ts.c 2382F: drivers/input/touchscreen/atmel_mxt_ts.c
2383F: include/linux/platform_data/atmel_mxt_ts.h 2383F: include/linux/platform_data/atmel_mxt_ts.h
2384 2384
2385ATMEL NAND DRIVER
2386M: Wenyou Yang <wenyou.yang@atmel.com>
2387M: Josh Wu <rainyfeeling@outlook.com>
2388L: linux-mtd@lists.infradead.org
2389S: Supported
2390F: drivers/mtd/nand/atmel/*
2391
2392ATMEL SAMA5D2 ADC DRIVER 2385ATMEL SAMA5D2 ADC DRIVER
2393M: Ludovic Desroches <ludovic.desroches@microchip.com> 2386M: Ludovic Desroches <ludovic.desroches@microchip.com>
2394L: linux-iio@vger.kernel.org 2387L: linux-iio@vger.kernel.org
@@ -8409,6 +8402,13 @@ L: linux-wireless@vger.kernel.org
8409S: Odd Fixes 8402S: Odd Fixes
8410F: drivers/net/wireless/marvell/mwl8k.c 8403F: drivers/net/wireless/marvell/mwl8k.c
8411 8404
8405MARVELL NAND CONTROLLER DRIVER
8406M: Miquel Raynal <miquel.raynal@free-electrons.com>
8407L: linux-mtd@lists.infradead.org
8408S: Maintained
8409F: drivers/mtd/nand/marvell_nand.c
8410F: Documentation/devicetree/bindings/mtd/marvell-nand.txt
8411
8412MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER 8412MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER
8413M: Nicolas Pitre <nico@fluxnic.net> 8413M: Nicolas Pitre <nico@fluxnic.net>
8414S: Odd Fixes 8414S: Odd Fixes
@@ -9045,6 +9045,14 @@ F: drivers/media/platform/atmel/atmel-isc.c
9045F: drivers/media/platform/atmel/atmel-isc-regs.h 9045F: drivers/media/platform/atmel/atmel-isc-regs.h
9046F: devicetree/bindings/media/atmel-isc.txt 9046F: devicetree/bindings/media/atmel-isc.txt
9047 9047
9048MICROCHIP / ATMEL NAND DRIVER
9049M: Wenyou Yang <wenyou.yang@microchip.com>
9050M: Josh Wu <rainyfeeling@outlook.com>
9051L: linux-mtd@lists.infradead.org
9052S: Supported
9053F: drivers/mtd/nand/atmel/*
9054F: Documentation/devicetree/bindings/mtd/atmel-nand.txt
9055
9048MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER 9056MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
9049M: Woojung Huh <Woojung.Huh@microchip.com> 9057M: Woojung Huh <Woojung.Huh@microchip.com>
9050M: Microchip Linux Driver Support <UNGLinuxDriver@microchip.com> 9058M: Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
diff --git a/arch/arm/boot/dts/omap2420-n8x0-common.dtsi b/arch/arm/boot/dts/omap2420-n8x0-common.dtsi
index 1df3ace3af92..63b0b4921e4e 100644
--- a/arch/arm/boot/dts/omap2420-n8x0-common.dtsi
+++ b/arch/arm/boot/dts/omap2420-n8x0-common.dtsi
@@ -52,6 +52,7 @@
52 onenand@0,0 { 52 onenand@0,0 {
53 #address-cells = <1>; 53 #address-cells = <1>;
54 #size-cells = <1>; 54 #size-cells = <1>;
55 compatible = "ti,omap2-onenand";
55 reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */ 56 reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */
56 57
57 gpmc,sync-read; 58 gpmc,sync-read;
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index 4ad7d5565906..f33cc80c9dbc 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -147,32 +147,32 @@
147 gpmc,sync-read; 147 gpmc,sync-read;
148 gpmc,sync-write; 148 gpmc,sync-write;
149 gpmc,burst-length = <16>; 149 gpmc,burst-length = <16>;
150 gpmc,burst-read;
151 gpmc,burst-wrap; 150 gpmc,burst-wrap;
151 gpmc,burst-read;
152 gpmc,burst-write; 152 gpmc,burst-write;
153 gpmc,device-width = <2>; /* GPMC_DEVWIDTH_16BIT */ 153 gpmc,device-width = <2>; /* GPMC_DEVWIDTH_16BIT */
154 gpmc,mux-add-data = <2>; /* GPMC_MUX_AD */ 154 gpmc,mux-add-data = <2>; /* GPMC_MUX_AD */
155 gpmc,cs-on-ns = <0>; 155 gpmc,cs-on-ns = <0>;
156 gpmc,cs-rd-off-ns = <87>; 156 gpmc,cs-rd-off-ns = <96>;
157 gpmc,cs-wr-off-ns = <87>; 157 gpmc,cs-wr-off-ns = <96>;
158 gpmc,adv-on-ns = <0>; 158 gpmc,adv-on-ns = <0>;
159 gpmc,adv-rd-off-ns = <10>; 159 gpmc,adv-rd-off-ns = <12>;
160 gpmc,adv-wr-off-ns = <10>; 160 gpmc,adv-wr-off-ns = <12>;
161 gpmc,oe-on-ns = <15>; 161 gpmc,oe-on-ns = <18>;
162 gpmc,oe-off-ns = <87>; 162 gpmc,oe-off-ns = <96>;
163 gpmc,we-on-ns = <0>; 163 gpmc,we-on-ns = <0>;
164 gpmc,we-off-ns = <87>; 164 gpmc,we-off-ns = <96>;
165 gpmc,rd-cycle-ns = <112>; 165 gpmc,rd-cycle-ns = <114>;
166 gpmc,wr-cycle-ns = <112>; 166 gpmc,wr-cycle-ns = <114>;
167 gpmc,access-ns = <81>; 167 gpmc,access-ns = <90>;
168 gpmc,page-burst-access-ns = <15>; 168 gpmc,page-burst-access-ns = <12>;
169 gpmc,bus-turnaround-ns = <0>; 169 gpmc,bus-turnaround-ns = <0>;
170 gpmc,cycle2cycle-delay-ns = <0>; 170 gpmc,cycle2cycle-delay-ns = <0>;
171 gpmc,wait-monitoring-ns = <0>; 171 gpmc,wait-monitoring-ns = <0>;
172 gpmc,clk-activation-ns = <5>; 172 gpmc,clk-activation-ns = <6>;
173 gpmc,wr-data-mux-bus-ns = <30>; 173 gpmc,wr-data-mux-bus-ns = <30>;
174 gpmc,wr-access-ns = <81>; 174 gpmc,wr-access-ns = <90>;
175 gpmc,sync-clk-ps = <15000>; 175 gpmc,sync-clk-ps = <12000>;
176 176
177 #address-cells = <1>; 177 #address-cells = <1>;
178 #size-cells = <1>; 178 #size-cells = <1>;
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 669c51c00c00..e7c7b8e50703 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -838,6 +838,7 @@
838 onenand@0,0 { 838 onenand@0,0 {
839 #address-cells = <1>; 839 #address-cells = <1>;
840 #size-cells = <1>; 840 #size-cells = <1>;
841 compatible = "ti,omap2-onenand";
841 reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */ 842 reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */
842 843
843 gpmc,sync-read; 844 gpmc,sync-read;
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index 12fbb3da5fce..0d9b85317529 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -367,6 +367,7 @@
367 onenand@0,0 { 367 onenand@0,0 {
368 #address-cells = <1>; 368 #address-cells = <1>;
369 #size-cells = <1>; 369 #size-cells = <1>;
370 compatible = "ti,omap2-onenand";
370 reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */ 371 reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */
371 372
372 gpmc,sync-read; 373 gpmc,sync-read;
diff --git a/arch/arm/boot/dts/omap3430-sdp.dts b/arch/arm/boot/dts/omap3430-sdp.dts
index 908951eb5943..d652708f6bef 100644
--- a/arch/arm/boot/dts/omap3430-sdp.dts
+++ b/arch/arm/boot/dts/omap3430-sdp.dts
@@ -154,6 +154,7 @@
154 linux,mtd-name= "samsung,kfm2g16q2m-deb8"; 154 linux,mtd-name= "samsung,kfm2g16q2m-deb8";
155 #address-cells = <1>; 155 #address-cells = <1>;
156 #size-cells = <1>; 156 #size-cells = <1>;
157 compatible = "ti,omap2-onenand";
157 reg = <2 0 0x20000>; /* CS2, offset 0, IO size 4 */ 158 reg = <2 0 0x20000>; /* CS2, offset 0, IO size 4 */
158 159
159 gpmc,device-width = <2>; 160 gpmc,device-width = <2>;
diff --git a/arch/arm/configs/mvebu_v7_defconfig b/arch/arm/configs/mvebu_v7_defconfig
index 69553704f2dc..4b6e4fd47e5d 100644
--- a/arch/arm/configs/mvebu_v7_defconfig
+++ b/arch/arm/configs/mvebu_v7_defconfig
@@ -57,7 +57,7 @@ CONFIG_MTD_CFI_STAA=y
57CONFIG_MTD_PHYSMAP_OF=y 57CONFIG_MTD_PHYSMAP_OF=y
58CONFIG_MTD_M25P80=y 58CONFIG_MTD_M25P80=y
59CONFIG_MTD_NAND=y 59CONFIG_MTD_NAND=y
60CONFIG_MTD_NAND_PXA3xx=y 60CONFIG_MTD_NAND_MARVELL=y
61CONFIG_MTD_SPI_NOR=y 61CONFIG_MTD_SPI_NOR=y
62CONFIG_SRAM=y 62CONFIG_SRAM=y
63CONFIG_MTD_UBI=y 63CONFIG_MTD_UBI=y
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 2f722a805948..c15bbcad5f67 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -232,6 +232,3 @@ obj-y += $(omap-hsmmc-m) $(omap-hsmmc-y)
232obj-y += omap_phy_internal.o 232obj-y += omap_phy_internal.o
233 233
234obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o 234obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o
235
236onenand-$(CONFIG_MTD_ONENAND_OMAP2) := gpmc-onenand.o
237obj-y += $(onenand-m) $(onenand-y)
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
deleted file mode 100644
index 2944af820558..000000000000
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ /dev/null
@@ -1,409 +0,0 @@
1/*
2 * linux/arch/arm/mach-omap2/gpmc-onenand.c
3 *
4 * Copyright (C) 2006 - 2009 Nokia Corporation
5 * Contacts: Juha Yrjola
6 * Tony Lindgren
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/string.h>
14#include <linux/kernel.h>
15#include <linux/platform_device.h>
16#include <linux/mtd/onenand_regs.h>
17#include <linux/io.h>
18#include <linux/omap-gpmc.h>
19#include <linux/platform_data/mtd-onenand-omap2.h>
20#include <linux/err.h>
21
22#include <asm/mach/flash.h>
23
24#include "soc.h"
25
26#define ONENAND_IO_SIZE SZ_128K
27
28#define ONENAND_FLAG_SYNCREAD (1 << 0)
29#define ONENAND_FLAG_SYNCWRITE (1 << 1)
30#define ONENAND_FLAG_HF (1 << 2)
31#define ONENAND_FLAG_VHF (1 << 3)
32
33static unsigned onenand_flags;
34static unsigned latency;
35
36static struct omap_onenand_platform_data *gpmc_onenand_data;
37
38static struct resource gpmc_onenand_resource = {
39 .flags = IORESOURCE_MEM,
40};
41
42static struct platform_device gpmc_onenand_device = {
43 .name = "omap2-onenand",
44 .id = -1,
45 .num_resources = 1,
46 .resource = &gpmc_onenand_resource,
47};
48
49static struct gpmc_settings onenand_async = {
50 .device_width = GPMC_DEVWIDTH_16BIT,
51 .mux_add_data = GPMC_MUX_AD,
52};
53
54static struct gpmc_settings onenand_sync = {
55 .burst_read = true,
56 .burst_wrap = true,
57 .burst_len = GPMC_BURST_16,
58 .device_width = GPMC_DEVWIDTH_16BIT,
59 .mux_add_data = GPMC_MUX_AD,
60 .wait_pin = 0,
61};
62
63static void omap2_onenand_calc_async_timings(struct gpmc_timings *t)
64{
65 struct gpmc_device_timings dev_t;
66 const int t_cer = 15;
67 const int t_avdp = 12;
68 const int t_aavdh = 7;
69 const int t_ce = 76;
70 const int t_aa = 76;
71 const int t_oe = 20;
72 const int t_cez = 20; /* max of t_cez, t_oez */
73 const int t_wpl = 40;
74 const int t_wph = 30;
75
76 memset(&dev_t, 0, sizeof(dev_t));
77
78 dev_t.t_avdp_r = max_t(int, t_avdp, t_cer) * 1000;
79 dev_t.t_avdp_w = dev_t.t_avdp_r;
80 dev_t.t_aavdh = t_aavdh * 1000;
81 dev_t.t_aa = t_aa * 1000;
82 dev_t.t_ce = t_ce * 1000;
83 dev_t.t_oe = t_oe * 1000;
84 dev_t.t_cez_r = t_cez * 1000;
85 dev_t.t_cez_w = dev_t.t_cez_r;
86 dev_t.t_wpl = t_wpl * 1000;
87 dev_t.t_wph = t_wph * 1000;
88
89 gpmc_calc_timings(t, &onenand_async, &dev_t);
90}
91
92static void omap2_onenand_set_async_mode(void __iomem *onenand_base)
93{
94 u32 reg;
95
96 /* Ensure sync read and sync write are disabled */
97 reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
98 reg &= ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE;
99 writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
100}
101
102static void set_onenand_cfg(void __iomem *onenand_base)
103{
104 u32 reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
105
106 reg |= (latency << ONENAND_SYS_CFG1_BRL_SHIFT) |
107 ONENAND_SYS_CFG1_BL_16;
108 if (onenand_flags & ONENAND_FLAG_SYNCREAD)
109 reg |= ONENAND_SYS_CFG1_SYNC_READ;
110 else
111 reg &= ~ONENAND_SYS_CFG1_SYNC_READ;
112 if (onenand_flags & ONENAND_FLAG_SYNCWRITE)
113 reg |= ONENAND_SYS_CFG1_SYNC_WRITE;
114 else
115 reg &= ~ONENAND_SYS_CFG1_SYNC_WRITE;
116 if (onenand_flags & ONENAND_FLAG_HF)
117 reg |= ONENAND_SYS_CFG1_HF;
118 else
119 reg &= ~ONENAND_SYS_CFG1_HF;
120 if (onenand_flags & ONENAND_FLAG_VHF)
121 reg |= ONENAND_SYS_CFG1_VHF;
122 else
123 reg &= ~ONENAND_SYS_CFG1_VHF;
124
125 writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
126}
127
128static int omap2_onenand_get_freq(struct omap_onenand_platform_data *cfg,
129 void __iomem *onenand_base)
130{
131 u16 ver = readw(onenand_base + ONENAND_REG_VERSION_ID);
132 int freq;
133
134 switch ((ver >> 4) & 0xf) {
135 case 0:
136 freq = 40;
137 break;
138 case 1:
139 freq = 54;
140 break;
141 case 2:
142 freq = 66;
143 break;
144 case 3:
145 freq = 83;
146 break;
147 case 4:
148 freq = 104;
149 break;
150 default:
151 pr_err("onenand rate not detected, bad GPMC async timings?\n");
152 freq = 0;
153 }
154
155 return freq;
156}
157
158static void omap2_onenand_calc_sync_timings(struct gpmc_timings *t,
159 unsigned int flags,
160 int freq)
161{
162 struct gpmc_device_timings dev_t;
163 const int t_cer = 15;
164 const int t_avdp = 12;
165 const int t_cez = 20; /* max of t_cez, t_oez */
166 const int t_wpl = 40;
167 const int t_wph = 30;
168 int min_gpmc_clk_period, t_ces, t_avds, t_avdh, t_ach, t_aavdh, t_rdyo;
169 int div, gpmc_clk_ns;
170
171 if (flags & ONENAND_SYNC_READ)
172 onenand_flags = ONENAND_FLAG_SYNCREAD;
173 else if (flags & ONENAND_SYNC_READWRITE)
174 onenand_flags = ONENAND_FLAG_SYNCREAD | ONENAND_FLAG_SYNCWRITE;
175
176 switch (freq) {
177 case 104:
178 min_gpmc_clk_period = 9600; /* 104 MHz */
179 t_ces = 3;
180 t_avds = 4;
181 t_avdh = 2;
182 t_ach = 3;
183 t_aavdh = 6;
184 t_rdyo = 6;
185 break;
186 case 83:
187 min_gpmc_clk_period = 12000; /* 83 MHz */
188 t_ces = 5;
189 t_avds = 4;
190 t_avdh = 2;
191 t_ach = 6;
192 t_aavdh = 6;
193 t_rdyo = 9;
194 break;
195 case 66:
196 min_gpmc_clk_period = 15000; /* 66 MHz */
197 t_ces = 6;
198 t_avds = 5;
199 t_avdh = 2;
200 t_ach = 6;
201 t_aavdh = 6;
202 t_rdyo = 11;
203 break;
204 default:
205 min_gpmc_clk_period = 18500; /* 54 MHz */
206 t_ces = 7;
207 t_avds = 7;
208 t_avdh = 7;
209 t_ach = 9;
210 t_aavdh = 7;
211 t_rdyo = 15;
212 onenand_flags &= ~ONENAND_FLAG_SYNCWRITE;
213 break;
214 }
215
216 div = gpmc_calc_divider(min_gpmc_clk_period);
217 gpmc_clk_ns = gpmc_ticks_to_ns(div);
218 if (gpmc_clk_ns < 15) /* >66MHz */
219 onenand_flags |= ONENAND_FLAG_HF;
220 else
221 onenand_flags &= ~ONENAND_FLAG_HF;
222 if (gpmc_clk_ns < 12) /* >83MHz */
223 onenand_flags |= ONENAND_FLAG_VHF;
224 else
225 onenand_flags &= ~ONENAND_FLAG_VHF;
226 if (onenand_flags & ONENAND_FLAG_VHF)
227 latency = 8;
228 else if (onenand_flags & ONENAND_FLAG_HF)
229 latency = 6;
230 else if (gpmc_clk_ns >= 25) /* 40 MHz*/
231 latency = 3;
232 else
233 latency = 4;
234
235 /* Set synchronous read timings */
236 memset(&dev_t, 0, sizeof(dev_t));
237
238 if (onenand_flags & ONENAND_FLAG_SYNCREAD)
239 onenand_sync.sync_read = true;
240 if (onenand_flags & ONENAND_FLAG_SYNCWRITE) {
241 onenand_sync.sync_write = true;
242 onenand_sync.burst_write = true;
243 } else {
244 dev_t.t_avdp_w = max(t_avdp, t_cer) * 1000;
245 dev_t.t_wpl = t_wpl * 1000;
246 dev_t.t_wph = t_wph * 1000;
247 dev_t.t_aavdh = t_aavdh * 1000;
248 }
249 dev_t.ce_xdelay = true;
250 dev_t.avd_xdelay = true;
251 dev_t.oe_xdelay = true;
252 dev_t.we_xdelay = true;
253 dev_t.clk = min_gpmc_clk_period;
254 dev_t.t_bacc = dev_t.clk;
255 dev_t.t_ces = t_ces * 1000;
256 dev_t.t_avds = t_avds * 1000;
257 dev_t.t_avdh = t_avdh * 1000;
258 dev_t.t_ach = t_ach * 1000;
259 dev_t.cyc_iaa = (latency + 1);
260 dev_t.t_cez_r = t_cez * 1000;
261 dev_t.t_cez_w = dev_t.t_cez_r;
262 dev_t.cyc_aavdh_oe = 1;
263 dev_t.t_rdyo = t_rdyo * 1000 + min_gpmc_clk_period;
264
265 gpmc_calc_timings(t, &onenand_sync, &dev_t);
266}
267
268static int omap2_onenand_setup_async(void __iomem *onenand_base)
269{
270 struct gpmc_timings t;
271 int ret;
272
273 /*
274 * Note that we need to keep sync_write set for the call to
275 * omap2_onenand_set_async_mode() to work to detect the onenand
276 * supported clock rate for the sync timings.
277 */
278 if (gpmc_onenand_data->of_node) {
279 gpmc_read_settings_dt(gpmc_onenand_data->of_node,
280 &onenand_async);
281 if (onenand_async.sync_read || onenand_async.sync_write) {
282 if (onenand_async.sync_write)
283 gpmc_onenand_data->flags |=
284 ONENAND_SYNC_READWRITE;
285 else
286 gpmc_onenand_data->flags |= ONENAND_SYNC_READ;
287 onenand_async.sync_read = false;
288 }
289 }
290
291 onenand_async.sync_write = true;
292 omap2_onenand_calc_async_timings(&t);
293
294 ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_async);
295 if (ret < 0)
296 return ret;
297
298 ret = gpmc_cs_set_timings(gpmc_onenand_data->cs, &t, &onenand_async);
299 if (ret < 0)
300 return ret;
301
302 omap2_onenand_set_async_mode(onenand_base);
303
304 return 0;
305}
306
307static int omap2_onenand_setup_sync(void __iomem *onenand_base, int *freq_ptr)
308{
309 int ret, freq = *freq_ptr;
310 struct gpmc_timings t;
311
312 if (!freq) {
313 /* Very first call freq is not known */
314 freq = omap2_onenand_get_freq(gpmc_onenand_data, onenand_base);
315 if (!freq)
316 return -ENODEV;
317 set_onenand_cfg(onenand_base);
318 }
319
320 if (gpmc_onenand_data->of_node) {
321 gpmc_read_settings_dt(gpmc_onenand_data->of_node,
322 &onenand_sync);
323 } else {
324 /*
325 * FIXME: Appears to be legacy code from initial ONENAND commit.
326 * Unclear what boards this is for and if this can be removed.
327 */
328 if (!cpu_is_omap34xx())
329 onenand_sync.wait_on_read = true;
330 }
331
332 omap2_onenand_calc_sync_timings(&t, gpmc_onenand_data->flags, freq);
333
334 ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_sync);
335 if (ret < 0)
336 return ret;
337
338 ret = gpmc_cs_set_timings(gpmc_onenand_data->cs, &t, &onenand_sync);
339 if (ret < 0)
340 return ret;
341
342 set_onenand_cfg(onenand_base);
343
344 *freq_ptr = freq;
345
346 return 0;
347}
348
349static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
350{
351 struct device *dev = &gpmc_onenand_device.dev;
352 unsigned l = ONENAND_SYNC_READ | ONENAND_SYNC_READWRITE;
353 int ret;
354
355 ret = omap2_onenand_setup_async(onenand_base);
356 if (ret) {
357 dev_err(dev, "unable to set to async mode\n");
358 return ret;
359 }
360
361 if (!(gpmc_onenand_data->flags & l))
362 return 0;
363
364 ret = omap2_onenand_setup_sync(onenand_base, freq_ptr);
365 if (ret)
366 dev_err(dev, "unable to set to sync mode\n");
367 return ret;
368}
369
370int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
371{
372 int err;
373 struct device *dev = &gpmc_onenand_device.dev;
374
375 gpmc_onenand_data = _onenand_data;
376 gpmc_onenand_data->onenand_setup = gpmc_onenand_setup;
377 gpmc_onenand_device.dev.platform_data = gpmc_onenand_data;
378
379 if (cpu_is_omap24xx() &&
380 (gpmc_onenand_data->flags & ONENAND_SYNC_READWRITE)) {
381 dev_warn(dev, "OneNAND using only SYNC_READ on 24xx\n");
382 gpmc_onenand_data->flags &= ~ONENAND_SYNC_READWRITE;
383 gpmc_onenand_data->flags |= ONENAND_SYNC_READ;
384 }
385
386 if (cpu_is_omap34xx())
387 gpmc_onenand_data->flags |= ONENAND_IN_OMAP34XX;
388 else
389 gpmc_onenand_data->flags &= ~ONENAND_IN_OMAP34XX;
390
391 err = gpmc_cs_request(gpmc_onenand_data->cs, ONENAND_IO_SIZE,
392 (unsigned long *)&gpmc_onenand_resource.start);
393 if (err < 0) {
394 dev_err(dev, "Cannot request GPMC CS %d, error %d\n",
395 gpmc_onenand_data->cs, err);
396 return err;
397 }
398
399 gpmc_onenand_resource.end = gpmc_onenand_resource.start +
400 ONENAND_IO_SIZE - 1;
401
402 err = platform_device_register(&gpmc_onenand_device);
403 if (err) {
404 dev_err(dev, "Unable to register OneNAND device\n");
405 gpmc_cs_free(gpmc_onenand_data->cs);
406 }
407
408 return err;
409}
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 6356c6da34ea..b20fa9b31efe 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -161,7 +161,7 @@ CONFIG_MTD_BLOCK=y
161CONFIG_MTD_M25P80=y 161CONFIG_MTD_M25P80=y
162CONFIG_MTD_NAND=y 162CONFIG_MTD_NAND=y
163CONFIG_MTD_NAND_DENALI_DT=y 163CONFIG_MTD_NAND_DENALI_DT=y
164CONFIG_MTD_NAND_PXA3xx=y 164CONFIG_MTD_NAND_MARVELL=y
165CONFIG_MTD_SPI_NOR=y 165CONFIG_MTD_SPI_NOR=y
166CONFIG_BLK_DEV_LOOP=y 166CONFIG_BLK_DEV_LOOP=y
167CONFIG_BLK_DEV_NBD=m 167CONFIG_BLK_DEV_NBD=m
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index a385a35c7de9..90a66b3f7ae1 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -32,7 +32,6 @@
32#include <linux/pm_runtime.h> 32#include <linux/pm_runtime.h>
33 33
34#include <linux/platform_data/mtd-nand-omap2.h> 34#include <linux/platform_data/mtd-nand-omap2.h>
35#include <linux/platform_data/mtd-onenand-omap2.h>
36 35
37#include <asm/mach-types.h> 36#include <asm/mach-types.h>
38 37
@@ -1138,6 +1137,112 @@ struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *reg, int cs)
1138} 1137}
1139EXPORT_SYMBOL_GPL(gpmc_omap_get_nand_ops); 1138EXPORT_SYMBOL_GPL(gpmc_omap_get_nand_ops);
1140 1139
1140static void gpmc_omap_onenand_calc_sync_timings(struct gpmc_timings *t,
1141 struct gpmc_settings *s,
1142 int freq, int latency)
1143{
1144 struct gpmc_device_timings dev_t;
1145 const int t_cer = 15;
1146 const int t_avdp = 12;
1147 const int t_cez = 20; /* max of t_cez, t_oez */
1148 const int t_wpl = 40;
1149 const int t_wph = 30;
1150 int min_gpmc_clk_period, t_ces, t_avds, t_avdh, t_ach, t_aavdh, t_rdyo;
1151
1152 switch (freq) {
1153 case 104:
1154 min_gpmc_clk_period = 9600; /* 104 MHz */
1155 t_ces = 3;
1156 t_avds = 4;
1157 t_avdh = 2;
1158 t_ach = 3;
1159 t_aavdh = 6;
1160 t_rdyo = 6;
1161 break;
1162 case 83:
1163 min_gpmc_clk_period = 12000; /* 83 MHz */
1164 t_ces = 5;
1165 t_avds = 4;
1166 t_avdh = 2;
1167 t_ach = 6;
1168 t_aavdh = 6;
1169 t_rdyo = 9;
1170 break;
1171 case 66:
1172 min_gpmc_clk_period = 15000; /* 66 MHz */
1173 t_ces = 6;
1174 t_avds = 5;
1175 t_avdh = 2;
1176 t_ach = 6;
1177 t_aavdh = 6;
1178 t_rdyo = 11;
1179 break;
1180 default:
1181 min_gpmc_clk_period = 18500; /* 54 MHz */
1182 t_ces = 7;
1183 t_avds = 7;
1184 t_avdh = 7;
1185 t_ach = 9;
1186 t_aavdh = 7;
1187 t_rdyo = 15;
1188 break;
1189 }
1190
1191 /* Set synchronous read timings */
1192 memset(&dev_t, 0, sizeof(dev_t));
1193
1194 if (!s->sync_write) {
1195 dev_t.t_avdp_w = max(t_avdp, t_cer) * 1000;
1196 dev_t.t_wpl = t_wpl * 1000;
1197 dev_t.t_wph = t_wph * 1000;
1198 dev_t.t_aavdh = t_aavdh * 1000;
1199 }
1200 dev_t.ce_xdelay = true;
1201 dev_t.avd_xdelay = true;
1202 dev_t.oe_xdelay = true;
1203 dev_t.we_xdelay = true;
1204 dev_t.clk = min_gpmc_clk_period;
1205 dev_t.t_bacc = dev_t.clk;
1206 dev_t.t_ces = t_ces * 1000;
1207 dev_t.t_avds = t_avds * 1000;
1208 dev_t.t_avdh = t_avdh * 1000;
1209 dev_t.t_ach = t_ach * 1000;
1210 dev_t.cyc_iaa = (latency + 1);
1211 dev_t.t_cez_r = t_cez * 1000;
1212 dev_t.t_cez_w = dev_t.t_cez_r;
1213 dev_t.cyc_aavdh_oe = 1;
1214 dev_t.t_rdyo = t_rdyo * 1000 + min_gpmc_clk_period;
1215
1216 gpmc_calc_timings(t, s, &dev_t);
1217}
1218
1219int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
1220 int latency,
1221 struct gpmc_onenand_info *info)
1222{
1223 int ret;
1224 struct gpmc_timings gpmc_t;
1225 struct gpmc_settings gpmc_s;
1226
1227 gpmc_read_settings_dt(dev->of_node, &gpmc_s);
1228
1229 info->sync_read = gpmc_s.sync_read;
1230 info->sync_write = gpmc_s.sync_write;
1231 info->burst_len = gpmc_s.burst_len;
1232
1233 if (!gpmc_s.sync_read && !gpmc_s.sync_write)
1234 return 0;
1235
1236 gpmc_omap_onenand_calc_sync_timings(&gpmc_t, &gpmc_s, freq, latency);
1237
1238 ret = gpmc_cs_program_settings(cs, &gpmc_s);
1239 if (ret < 0)
1240 return ret;
1241
1242 return gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s);
1243}
1244EXPORT_SYMBOL_GPL(gpmc_omap_onenand_set_timings);
1245
1141int gpmc_get_client_irq(unsigned irq_config) 1246int gpmc_get_client_irq(unsigned irq_config)
1142{ 1247{
1143 if (!gpmc_irq_domain) { 1248 if (!gpmc_irq_domain) {
@@ -1916,41 +2021,6 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
1916 of_property_read_bool(np, "gpmc,time-para-granularity"); 2021 of_property_read_bool(np, "gpmc,time-para-granularity");
1917} 2022}
1918 2023
1919#if IS_ENABLED(CONFIG_MTD_ONENAND)
1920static int gpmc_probe_onenand_child(struct platform_device *pdev,
1921 struct device_node *child)
1922{
1923 u32 val;
1924 struct omap_onenand_platform_data *gpmc_onenand_data;
1925
1926 if (of_property_read_u32(child, "reg", &val) < 0) {
1927 dev_err(&pdev->dev, "%pOF has no 'reg' property\n",
1928 child);
1929 return -ENODEV;
1930 }
1931
1932 gpmc_onenand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_onenand_data),
1933 GFP_KERNEL);
1934 if (!gpmc_onenand_data)
1935 return -ENOMEM;
1936
1937 gpmc_onenand_data->cs = val;
1938 gpmc_onenand_data->of_node = child;
1939 gpmc_onenand_data->dma_channel = -1;
1940
1941 if (!of_property_read_u32(child, "dma-channel", &val))
1942 gpmc_onenand_data->dma_channel = val;
1943
1944 return gpmc_onenand_init(gpmc_onenand_data);
1945}
1946#else
1947static int gpmc_probe_onenand_child(struct platform_device *pdev,
1948 struct device_node *child)
1949{
1950 return 0;
1951}
1952#endif
1953
1954/** 2024/**
1955 * gpmc_probe_generic_child - configures the gpmc for a child device 2025 * gpmc_probe_generic_child - configures the gpmc for a child device
1956 * @pdev: pointer to gpmc platform device 2026 * @pdev: pointer to gpmc platform device
@@ -2053,6 +2123,16 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
2053 } 2123 }
2054 } 2124 }
2055 2125
2126 if (of_node_cmp(child->name, "onenand") == 0) {
2127 /* Warn about older DT blobs with no compatible property */
2128 if (!of_property_read_bool(child, "compatible")) {
2129 dev_warn(&pdev->dev,
2130 "Incompatible OneNAND node: missing compatible");
2131 ret = -EINVAL;
2132 goto err;
2133 }
2134 }
2135
2056 if (of_device_is_compatible(child, "ti,omap2-nand")) { 2136 if (of_device_is_compatible(child, "ti,omap2-nand")) {
2057 /* NAND specific setup */ 2137 /* NAND specific setup */
2058 val = 8; 2138 val = 8;
@@ -2077,8 +2157,9 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
2077 } else { 2157 } else {
2078 ret = of_property_read_u32(child, "bank-width", 2158 ret = of_property_read_u32(child, "bank-width",
2079 &gpmc_s.device_width); 2159 &gpmc_s.device_width);
2080 if (ret < 0) { 2160 if (ret < 0 && !gpmc_s.device_width) {
2081 dev_err(&pdev->dev, "%pOF has no 'bank-width' property\n", 2161 dev_err(&pdev->dev,
2162 "%pOF has no 'gpmc,device-width' property\n",
2082 child); 2163 child);
2083 goto err; 2164 goto err;
2084 } 2165 }
@@ -2188,11 +2269,7 @@ static void gpmc_probe_dt_children(struct platform_device *pdev)
2188 if (!child->name) 2269 if (!child->name)
2189 continue; 2270 continue;
2190 2271
2191 if (of_node_cmp(child->name, "onenand") == 0) 2272 ret = gpmc_probe_generic_child(pdev, child);
2192 ret = gpmc_probe_onenand_child(pdev, child);
2193 else
2194 ret = gpmc_probe_generic_child(pdev, child);
2195
2196 if (ret) { 2273 if (ret) {
2197 dev_err(&pdev->dev, "failed to probe DT child '%s': %d\n", 2274 dev_err(&pdev->dev, "failed to probe DT child '%s': %d\n",
2198 child->name, ret); 2275 child->name, ret);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index bb48aafed9a2..e6b8c59f2c0d 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -315,6 +315,7 @@ config MTD_NAND_ATMEL
315 315
316config MTD_NAND_PXA3xx 316config MTD_NAND_PXA3xx
317 tristate "NAND support on PXA3xx and Armada 370/XP" 317 tristate "NAND support on PXA3xx and Armada 370/XP"
318 depends on !MTD_NAND_MARVELL
318 depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU 319 depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU
319 help 320 help
320 321
@@ -323,6 +324,18 @@ config MTD_NAND_PXA3xx
323 platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada 324 platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada
324 platforms (7K, 8K) (NFCv2). 325 platforms (7K, 8K) (NFCv2).
325 326
327config MTD_NAND_MARVELL
328 tristate "NAND controller support on Marvell boards"
329 depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \
330 COMPILE_TEST
331 depends on HAS_IOMEM
332 help
333 This enables the NAND flash controller driver for Marvell boards,
334 including:
335 - PXA3xx processors (NFCv1)
336 - 32-bit Armada platforms (XP, 37x, 38x, 39x) (NFCv2)
337 - 64-bit Aramda platforms (7k, 8k) (NFCv2)
338
326config MTD_NAND_SLC_LPC32XX 339config MTD_NAND_SLC_LPC32XX
327 tristate "NXP LPC32xx SLC Controller" 340 tristate "NXP LPC32xx SLC Controller"
328 depends on ARCH_LPC32XX 341 depends on ARCH_LPC32XX
@@ -376,9 +389,7 @@ config MTD_NAND_GPMI_NAND
376 Enables NAND Flash support for IMX23, IMX28 or IMX6. 389 Enables NAND Flash support for IMX23, IMX28 or IMX6.
377 The GPMI controller is very powerful, with the help of BCH 390 The GPMI controller is very powerful, with the help of BCH
378 module, it can do the hardware ECC. The GPMI supports several 391 module, it can do the hardware ECC. The GPMI supports several
379 NAND flashs at the same time. The GPMI may conflicts with other 392 NAND flashs at the same time.
380 block, such as SD card. So pay attention to it when you enable
381 the GPMI.
382 393
383config MTD_NAND_BRCMNAND 394config MTD_NAND_BRCMNAND
384 tristate "Broadcom STB NAND controller" 395 tristate "Broadcom STB NAND controller"
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 118a1349aad3..921634ba400c 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_nand.o
32obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD) += omap_elm.o 32obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD) += omap_elm.o
33obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o 33obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
34obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o 34obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
35obj-$(CONFIG_MTD_NAND_MARVELL) += marvell_nand.o
35obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o 36obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
36obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o 37obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
37obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o 38obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
index 90a71a56bc23..b2f00b398490 100644
--- a/drivers/mtd/nand/atmel/nand-controller.c
+++ b/drivers/mtd/nand/atmel/nand-controller.c
@@ -841,6 +841,8 @@ static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
841 struct atmel_nand *nand = to_atmel_nand(chip); 841 struct atmel_nand *nand = to_atmel_nand(chip);
842 int ret; 842 int ret;
843 843
844 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
845
844 ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw); 846 ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
845 if (ret) 847 if (ret)
846 return ret; 848 return ret;
@@ -857,7 +859,7 @@ static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
857 859
858 atmel_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize); 860 atmel_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
859 861
860 return 0; 862 return nand_prog_page_end_op(chip);
861} 863}
862 864
863static int atmel_nand_pmecc_write_page(struct mtd_info *mtd, 865static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
@@ -881,6 +883,8 @@ static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
881 struct mtd_info *mtd = nand_to_mtd(chip); 883 struct mtd_info *mtd = nand_to_mtd(chip);
882 int ret; 884 int ret;
883 885
886 nand_read_page_op(chip, page, 0, NULL, 0);
887
884 ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw); 888 ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
885 if (ret) 889 if (ret)
886 return ret; 890 return ret;
@@ -1000,7 +1004,7 @@ static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
1000 * to the non-optimized one. 1004 * to the non-optimized one.
1001 */ 1005 */
1002 if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB) { 1006 if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB) {
1003 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page); 1007 nand_read_page_op(chip, page, 0, NULL, 0);
1004 1008
1005 return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, 1009 return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page,
1006 raw); 1010 raw);
@@ -1178,7 +1182,6 @@ static int atmel_hsmc_nand_ecc_init(struct atmel_nand *nand)
1178 chip->ecc.write_page = atmel_hsmc_nand_pmecc_write_page; 1182 chip->ecc.write_page = atmel_hsmc_nand_pmecc_write_page;
1179 chip->ecc.read_page_raw = atmel_hsmc_nand_pmecc_read_page_raw; 1183 chip->ecc.read_page_raw = atmel_hsmc_nand_pmecc_read_page_raw;
1180 chip->ecc.write_page_raw = atmel_hsmc_nand_pmecc_write_page_raw; 1184 chip->ecc.write_page_raw = atmel_hsmc_nand_pmecc_write_page_raw;
1181 chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
1182 1185
1183 return 0; 1186 return 0;
1184} 1187}
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 5655dca6ce43..87bbd177b3e5 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -572,6 +572,8 @@ static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
572static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 572static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
573 uint8_t *buf, int oob_required, int page) 573 uint8_t *buf, int oob_required, int page)
574{ 574{
575 nand_read_page_op(chip, page, 0, NULL, 0);
576
575 bf5xx_nand_read_buf(mtd, buf, mtd->writesize); 577 bf5xx_nand_read_buf(mtd, buf, mtd->writesize);
576 bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize); 578 bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
577 579
@@ -582,10 +584,10 @@ static int bf5xx_nand_write_page_raw(struct mtd_info *mtd,
582 struct nand_chip *chip, const uint8_t *buf, int oob_required, 584 struct nand_chip *chip, const uint8_t *buf, int oob_required,
583 int page) 585 int page)
584{ 586{
585 bf5xx_nand_write_buf(mtd, buf, mtd->writesize); 587 nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
586 bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize); 588 bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
587 589
588 return 0; 590 return nand_prog_page_end_op(chip);
589} 591}
590 592
591/* 593/*
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index e0eb51d8c012..b81ddbaae149 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -1071,7 +1071,7 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp)
1071 return; 1071 return;
1072 1072
1073 brcmnand_set_wp(ctrl, wp); 1073 brcmnand_set_wp(ctrl, wp);
1074 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); 1074 nand_status_op(chip, NULL);
1075 /* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */ 1075 /* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
1076 ret = bcmnand_ctrl_poll_status(ctrl, 1076 ret = bcmnand_ctrl_poll_status(ctrl,
1077 NAND_CTRL_RDY | 1077 NAND_CTRL_RDY |
@@ -1453,7 +1453,7 @@ static uint8_t brcmnand_read_byte(struct mtd_info *mtd)
1453 1453
1454 /* At FC_BYTES boundary, switch to next column */ 1454 /* At FC_BYTES boundary, switch to next column */
1455 if (host->last_byte > 0 && offs == 0) 1455 if (host->last_byte > 0 && offs == 0)
1456 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, addr, -1); 1456 nand_change_read_column_op(chip, addr, NULL, 0, false);
1457 1457
1458 ret = ctrl->flash_cache[offs]; 1458 ret = ctrl->flash_cache[offs];
1459 break; 1459 break;
@@ -1681,7 +1681,7 @@ static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
1681 int ret; 1681 int ret;
1682 1682
1683 if (!buf) { 1683 if (!buf) {
1684 buf = chip->buffers->databuf; 1684 buf = chip->data_buf;
1685 /* Invalidate page cache */ 1685 /* Invalidate page cache */
1686 chip->pagebuf = -1; 1686 chip->pagebuf = -1;
1687 } 1687 }
@@ -1689,7 +1689,6 @@ static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
1689 sas = mtd->oobsize / chip->ecc.steps; 1689 sas = mtd->oobsize / chip->ecc.steps;
1690 1690
1691 /* read without ecc for verification */ 1691 /* read without ecc for verification */
1692 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1693 ret = chip->ecc.read_page_raw(mtd, chip, buf, true, page); 1692 ret = chip->ecc.read_page_raw(mtd, chip, buf, true, page);
1694 if (ret) 1693 if (ret)
1695 return ret; 1694 return ret;
@@ -1793,6 +1792,8 @@ static int brcmnand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1793 struct brcmnand_host *host = nand_get_controller_data(chip); 1792 struct brcmnand_host *host = nand_get_controller_data(chip);
1794 u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL; 1793 u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
1795 1794
1795 nand_read_page_op(chip, page, 0, NULL, 0);
1796
1796 return brcmnand_read(mtd, chip, host->last_addr, 1797 return brcmnand_read(mtd, chip, host->last_addr,
1797 mtd->writesize >> FC_SHIFT, (u32 *)buf, oob); 1798 mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
1798} 1799}
@@ -1804,6 +1805,8 @@ static int brcmnand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1804 u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL; 1805 u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
1805 int ret; 1806 int ret;
1806 1807
1808 nand_read_page_op(chip, page, 0, NULL, 0);
1809
1807 brcmnand_set_ecc_enabled(host, 0); 1810 brcmnand_set_ecc_enabled(host, 0);
1808 ret = brcmnand_read(mtd, chip, host->last_addr, 1811 ret = brcmnand_read(mtd, chip, host->last_addr,
1809 mtd->writesize >> FC_SHIFT, (u32 *)buf, oob); 1812 mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
@@ -1909,8 +1912,10 @@ static int brcmnand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1909 struct brcmnand_host *host = nand_get_controller_data(chip); 1912 struct brcmnand_host *host = nand_get_controller_data(chip);
1910 void *oob = oob_required ? chip->oob_poi : NULL; 1913 void *oob = oob_required ? chip->oob_poi : NULL;
1911 1914
1915 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1912 brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); 1916 brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
1913 return 0; 1917
1918 return nand_prog_page_end_op(chip);
1914} 1919}
1915 1920
1916static int brcmnand_write_page_raw(struct mtd_info *mtd, 1921static int brcmnand_write_page_raw(struct mtd_info *mtd,
@@ -1920,10 +1925,12 @@ static int brcmnand_write_page_raw(struct mtd_info *mtd,
1920 struct brcmnand_host *host = nand_get_controller_data(chip); 1925 struct brcmnand_host *host = nand_get_controller_data(chip);
1921 void *oob = oob_required ? chip->oob_poi : NULL; 1926 void *oob = oob_required ? chip->oob_poi : NULL;
1922 1927
1928 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1923 brcmnand_set_ecc_enabled(host, 0); 1929 brcmnand_set_ecc_enabled(host, 0);
1924 brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); 1930 brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
1925 brcmnand_set_ecc_enabled(host, 1); 1931 brcmnand_set_ecc_enabled(host, 1);
1926 return 0; 1932
1933 return nand_prog_page_end_op(chip);
1927} 1934}
1928 1935
1929static int brcmnand_write_oob(struct mtd_info *mtd, struct nand_chip *chip, 1936static int brcmnand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
@@ -2193,16 +2200,9 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
2193 if (ctrl->nand_version >= 0x0702) 2200 if (ctrl->nand_version >= 0x0702)
2194 tmp |= ACC_CONTROL_RD_ERASED; 2201 tmp |= ACC_CONTROL_RD_ERASED;
2195 tmp &= ~ACC_CONTROL_FAST_PGM_RDIN; 2202 tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
2196 if (ctrl->features & BRCMNAND_HAS_PREFETCH) { 2203 if (ctrl->features & BRCMNAND_HAS_PREFETCH)
2197 /* 2204 tmp &= ~ACC_CONTROL_PREFETCH;
2198 * FIXME: Flash DMA + prefetch may see spurious erased-page ECC 2205
2199 * errors
2200 */
2201 if (has_flash_dma(ctrl))
2202 tmp &= ~ACC_CONTROL_PREFETCH;
2203 else
2204 tmp |= ACC_CONTROL_PREFETCH;
2205 }
2206 nand_writereg(ctrl, offs, tmp); 2206 nand_writereg(ctrl, offs, tmp);
2207 2207
2208 return 0; 2208 return 0;
@@ -2230,6 +2230,9 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
2230 nand_set_controller_data(chip, host); 2230 nand_set_controller_data(chip, host);
2231 mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d", 2231 mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d",
2232 host->cs); 2232 host->cs);
2233 if (!mtd->name)
2234 return -ENOMEM;
2235
2233 mtd->owner = THIS_MODULE; 2236 mtd->owner = THIS_MODULE;
2234 mtd->dev.parent = &pdev->dev; 2237 mtd->dev.parent = &pdev->dev;
2235 2238
@@ -2369,12 +2372,11 @@ static int brcmnand_resume(struct device *dev)
2369 2372
2370 list_for_each_entry(host, &ctrl->host_list, node) { 2373 list_for_each_entry(host, &ctrl->host_list, node) {
2371 struct nand_chip *chip = &host->chip; 2374 struct nand_chip *chip = &host->chip;
2372 struct mtd_info *mtd = nand_to_mtd(chip);
2373 2375
2374 brcmnand_save_restore_cs_config(host, 1); 2376 brcmnand_save_restore_cs_config(host, 1);
2375 2377
2376 /* Reset the chip, required by some chips after power-up */ 2378 /* Reset the chip, required by some chips after power-up */
2377 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); 2379 nand_reset_op(chip);
2378 } 2380 }
2379 2381
2380 return 0; 2382 return 0;
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index bc558c438a57..567ff972d5fc 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -353,23 +353,15 @@ static void cafe_nand_bug(struct mtd_info *mtd)
353static int cafe_nand_write_oob(struct mtd_info *mtd, 353static int cafe_nand_write_oob(struct mtd_info *mtd,
354 struct nand_chip *chip, int page) 354 struct nand_chip *chip, int page)
355{ 355{
356 int status = 0; 356 return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
357 357 mtd->oobsize);
358 chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
359 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
360 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
361 status = chip->waitfunc(mtd, chip);
362
363 return status & NAND_STATUS_FAIL ? -EIO : 0;
364} 358}
365 359
366/* Don't use -- use nand_read_oob_std for now */ 360/* Don't use -- use nand_read_oob_std for now */
367static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 361static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
368 int page) 362 int page)
369{ 363{
370 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); 364 return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
371 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
372 return 0;
373} 365}
374/** 366/**
375 * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read 367 * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
@@ -391,7 +383,7 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
391 cafe_readl(cafe, NAND_ECC_RESULT), 383 cafe_readl(cafe, NAND_ECC_RESULT),
392 cafe_readl(cafe, NAND_ECC_SYN01)); 384 cafe_readl(cafe, NAND_ECC_SYN01));
393 385
394 chip->read_buf(mtd, buf, mtd->writesize); 386 nand_read_page_op(chip, page, 0, buf, mtd->writesize);
395 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 387 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
396 388
397 if (checkecc && cafe_readl(cafe, NAND_ECC_RESULT) & (1<<18)) { 389 if (checkecc && cafe_readl(cafe, NAND_ECC_RESULT) & (1<<18)) {
@@ -549,13 +541,13 @@ static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
549{ 541{
550 struct cafe_priv *cafe = nand_get_controller_data(chip); 542 struct cafe_priv *cafe = nand_get_controller_data(chip);
551 543
552 chip->write_buf(mtd, buf, mtd->writesize); 544 nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
553 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 545 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
554 546
555 /* Set up ECC autogeneration */ 547 /* Set up ECC autogeneration */
556 cafe->ctl2 |= (1<<30); 548 cafe->ctl2 |= (1<<30);
557 549
558 return 0; 550 return nand_prog_page_end_op(chip);
559} 551}
560 552
561static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs) 553static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
@@ -613,7 +605,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
613 uint32_t ctrl; 605 uint32_t ctrl;
614 int err = 0; 606 int err = 0;
615 int old_dma; 607 int old_dma;
616 struct nand_buffers *nbuf;
617 608
618 /* Very old versions shared the same PCI ident for all three 609 /* Very old versions shared the same PCI ident for all three
619 functions on the chip. Verify the class too... */ 610 functions on the chip. Verify the class too... */
@@ -661,7 +652,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
661 652
662 /* Enable the following for a flash based bad block table */ 653 /* Enable the following for a flash based bad block table */
663 cafe->nand.bbt_options = NAND_BBT_USE_FLASH; 654 cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
664 cafe->nand.options = NAND_OWN_BUFFERS;
665 655
666 if (skipbbt) { 656 if (skipbbt) {
667 cafe->nand.options |= NAND_SKIP_BBTSCAN; 657 cafe->nand.options |= NAND_SKIP_BBTSCAN;
@@ -731,32 +721,20 @@ static int cafe_nand_probe(struct pci_dev *pdev,
731 if (err) 721 if (err)
732 goto out_irq; 722 goto out_irq;
733 723
734 cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev, 724 cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev, 2112,
735 2112 + sizeof(struct nand_buffers) + 725 &cafe->dmaaddr, GFP_KERNEL);
736 mtd->writesize + mtd->oobsize,
737 &cafe->dmaaddr, GFP_KERNEL);
738 if (!cafe->dmabuf) { 726 if (!cafe->dmabuf) {
739 err = -ENOMEM; 727 err = -ENOMEM;
740 goto out_irq; 728 goto out_irq;
741 } 729 }
742 cafe->nand.buffers = nbuf = (void *)cafe->dmabuf + 2112;
743 730
744 /* Set up DMA address */ 731 /* Set up DMA address */
745 cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0); 732 cafe_writel(cafe, lower_32_bits(cafe->dmaaddr), NAND_DMA_ADDR0);
746 if (sizeof(cafe->dmaaddr) > 4) 733 cafe_writel(cafe, upper_32_bits(cafe->dmaaddr), NAND_DMA_ADDR1);
747 /* Shift in two parts to shut the compiler up */
748 cafe_writel(cafe, (cafe->dmaaddr >> 16) >> 16, NAND_DMA_ADDR1);
749 else
750 cafe_writel(cafe, 0, NAND_DMA_ADDR1);
751 734
752 cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n", 735 cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",
753 cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf); 736 cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf);
754 737
755 /* this driver does not need the @ecccalc and @ecccode */
756 nbuf->ecccalc = NULL;
757 nbuf->ecccode = NULL;
758 nbuf->databuf = (uint8_t *)(nbuf + 1);
759
760 /* Restore the DMA flag */ 738 /* Restore the DMA flag */
761 usedma = old_dma; 739 usedma = old_dma;
762 740
@@ -801,10 +779,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
801 goto out; 779 goto out;
802 780
803 out_free_dma: 781 out_free_dma:
804 dma_free_coherent(&cafe->pdev->dev, 782 dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
805 2112 + sizeof(struct nand_buffers) +
806 mtd->writesize + mtd->oobsize,
807 cafe->dmabuf, cafe->dmaaddr);
808 out_irq: 783 out_irq:
809 /* Disable NAND IRQ in global IRQ mask register */ 784 /* Disable NAND IRQ in global IRQ mask register */
810 cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK); 785 cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
@@ -829,10 +804,7 @@ static void cafe_nand_remove(struct pci_dev *pdev)
829 nand_release(mtd); 804 nand_release(mtd);
830 free_rs(cafe->rs); 805 free_rs(cafe->rs);
831 pci_iounmap(pdev, cafe->mmio); 806 pci_iounmap(pdev, cafe->mmio);
832 dma_free_coherent(&cafe->pdev->dev, 807 dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
833 2112 + sizeof(struct nand_buffers) +
834 mtd->writesize + mtd->oobsize,
835 cafe->dmabuf, cafe->dmaaddr);
836 kfree(cafe); 808 kfree(cafe);
837} 809}
838 810
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 5124f8ae8c04..313c7f50621b 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -330,16 +330,12 @@ static int denali_check_erased_page(struct mtd_info *mtd,
330 unsigned long uncor_ecc_flags, 330 unsigned long uncor_ecc_flags,
331 unsigned int max_bitflips) 331 unsigned int max_bitflips)
332{ 332{
333 uint8_t *ecc_code = chip->buffers->ecccode; 333 struct denali_nand_info *denali = mtd_to_denali(mtd);
334 uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
334 int ecc_steps = chip->ecc.steps; 335 int ecc_steps = chip->ecc.steps;
335 int ecc_size = chip->ecc.size; 336 int ecc_size = chip->ecc.size;
336 int ecc_bytes = chip->ecc.bytes; 337 int ecc_bytes = chip->ecc.bytes;
337 int i, ret, stat; 338 int i, stat;
338
339 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
340 chip->ecc.total);
341 if (ret)
342 return ret;
343 339
344 for (i = 0; i < ecc_steps; i++) { 340 for (i = 0; i < ecc_steps; i++) {
345 if (!(uncor_ecc_flags & BIT(i))) 341 if (!(uncor_ecc_flags & BIT(i)))
@@ -645,8 +641,6 @@ static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
645 int page, int write) 641 int page, int write)
646{ 642{
647 struct denali_nand_info *denali = mtd_to_denali(mtd); 643 struct denali_nand_info *denali = mtd_to_denali(mtd);
648 unsigned int start_cmd = write ? NAND_CMD_SEQIN : NAND_CMD_READ0;
649 unsigned int rnd_cmd = write ? NAND_CMD_RNDIN : NAND_CMD_RNDOUT;
650 int writesize = mtd->writesize; 644 int writesize = mtd->writesize;
651 int oobsize = mtd->oobsize; 645 int oobsize = mtd->oobsize;
652 uint8_t *bufpoi = chip->oob_poi; 646 uint8_t *bufpoi = chip->oob_poi;
@@ -658,11 +652,11 @@ static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
658 int i, pos, len; 652 int i, pos, len;
659 653
660 /* BBM at the beginning of the OOB area */ 654 /* BBM at the beginning of the OOB area */
661 chip->cmdfunc(mtd, start_cmd, writesize, page);
662 if (write) 655 if (write)
663 chip->write_buf(mtd, bufpoi, oob_skip); 656 nand_prog_page_begin_op(chip, page, writesize, bufpoi,
657 oob_skip);
664 else 658 else
665 chip->read_buf(mtd, bufpoi, oob_skip); 659 nand_read_page_op(chip, page, writesize, bufpoi, oob_skip);
666 bufpoi += oob_skip; 660 bufpoi += oob_skip;
667 661
668 /* OOB ECC */ 662 /* OOB ECC */
@@ -675,30 +669,35 @@ static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
675 else if (pos + len > writesize) 669 else if (pos + len > writesize)
676 len = writesize - pos; 670 len = writesize - pos;
677 671
678 chip->cmdfunc(mtd, rnd_cmd, pos, -1);
679 if (write) 672 if (write)
680 chip->write_buf(mtd, bufpoi, len); 673 nand_change_write_column_op(chip, pos, bufpoi, len,
674 false);
681 else 675 else
682 chip->read_buf(mtd, bufpoi, len); 676 nand_change_read_column_op(chip, pos, bufpoi, len,
677 false);
683 bufpoi += len; 678 bufpoi += len;
684 if (len < ecc_bytes) { 679 if (len < ecc_bytes) {
685 len = ecc_bytes - len; 680 len = ecc_bytes - len;
686 chip->cmdfunc(mtd, rnd_cmd, writesize + oob_skip, -1);
687 if (write) 681 if (write)
688 chip->write_buf(mtd, bufpoi, len); 682 nand_change_write_column_op(chip, writesize +
683 oob_skip, bufpoi,
684 len, false);
689 else 685 else
690 chip->read_buf(mtd, bufpoi, len); 686 nand_change_read_column_op(chip, writesize +
687 oob_skip, bufpoi,
688 len, false);
691 bufpoi += len; 689 bufpoi += len;
692 } 690 }
693 } 691 }
694 692
695 /* OOB free */ 693 /* OOB free */
696 len = oobsize - (bufpoi - chip->oob_poi); 694 len = oobsize - (bufpoi - chip->oob_poi);
697 chip->cmdfunc(mtd, rnd_cmd, size - len, -1);
698 if (write) 695 if (write)
699 chip->write_buf(mtd, bufpoi, len); 696 nand_change_write_column_op(chip, size - len, bufpoi, len,
697 false);
700 else 698 else
701 chip->read_buf(mtd, bufpoi, len); 699 nand_change_read_column_op(chip, size - len, bufpoi, len,
700 false);
702} 701}
703 702
704static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 703static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
@@ -710,12 +709,12 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
710 int ecc_steps = chip->ecc.steps; 709 int ecc_steps = chip->ecc.steps;
711 int ecc_size = chip->ecc.size; 710 int ecc_size = chip->ecc.size;
712 int ecc_bytes = chip->ecc.bytes; 711 int ecc_bytes = chip->ecc.bytes;
713 void *dma_buf = denali->buf; 712 void *tmp_buf = denali->buf;
714 int oob_skip = denali->oob_skip_bytes; 713 int oob_skip = denali->oob_skip_bytes;
715 size_t size = writesize + oobsize; 714 size_t size = writesize + oobsize;
716 int ret, i, pos, len; 715 int ret, i, pos, len;
717 716
718 ret = denali_data_xfer(denali, dma_buf, size, page, 1, 0); 717 ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
719 if (ret) 718 if (ret)
720 return ret; 719 return ret;
721 720
@@ -730,11 +729,11 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
730 else if (pos + len > writesize) 729 else if (pos + len > writesize)
731 len = writesize - pos; 730 len = writesize - pos;
732 731
733 memcpy(buf, dma_buf + pos, len); 732 memcpy(buf, tmp_buf + pos, len);
734 buf += len; 733 buf += len;
735 if (len < ecc_size) { 734 if (len < ecc_size) {
736 len = ecc_size - len; 735 len = ecc_size - len;
737 memcpy(buf, dma_buf + writesize + oob_skip, 736 memcpy(buf, tmp_buf + writesize + oob_skip,
738 len); 737 len);
739 buf += len; 738 buf += len;
740 } 739 }
@@ -745,7 +744,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
745 uint8_t *oob = chip->oob_poi; 744 uint8_t *oob = chip->oob_poi;
746 745
747 /* BBM at the beginning of the OOB area */ 746 /* BBM at the beginning of the OOB area */
748 memcpy(oob, dma_buf + writesize, oob_skip); 747 memcpy(oob, tmp_buf + writesize, oob_skip);
749 oob += oob_skip; 748 oob += oob_skip;
750 749
751 /* OOB ECC */ 750 /* OOB ECC */
@@ -758,11 +757,11 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
758 else if (pos + len > writesize) 757 else if (pos + len > writesize)
759 len = writesize - pos; 758 len = writesize - pos;
760 759
761 memcpy(oob, dma_buf + pos, len); 760 memcpy(oob, tmp_buf + pos, len);
762 oob += len; 761 oob += len;
763 if (len < ecc_bytes) { 762 if (len < ecc_bytes) {
764 len = ecc_bytes - len; 763 len = ecc_bytes - len;
765 memcpy(oob, dma_buf + writesize + oob_skip, 764 memcpy(oob, tmp_buf + writesize + oob_skip,
766 len); 765 len);
767 oob += len; 766 oob += len;
768 } 767 }
@@ -770,7 +769,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
770 769
771 /* OOB free */ 770 /* OOB free */
772 len = oobsize - (oob - chip->oob_poi); 771 len = oobsize - (oob - chip->oob_poi);
773 memcpy(oob, dma_buf + size - len, len); 772 memcpy(oob, tmp_buf + size - len, len);
774 } 773 }
775 774
776 return 0; 775 return 0;
@@ -788,16 +787,12 @@ static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
788 int page) 787 int page)
789{ 788{
790 struct denali_nand_info *denali = mtd_to_denali(mtd); 789 struct denali_nand_info *denali = mtd_to_denali(mtd);
791 int status;
792 790
793 denali_reset_irq(denali); 791 denali_reset_irq(denali);
794 792
795 denali_oob_xfer(mtd, chip, page, 1); 793 denali_oob_xfer(mtd, chip, page, 1);
796 794
797 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 795 return nand_prog_page_end_op(chip);
798 status = chip->waitfunc(mtd, chip);
799
800 return status & NAND_STATUS_FAIL ? -EIO : 0;
801} 796}
802 797
803static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, 798static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -841,7 +836,7 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
841 int ecc_steps = chip->ecc.steps; 836 int ecc_steps = chip->ecc.steps;
842 int ecc_size = chip->ecc.size; 837 int ecc_size = chip->ecc.size;
843 int ecc_bytes = chip->ecc.bytes; 838 int ecc_bytes = chip->ecc.bytes;
844 void *dma_buf = denali->buf; 839 void *tmp_buf = denali->buf;
845 int oob_skip = denali->oob_skip_bytes; 840 int oob_skip = denali->oob_skip_bytes;
846 size_t size = writesize + oobsize; 841 size_t size = writesize + oobsize;
847 int i, pos, len; 842 int i, pos, len;
@@ -851,7 +846,7 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
851 * This simplifies the logic. 846 * This simplifies the logic.
852 */ 847 */
853 if (!buf || !oob_required) 848 if (!buf || !oob_required)
854 memset(dma_buf, 0xff, size); 849 memset(tmp_buf, 0xff, size);
855 850
856 /* Arrange the buffer for syndrome payload/ecc layout */ 851 /* Arrange the buffer for syndrome payload/ecc layout */
857 if (buf) { 852 if (buf) {
@@ -864,11 +859,11 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
864 else if (pos + len > writesize) 859 else if (pos + len > writesize)
865 len = writesize - pos; 860 len = writesize - pos;
866 861
867 memcpy(dma_buf + pos, buf, len); 862 memcpy(tmp_buf + pos, buf, len);
868 buf += len; 863 buf += len;
869 if (len < ecc_size) { 864 if (len < ecc_size) {
870 len = ecc_size - len; 865 len = ecc_size - len;
871 memcpy(dma_buf + writesize + oob_skip, buf, 866 memcpy(tmp_buf + writesize + oob_skip, buf,
872 len); 867 len);
873 buf += len; 868 buf += len;
874 } 869 }
@@ -879,7 +874,7 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
879 const uint8_t *oob = chip->oob_poi; 874 const uint8_t *oob = chip->oob_poi;
880 875
881 /* BBM at the beginning of the OOB area */ 876 /* BBM at the beginning of the OOB area */
882 memcpy(dma_buf + writesize, oob, oob_skip); 877 memcpy(tmp_buf + writesize, oob, oob_skip);
883 oob += oob_skip; 878 oob += oob_skip;
884 879
885 /* OOB ECC */ 880 /* OOB ECC */
@@ -892,11 +887,11 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
892 else if (pos + len > writesize) 887 else if (pos + len > writesize)
893 len = writesize - pos; 888 len = writesize - pos;
894 889
895 memcpy(dma_buf + pos, oob, len); 890 memcpy(tmp_buf + pos, oob, len);
896 oob += len; 891 oob += len;
897 if (len < ecc_bytes) { 892 if (len < ecc_bytes) {
898 len = ecc_bytes - len; 893 len = ecc_bytes - len;
899 memcpy(dma_buf + writesize + oob_skip, oob, 894 memcpy(tmp_buf + writesize + oob_skip, oob,
900 len); 895 len);
901 oob += len; 896 oob += len;
902 } 897 }
@@ -904,10 +899,10 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
904 899
905 /* OOB free */ 900 /* OOB free */
906 len = oobsize - (oob - chip->oob_poi); 901 len = oobsize - (oob - chip->oob_poi);
907 memcpy(dma_buf + size - len, oob, len); 902 memcpy(tmp_buf + size - len, oob, len);
908 } 903 }
909 904
910 return denali_data_xfer(denali, dma_buf, size, page, 1, 1); 905 return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
911} 906}
912 907
913static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, 908static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -951,7 +946,7 @@ static int denali_erase(struct mtd_info *mtd, int page)
951 irq_status = denali_wait_for_irq(denali, 946 irq_status = denali_wait_for_irq(denali,
952 INTR__ERASE_COMP | INTR__ERASE_FAIL); 947 INTR__ERASE_COMP | INTR__ERASE_FAIL);
953 948
954 return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL; 949 return irq_status & INTR__ERASE_COMP ? 0 : -EIO;
955} 950}
956 951
957static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, 952static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
@@ -1359,7 +1354,6 @@ int denali_init(struct denali_nand_info *denali)
1359 chip->read_buf = denali_read_buf; 1354 chip->read_buf = denali_read_buf;
1360 chip->write_buf = denali_write_buf; 1355 chip->write_buf = denali_write_buf;
1361 } 1356 }
1362 chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
1363 chip->ecc.read_page = denali_read_page; 1357 chip->ecc.read_page = denali_read_page;
1364 chip->ecc.read_page_raw = denali_read_page_raw; 1358 chip->ecc.read_page_raw = denali_read_page_raw;
1365 chip->ecc.write_page = denali_write_page; 1359 chip->ecc.write_page = denali_write_page;
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index 2911066dacac..9ad33d237378 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -329,7 +329,7 @@ struct denali_nand_info {
329#define DENALI_CAP_DMA_64BIT BIT(1) 329#define DENALI_CAP_DMA_64BIT BIT(1)
330 330
331int denali_calc_ecc_bytes(int step_size, int strength); 331int denali_calc_ecc_bytes(int step_size, int strength);
332extern int denali_init(struct denali_nand_info *denali); 332int denali_init(struct denali_nand_info *denali);
333extern void denali_remove(struct denali_nand_info *denali); 333void denali_remove(struct denali_nand_info *denali);
334 334
335#endif /* __DENALI_H__ */ 335#endif /* __DENALI_H__ */
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index 57fb7ae31412..49cb3e1f8bd0 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -125,3 +125,7 @@ static struct pci_driver denali_pci_driver = {
125 .remove = denali_pci_remove, 125 .remove = denali_pci_remove,
126}; 126};
127module_pci_driver(denali_pci_driver); 127module_pci_driver(denali_pci_driver);
128
129MODULE_DESCRIPTION("PCI driver for Denali NAND controller");
130MODULE_AUTHOR("Intel Corporation and its suppliers");
131MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 72671dc52e2e..6bc93ea66f50 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -448,7 +448,7 @@ static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this)
448 int status; 448 int status;
449 449
450 DoC_WaitReady(doc); 450 DoC_WaitReady(doc);
451 this->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); 451 nand_status_op(this, NULL);
452 DoC_WaitReady(doc); 452 DoC_WaitReady(doc);
453 status = (int)this->read_byte(mtd); 453 status = (int)this->read_byte(mtd);
454 454
@@ -595,7 +595,7 @@ static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
595 595
596 /* Assert ChipEnable and deassert WriteProtect */ 596 /* Assert ChipEnable and deassert WriteProtect */
597 WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect); 597 WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect);
598 this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); 598 nand_reset_op(this);
599 599
600 doc->curchip = chip; 600 doc->curchip = chip;
601 doc->curfloor = floor; 601 doc->curfloor = floor;
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index 2436cbc71662..72f1327c4430 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -785,6 +785,8 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
785 785
786 dev_dbg(doc->dev, "%s: page %08x\n", __func__, page); 786 dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
787 787
788 nand_read_page_op(nand, page, 0, NULL, 0);
789
788 writew(DOC_ECCCONF0_READ_MODE | 790 writew(DOC_ECCCONF0_READ_MODE |
789 DOC_ECCCONF0_ECC_ENABLE | 791 DOC_ECCCONF0_ECC_ENABLE |
790 DOC_ECCCONF0_UNKNOWN | 792 DOC_ECCCONF0_UNKNOWN |
@@ -864,7 +866,7 @@ static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
864 866
865 dev_dbg(doc->dev, "%s: page %x\n", __func__, page); 867 dev_dbg(doc->dev, "%s: page %x\n", __func__, page);
866 868
867 docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page); 869 nand_read_page_op(nand, page, nand->ecc.size, NULL, 0);
868 870
869 writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0); 871 writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0);
870 write_nop(docptr); 872 write_nop(docptr);
@@ -900,6 +902,7 @@ static int docg4_erase_block(struct mtd_info *mtd, int page)
900 struct docg4_priv *doc = nand_get_controller_data(nand); 902 struct docg4_priv *doc = nand_get_controller_data(nand);
901 void __iomem *docptr = doc->virtadr; 903 void __iomem *docptr = doc->virtadr;
902 uint16_t g4_page; 904 uint16_t g4_page;
905 int status;
903 906
904 dev_dbg(doc->dev, "%s: page %04x\n", __func__, page); 907 dev_dbg(doc->dev, "%s: page %04x\n", __func__, page);
905 908
@@ -939,11 +942,15 @@ static int docg4_erase_block(struct mtd_info *mtd, int page)
939 poll_status(doc); 942 poll_status(doc);
940 write_nop(docptr); 943 write_nop(docptr);
941 944
942 return nand->waitfunc(mtd, nand); 945 status = nand->waitfunc(mtd, nand);
946 if (status < 0)
947 return status;
948
949 return status & NAND_STATUS_FAIL ? -EIO : 0;
943} 950}
944 951
945static int write_page(struct mtd_info *mtd, struct nand_chip *nand, 952static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
946 const uint8_t *buf, bool use_ecc) 953 const uint8_t *buf, int page, bool use_ecc)
947{ 954{
948 struct docg4_priv *doc = nand_get_controller_data(nand); 955 struct docg4_priv *doc = nand_get_controller_data(nand);
949 void __iomem *docptr = doc->virtadr; 956 void __iomem *docptr = doc->virtadr;
@@ -951,6 +958,8 @@ static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
951 958
952 dev_dbg(doc->dev, "%s...\n", __func__); 959 dev_dbg(doc->dev, "%s...\n", __func__);
953 960
961 nand_prog_page_begin_op(nand, page, 0, NULL, 0);
962
954 writew(DOC_ECCCONF0_ECC_ENABLE | 963 writew(DOC_ECCCONF0_ECC_ENABLE |
955 DOC_ECCCONF0_UNKNOWN | 964 DOC_ECCCONF0_UNKNOWN |
956 DOCG4_BCH_SIZE, 965 DOCG4_BCH_SIZE,
@@ -995,19 +1004,19 @@ static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
995 writew(0, docptr + DOC_DATAEND); 1004 writew(0, docptr + DOC_DATAEND);
996 write_nop(docptr); 1005 write_nop(docptr);
997 1006
998 return 0; 1007 return nand_prog_page_end_op(nand);
999} 1008}
1000 1009
1001static int docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand, 1010static int docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
1002 const uint8_t *buf, int oob_required, int page) 1011 const uint8_t *buf, int oob_required, int page)
1003{ 1012{
1004 return write_page(mtd, nand, buf, false); 1013 return write_page(mtd, nand, buf, page, false);
1005} 1014}
1006 1015
1007static int docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand, 1016static int docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
1008 const uint8_t *buf, int oob_required, int page) 1017 const uint8_t *buf, int oob_required, int page)
1009{ 1018{
1010 return write_page(mtd, nand, buf, true); 1019 return write_page(mtd, nand, buf, page, true);
1011} 1020}
1012 1021
1013static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand, 1022static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 17db2f90aa2c..8b6dcd739ecb 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -713,7 +713,7 @@ static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
713 struct fsl_lbc_ctrl *ctrl = priv->ctrl; 713 struct fsl_lbc_ctrl *ctrl = priv->ctrl;
714 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand; 714 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
715 715
716 fsl_elbc_read_buf(mtd, buf, mtd->writesize); 716 nand_read_page_op(chip, page, 0, buf, mtd->writesize);
717 if (oob_required) 717 if (oob_required)
718 fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize); 718 fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
719 719
@@ -729,10 +729,10 @@ static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
729static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip, 729static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
730 const uint8_t *buf, int oob_required, int page) 730 const uint8_t *buf, int oob_required, int page)
731{ 731{
732 fsl_elbc_write_buf(mtd, buf, mtd->writesize); 732 nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
733 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); 733 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
734 734
735 return 0; 735 return nand_prog_page_end_op(chip);
736} 736}
737 737
738/* ECC will be calculated automatically, and errors will be detected in 738/* ECC will be calculated automatically, and errors will be detected in
@@ -742,10 +742,10 @@ static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip,
742 uint32_t offset, uint32_t data_len, 742 uint32_t offset, uint32_t data_len,
743 const uint8_t *buf, int oob_required, int page) 743 const uint8_t *buf, int oob_required, int page)
744{ 744{
745 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
745 fsl_elbc_write_buf(mtd, buf, mtd->writesize); 746 fsl_elbc_write_buf(mtd, buf, mtd->writesize);
746 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); 747 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
747 748 return nand_prog_page_end_op(chip);
748 return 0;
749} 749}
750 750
751static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) 751static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 9e03bac7f34c..4872a7ba6503 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -688,7 +688,7 @@ static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
688 struct fsl_ifc_ctrl *ctrl = priv->ctrl; 688 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
689 struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl; 689 struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
690 690
691 fsl_ifc_read_buf(mtd, buf, mtd->writesize); 691 nand_read_page_op(chip, page, 0, buf, mtd->writesize);
692 if (oob_required) 692 if (oob_required)
693 fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize); 693 fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
694 694
@@ -711,10 +711,10 @@ static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
711static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip, 711static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
712 const uint8_t *buf, int oob_required, int page) 712 const uint8_t *buf, int oob_required, int page)
713{ 713{
714 fsl_ifc_write_buf(mtd, buf, mtd->writesize); 714 nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
715 fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize); 715 fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
716 716
717 return 0; 717 return nand_prog_page_end_op(chip);
718} 718}
719 719
720static int fsl_ifc_chip_init_tail(struct mtd_info *mtd) 720static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
@@ -916,6 +916,13 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
916 if (ctrl->version >= FSL_IFC_VERSION_1_1_0) 916 if (ctrl->version >= FSL_IFC_VERSION_1_1_0)
917 fsl_ifc_sram_init(priv); 917 fsl_ifc_sram_init(priv);
918 918
919 /*
920 * As IFC version 2.0.0 has 16KB of internal SRAM as compared to older
921 * versions which had 8KB. Hence bufnum mask needs to be updated.
922 */
923 if (ctrl->version >= FSL_IFC_VERSION_2_0_0)
924 priv->bufnum_mask = (priv->bufnum_mask * 2) + 1;
925
919 return 0; 926 return 0;
920} 927}
921 928
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index eac15d9bf49e..f49ed46fa770 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -684,8 +684,8 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
684 int eccbytes = chip->ecc.bytes; 684 int eccbytes = chip->ecc.bytes;
685 int eccsteps = chip->ecc.steps; 685 int eccsteps = chip->ecc.steps;
686 uint8_t *p = buf; 686 uint8_t *p = buf;
687 uint8_t *ecc_calc = chip->buffers->ecccalc; 687 uint8_t *ecc_calc = chip->ecc.calc_buf;
688 uint8_t *ecc_code = chip->buffers->ecccode; 688 uint8_t *ecc_code = chip->ecc.code_buf;
689 int off, len, group = 0; 689 int off, len, group = 0;
690 /* 690 /*
691 * ecc_oob is intentionally taken as uint16_t. In 16bit devices, we 691 * ecc_oob is intentionally taken as uint16_t. In 16bit devices, we
@@ -697,7 +697,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
697 unsigned int max_bitflips = 0; 697 unsigned int max_bitflips = 0;
698 698
699 for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) { 699 for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
700 chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page); 700 nand_read_page_op(chip, page, s * eccsize, NULL, 0);
701 chip->ecc.hwctl(mtd, NAND_ECC_READ); 701 chip->ecc.hwctl(mtd, NAND_ECC_READ);
702 chip->read_buf(mtd, p, eccsize); 702 chip->read_buf(mtd, p, eccsize);
703 703
@@ -720,8 +720,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
720 if (chip->options & NAND_BUSWIDTH_16) 720 if (chip->options & NAND_BUSWIDTH_16)
721 len = roundup(len, 2); 721 len = roundup(len, 2);
722 722
723 chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page); 723 nand_read_oob_op(chip, page, off, oob + j, len);
724 chip->read_buf(mtd, oob + j, len);
725 j += len; 724 j += len;
726 } 725 }
727 726
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 50f8d4a1b983..ab9a0a2ed3b2 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1029,11 +1029,13 @@ static void block_mark_swapping(struct gpmi_nand_data *this,
1029 p[1] = (p[1] & mask) | (from_oob >> (8 - bit)); 1029 p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
1030} 1030}
1031 1031
1032static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip, 1032static int gpmi_ecc_read_page_data(struct nand_chip *chip,
1033 uint8_t *buf, int oob_required, int page) 1033 uint8_t *buf, int oob_required,
1034 int page)
1034{ 1035{
1035 struct gpmi_nand_data *this = nand_get_controller_data(chip); 1036 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1036 struct bch_geometry *nfc_geo = &this->bch_geometry; 1037 struct bch_geometry *nfc_geo = &this->bch_geometry;
1038 struct mtd_info *mtd = nand_to_mtd(chip);
1037 void *payload_virt; 1039 void *payload_virt;
1038 dma_addr_t payload_phys; 1040 dma_addr_t payload_phys;
1039 void *auxiliary_virt; 1041 void *auxiliary_virt;
@@ -1097,8 +1099,8 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1097 eccbytes = DIV_ROUND_UP(offset + eccbits, 8); 1099 eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
1098 offset /= 8; 1100 offset /= 8;
1099 eccbytes -= offset; 1101 eccbytes -= offset;
1100 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1); 1102 nand_change_read_column_op(chip, offset, eccbuf,
1101 chip->read_buf(mtd, eccbuf, eccbytes); 1103 eccbytes, false);
1102 1104
1103 /* 1105 /*
1104 * ECC data are not byte aligned and we may have 1106 * ECC data are not byte aligned and we may have
@@ -1176,6 +1178,14 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1176 return max_bitflips; 1178 return max_bitflips;
1177} 1179}
1178 1180
1181static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1182 uint8_t *buf, int oob_required, int page)
1183{
1184 nand_read_page_op(chip, page, 0, NULL, 0);
1185
1186 return gpmi_ecc_read_page_data(chip, buf, oob_required, page);
1187}
1188
1179/* Fake a virtual small page for the subpage read */ 1189/* Fake a virtual small page for the subpage read */
1180static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, 1190static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1181 uint32_t offs, uint32_t len, uint8_t *buf, int page) 1191 uint32_t offs, uint32_t len, uint8_t *buf, int page)
@@ -1220,12 +1230,12 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1220 meta = geo->metadata_size; 1230 meta = geo->metadata_size;
1221 if (first) { 1231 if (first) {
1222 col = meta + (size + ecc_parity_size) * first; 1232 col = meta + (size + ecc_parity_size) * first;
1223 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, col, -1);
1224
1225 meta = 0; 1233 meta = 0;
1226 buf = buf + first * size; 1234 buf = buf + first * size;
1227 } 1235 }
1228 1236
1237 nand_read_page_op(chip, page, col, NULL, 0);
1238
1229 /* Save the old environment */ 1239 /* Save the old environment */
1230 r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0); 1240 r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0);
1231 r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1); 1241 r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1);
@@ -1254,7 +1264,7 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1254 1264
1255 /* Read the subpage now */ 1265 /* Read the subpage now */
1256 this->swap_block_mark = false; 1266 this->swap_block_mark = false;
1257 max_bitflips = gpmi_ecc_read_page(mtd, chip, buf, 0, page); 1267 max_bitflips = gpmi_ecc_read_page_data(chip, buf, 0, page);
1258 1268
1259 /* Restore */ 1269 /* Restore */
1260 writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0); 1270 writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0);
@@ -1277,6 +1287,9 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1277 int ret; 1287 int ret;
1278 1288
1279 dev_dbg(this->dev, "ecc write page.\n"); 1289 dev_dbg(this->dev, "ecc write page.\n");
1290
1291 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1292
1280 if (this->swap_block_mark) { 1293 if (this->swap_block_mark) {
1281 /* 1294 /*
1282 * If control arrives here, we're doing block mark swapping. 1295 * If control arrives here, we're doing block mark swapping.
@@ -1338,7 +1351,10 @@ exit_auxiliary:
1338 payload_virt, payload_phys); 1351 payload_virt, payload_phys);
1339 } 1352 }
1340 1353
1341 return 0; 1354 if (ret)
1355 return ret;
1356
1357 return nand_prog_page_end_op(chip);
1342} 1358}
1343 1359
1344/* 1360/*
@@ -1411,7 +1427,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1411 memset(chip->oob_poi, ~0, mtd->oobsize); 1427 memset(chip->oob_poi, ~0, mtd->oobsize);
1412 1428
1413 /* Read out the conventional OOB. */ 1429 /* Read out the conventional OOB. */
1414 chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page); 1430 nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
1415 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 1431 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1416 1432
1417 /* 1433 /*
@@ -1421,7 +1437,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1421 */ 1437 */
1422 if (GPMI_IS_MX23(this)) { 1438 if (GPMI_IS_MX23(this)) {
1423 /* Read the block mark into the first byte of the OOB buffer. */ 1439 /* Read the block mark into the first byte of the OOB buffer. */
1424 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); 1440 nand_read_page_op(chip, page, 0, NULL, 0);
1425 chip->oob_poi[0] = chip->read_byte(mtd); 1441 chip->oob_poi[0] = chip->read_byte(mtd);
1426 } 1442 }
1427 1443
@@ -1432,7 +1448,6 @@ static int
1432gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) 1448gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
1433{ 1449{
1434 struct mtd_oob_region of = { }; 1450 struct mtd_oob_region of = { };
1435 int status = 0;
1436 1451
1437 /* Do we have available oob area? */ 1452 /* Do we have available oob area? */
1438 mtd_ooblayout_free(mtd, 0, &of); 1453 mtd_ooblayout_free(mtd, 0, &of);
@@ -1442,12 +1457,8 @@ gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
1442 if (!nand_is_slc(chip)) 1457 if (!nand_is_slc(chip))
1443 return -EPERM; 1458 return -EPERM;
1444 1459
1445 chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of.offset, page); 1460 return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
1446 chip->write_buf(mtd, chip->oob_poi + of.offset, of.length); 1461 chip->oob_poi + of.offset, of.length);
1447 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1448
1449 status = chip->waitfunc(mtd, chip);
1450 return status & NAND_STATUS_FAIL ? -EIO : 0;
1451} 1462}
1452 1463
1453/* 1464/*
@@ -1477,8 +1488,8 @@ static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
1477 uint8_t *oob = chip->oob_poi; 1488 uint8_t *oob = chip->oob_poi;
1478 int step; 1489 int step;
1479 1490
1480 chip->read_buf(mtd, tmp_buf, 1491 nand_read_page_op(chip, page, 0, tmp_buf,
1481 mtd->writesize + mtd->oobsize); 1492 mtd->writesize + mtd->oobsize);
1482 1493
1483 /* 1494 /*
1484 * If required, swap the bad block marker and the data stored in the 1495 * If required, swap the bad block marker and the data stored in the
@@ -1487,12 +1498,8 @@ static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
1487 * See the layout description for a detailed explanation on why this 1498 * See the layout description for a detailed explanation on why this
1488 * is needed. 1499 * is needed.
1489 */ 1500 */
1490 if (this->swap_block_mark) { 1501 if (this->swap_block_mark)
1491 u8 swap = tmp_buf[0]; 1502 swap(tmp_buf[0], tmp_buf[mtd->writesize]);
1492
1493 tmp_buf[0] = tmp_buf[mtd->writesize];
1494 tmp_buf[mtd->writesize] = swap;
1495 }
1496 1503
1497 /* 1504 /*
1498 * Copy the metadata section into the oob buffer (this section is 1505 * Copy the metadata section into the oob buffer (this section is
@@ -1615,31 +1622,22 @@ static int gpmi_ecc_write_page_raw(struct mtd_info *mtd,
1615 * See the layout description for a detailed explanation on why this 1622 * See the layout description for a detailed explanation on why this
1616 * is needed. 1623 * is needed.
1617 */ 1624 */
1618 if (this->swap_block_mark) { 1625 if (this->swap_block_mark)
1619 u8 swap = tmp_buf[0]; 1626 swap(tmp_buf[0], tmp_buf[mtd->writesize]);
1620
1621 tmp_buf[0] = tmp_buf[mtd->writesize];
1622 tmp_buf[mtd->writesize] = swap;
1623 }
1624 1627
1625 chip->write_buf(mtd, tmp_buf, mtd->writesize + mtd->oobsize); 1628 return nand_prog_page_op(chip, page, 0, tmp_buf,
1626 1629 mtd->writesize + mtd->oobsize);
1627 return 0;
1628} 1630}
1629 1631
1630static int gpmi_ecc_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip, 1632static int gpmi_ecc_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
1631 int page) 1633 int page)
1632{ 1634{
1633 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1634
1635 return gpmi_ecc_read_page_raw(mtd, chip, NULL, 1, page); 1635 return gpmi_ecc_read_page_raw(mtd, chip, NULL, 1, page);
1636} 1636}
1637 1637
1638static int gpmi_ecc_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip, 1638static int gpmi_ecc_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
1639 int page) 1639 int page)
1640{ 1640{
1641 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
1642
1643 return gpmi_ecc_write_page_raw(mtd, chip, NULL, 1, page); 1641 return gpmi_ecc_write_page_raw(mtd, chip, NULL, 1, page);
1644} 1642}
1645 1643
@@ -1649,7 +1647,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
1649 struct gpmi_nand_data *this = nand_get_controller_data(chip); 1647 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1650 int ret = 0; 1648 int ret = 0;
1651 uint8_t *block_mark; 1649 uint8_t *block_mark;
1652 int column, page, status, chipnr; 1650 int column, page, chipnr;
1653 1651
1654 chipnr = (int)(ofs >> chip->chip_shift); 1652 chipnr = (int)(ofs >> chip->chip_shift);
1655 chip->select_chip(mtd, chipnr); 1653 chip->select_chip(mtd, chipnr);
@@ -1663,13 +1661,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
1663 /* Shift to get page */ 1661 /* Shift to get page */
1664 page = (int)(ofs >> chip->page_shift); 1662 page = (int)(ofs >> chip->page_shift);
1665 1663
1666 chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page); 1664 ret = nand_prog_page_op(chip, page, column, block_mark, 1);
1667 chip->write_buf(mtd, block_mark, 1);
1668 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1669
1670 status = chip->waitfunc(mtd, chip);
1671 if (status & NAND_STATUS_FAIL)
1672 ret = -EIO;
1673 1665
1674 chip->select_chip(mtd, -1); 1666 chip->select_chip(mtd, -1);
1675 1667
@@ -1712,7 +1704,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1712 unsigned int search_area_size_in_strides; 1704 unsigned int search_area_size_in_strides;
1713 unsigned int stride; 1705 unsigned int stride;
1714 unsigned int page; 1706 unsigned int page;
1715 uint8_t *buffer = chip->buffers->databuf; 1707 uint8_t *buffer = chip->data_buf;
1716 int saved_chip_number; 1708 int saved_chip_number;
1717 int found_an_ncb_fingerprint = false; 1709 int found_an_ncb_fingerprint = false;
1718 1710
@@ -1737,7 +1729,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1737 * Read the NCB fingerprint. The fingerprint is four bytes long 1729 * Read the NCB fingerprint. The fingerprint is four bytes long
1738 * and starts in the 12th byte of the page. 1730 * and starts in the 12th byte of the page.
1739 */ 1731 */
1740 chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page); 1732 nand_read_page_op(chip, page, 12, NULL, 0);
1741 chip->read_buf(mtd, buffer, strlen(fingerprint)); 1733 chip->read_buf(mtd, buffer, strlen(fingerprint));
1742 1734
1743 /* Look for the fingerprint. */ 1735 /* Look for the fingerprint. */
@@ -1771,7 +1763,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1771 unsigned int block; 1763 unsigned int block;
1772 unsigned int stride; 1764 unsigned int stride;
1773 unsigned int page; 1765 unsigned int page;
1774 uint8_t *buffer = chip->buffers->databuf; 1766 uint8_t *buffer = chip->data_buf;
1775 int saved_chip_number; 1767 int saved_chip_number;
1776 int status; 1768 int status;
1777 1769
@@ -1797,17 +1789,10 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1797 dev_dbg(dev, "Erasing the search area...\n"); 1789 dev_dbg(dev, "Erasing the search area...\n");
1798 1790
1799 for (block = 0; block < search_area_size_in_blocks; block++) { 1791 for (block = 0; block < search_area_size_in_blocks; block++) {
1800 /* Compute the page address. */
1801 page = block * block_size_in_pages;
1802
1803 /* Erase this block. */ 1792 /* Erase this block. */
1804 dev_dbg(dev, "\tErasing block 0x%x\n", block); 1793 dev_dbg(dev, "\tErasing block 0x%x\n", block);
1805 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page); 1794 status = nand_erase_op(chip, block);
1806 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1); 1795 if (status)
1807
1808 /* Wait for the erase to finish. */
1809 status = chip->waitfunc(mtd, chip);
1810 if (status & NAND_STATUS_FAIL)
1811 dev_err(dev, "[%s] Erase failed.\n", __func__); 1796 dev_err(dev, "[%s] Erase failed.\n", __func__);
1812 } 1797 }
1813 1798
@@ -1823,13 +1808,9 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1823 1808
1824 /* Write the first page of the current stride. */ 1809 /* Write the first page of the current stride. */
1825 dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page); 1810 dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
1826 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
1827 chip->ecc.write_page_raw(mtd, chip, buffer, 0, page);
1828 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1829 1811
1830 /* Wait for the write to finish. */ 1812 status = chip->ecc.write_page_raw(mtd, chip, buffer, 0, page);
1831 status = chip->waitfunc(mtd, chip); 1813 if (status)
1832 if (status & NAND_STATUS_FAIL)
1833 dev_err(dev, "[%s] Write failed.\n", __func__); 1814 dev_err(dev, "[%s] Write failed.\n", __func__);
1834 } 1815 }
1835 1816
@@ -1884,7 +1865,7 @@ static int mx23_boot_init(struct gpmi_nand_data *this)
1884 1865
1885 /* Send the command to read the conventional block mark. */ 1866 /* Send the command to read the conventional block mark. */
1886 chip->select_chip(mtd, chipnr); 1867 chip->select_chip(mtd, chipnr);
1887 chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page); 1868 nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
1888 block_mark = chip->read_byte(mtd); 1869 block_mark = chip->read_byte(mtd);
1889 chip->select_chip(mtd, -1); 1870 chip->select_chip(mtd, -1);
1890 1871
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index a45e4ce13d10..06c1f993912c 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -268,31 +268,31 @@ struct timing_threshold {
268}; 268};
269 269
270/* Common Services */ 270/* Common Services */
271extern int common_nfc_set_geometry(struct gpmi_nand_data *); 271int common_nfc_set_geometry(struct gpmi_nand_data *);
272extern struct dma_chan *get_dma_chan(struct gpmi_nand_data *); 272struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
273extern void prepare_data_dma(struct gpmi_nand_data *, 273void prepare_data_dma(struct gpmi_nand_data *,
274 enum dma_data_direction dr); 274 enum dma_data_direction dr);
275extern int start_dma_without_bch_irq(struct gpmi_nand_data *, 275int start_dma_without_bch_irq(struct gpmi_nand_data *,
276 struct dma_async_tx_descriptor *); 276 struct dma_async_tx_descriptor *);
277extern int start_dma_with_bch_irq(struct gpmi_nand_data *, 277int start_dma_with_bch_irq(struct gpmi_nand_data *,
278 struct dma_async_tx_descriptor *); 278 struct dma_async_tx_descriptor *);
279 279
280/* GPMI-NAND helper function library */ 280/* GPMI-NAND helper function library */
281extern int gpmi_init(struct gpmi_nand_data *); 281int gpmi_init(struct gpmi_nand_data *);
282extern int gpmi_extra_init(struct gpmi_nand_data *); 282int gpmi_extra_init(struct gpmi_nand_data *);
283extern void gpmi_clear_bch(struct gpmi_nand_data *); 283void gpmi_clear_bch(struct gpmi_nand_data *);
284extern void gpmi_dump_info(struct gpmi_nand_data *); 284void gpmi_dump_info(struct gpmi_nand_data *);
285extern int bch_set_geometry(struct gpmi_nand_data *); 285int bch_set_geometry(struct gpmi_nand_data *);
286extern int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip); 286int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
287extern int gpmi_send_command(struct gpmi_nand_data *); 287int gpmi_send_command(struct gpmi_nand_data *);
288extern void gpmi_begin(struct gpmi_nand_data *); 288void gpmi_begin(struct gpmi_nand_data *);
289extern void gpmi_end(struct gpmi_nand_data *); 289void gpmi_end(struct gpmi_nand_data *);
290extern int gpmi_read_data(struct gpmi_nand_data *); 290int gpmi_read_data(struct gpmi_nand_data *);
291extern int gpmi_send_data(struct gpmi_nand_data *); 291int gpmi_send_data(struct gpmi_nand_data *);
292extern int gpmi_send_page(struct gpmi_nand_data *, 292int gpmi_send_page(struct gpmi_nand_data *,
293 dma_addr_t payload, dma_addr_t auxiliary); 293 dma_addr_t payload, dma_addr_t auxiliary);
294extern int gpmi_read_page(struct gpmi_nand_data *, 294int gpmi_read_page(struct gpmi_nand_data *,
295 dma_addr_t payload, dma_addr_t auxiliary); 295 dma_addr_t payload, dma_addr_t auxiliary);
296 296
297void gpmi_copy_bits(u8 *dst, size_t dst_bit_off, 297void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
298 const u8 *src, size_t src_bit_off, 298 const u8 *src, size_t src_bit_off,
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index 0897261c3e17..cb862793ab6d 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -544,7 +544,7 @@ static int hisi_nand_read_page_hwecc(struct mtd_info *mtd,
544 int max_bitflips = 0, stat = 0, stat_max = 0, status_ecc; 544 int max_bitflips = 0, stat = 0, stat_max = 0, status_ecc;
545 int stat_1, stat_2; 545 int stat_1, stat_2;
546 546
547 chip->read_buf(mtd, buf, mtd->writesize); 547 nand_read_page_op(chip, page, 0, buf, mtd->writesize);
548 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 548 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
549 549
550 /* errors which can not be corrected by ECC */ 550 /* errors which can not be corrected by ECC */
@@ -574,8 +574,7 @@ static int hisi_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
574{ 574{
575 struct hinfc_host *host = nand_get_controller_data(chip); 575 struct hinfc_host *host = nand_get_controller_data(chip);
576 576
577 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); 577 nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
578 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
579 578
580 if (host->irq_status & HINFC504_INTS_UE) { 579 if (host->irq_status & HINFC504_INTS_UE) {
581 host->irq_status = 0; 580 host->irq_status = 0;
@@ -590,11 +589,11 @@ static int hisi_nand_write_page_hwecc(struct mtd_info *mtd,
590 struct nand_chip *chip, const uint8_t *buf, int oob_required, 589 struct nand_chip *chip, const uint8_t *buf, int oob_required,
591 int page) 590 int page)
592{ 591{
593 chip->write_buf(mtd, buf, mtd->writesize); 592 nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
594 if (oob_required) 593 if (oob_required)
595 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 594 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
596 595
597 return 0; 596 return nand_prog_page_end_op(chip);
598} 597}
599 598
600static void hisi_nfc_host_init(struct hinfc_host *host) 599static void hisi_nfc_host_init(struct hinfc_host *host)
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index ad827d4af3e9..613b00a9604b 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -313,6 +313,7 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
313 uint32_t ctrl; 313 uint32_t ctrl;
314 struct nand_chip *chip = &nand->chip; 314 struct nand_chip *chip = &nand->chip;
315 struct mtd_info *mtd = nand_to_mtd(chip); 315 struct mtd_info *mtd = nand_to_mtd(chip);
316 u8 id[2];
316 317
317 /* Request I/O resource. */ 318 /* Request I/O resource. */
318 sprintf(res_name, "bank%d", bank); 319 sprintf(res_name, "bank%d", bank);
@@ -335,17 +336,16 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
335 336
336 /* Retrieve the IDs from the first chip. */ 337 /* Retrieve the IDs from the first chip. */
337 chip->select_chip(mtd, 0); 338 chip->select_chip(mtd, 0);
338 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); 339 nand_reset_op(chip);
339 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 340 nand_readid_op(chip, 0, id, sizeof(id));
340 *nand_maf_id = chip->read_byte(mtd); 341 *nand_maf_id = id[0];
341 *nand_dev_id = chip->read_byte(mtd); 342 *nand_dev_id = id[1];
342 } else { 343 } else {
343 /* Detect additional chip. */ 344 /* Detect additional chip. */
344 chip->select_chip(mtd, chipnr); 345 chip->select_chip(mtd, chipnr);
345 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); 346 nand_reset_op(chip);
346 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 347 nand_readid_op(chip, 0, id, sizeof(id));
347 if (*nand_maf_id != chip->read_byte(mtd) 348 if (*nand_maf_id != id[0] || *nand_dev_id != id[1]) {
348 || *nand_dev_id != chip->read_byte(mtd)) {
349 ret = -ENODEV; 349 ret = -ENODEV;
350 goto notfound_id; 350 goto notfound_id;
351 } 351 }
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 5796468db653..e357948a7505 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -461,7 +461,7 @@ static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
461 } 461 }
462 462
463 /* Writing Command and Address */ 463 /* Writing Command and Address */
464 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); 464 nand_read_page_op(chip, page, 0, NULL, 0);
465 465
466 /* For all sub-pages */ 466 /* For all sub-pages */
467 for (i = 0; i < host->mlcsubpages; i++) { 467 for (i = 0; i < host->mlcsubpages; i++) {
@@ -522,6 +522,8 @@ static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
522 memcpy(dma_buf, buf, mtd->writesize); 522 memcpy(dma_buf, buf, mtd->writesize);
523 } 523 }
524 524
525 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
526
525 for (i = 0; i < host->mlcsubpages; i++) { 527 for (i = 0; i < host->mlcsubpages; i++) {
526 /* Start Encode */ 528 /* Start Encode */
527 writeb(0x00, MLC_ECC_ENC_REG(host->io_base)); 529 writeb(0x00, MLC_ECC_ENC_REG(host->io_base));
@@ -550,7 +552,8 @@ static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
550 /* Wait for Controller Ready */ 552 /* Wait for Controller Ready */
551 lpc32xx_waitfunc_controller(mtd, chip); 553 lpc32xx_waitfunc_controller(mtd, chip);
552 } 554 }
553 return 0; 555
556 return nand_prog_page_end_op(chip);
554} 557}
555 558
556static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 559static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index b61f28a1554d..5f7cc6da0a7f 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -399,10 +399,7 @@ static void lpc32xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int
399static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd, 399static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
400 struct nand_chip *chip, int page) 400 struct nand_chip *chip, int page)
401{ 401{
402 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); 402 return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
403 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
404
405 return 0;
406} 403}
407 404
408/* 405/*
@@ -411,17 +408,8 @@ static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
411static int lpc32xx_nand_write_oob_syndrome(struct mtd_info *mtd, 408static int lpc32xx_nand_write_oob_syndrome(struct mtd_info *mtd,
412 struct nand_chip *chip, int page) 409 struct nand_chip *chip, int page)
413{ 410{
414 int status; 411 return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
415 412 mtd->oobsize);
416 chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
417 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
418
419 /* Send command to program the OOB data */
420 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
421
422 status = chip->waitfunc(mtd, chip);
423
424 return status & NAND_STATUS_FAIL ? -EIO : 0;
425} 413}
426 414
427/* 415/*
@@ -632,7 +620,7 @@ static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
632 uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE]; 620 uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];
633 621
634 /* Issue read command */ 622 /* Issue read command */
635 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); 623 nand_read_page_op(chip, page, 0, NULL, 0);
636 624
637 /* Read data and oob, calculate ECC */ 625 /* Read data and oob, calculate ECC */
638 status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1); 626 status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);
@@ -675,7 +663,7 @@ static int lpc32xx_nand_read_page_raw_syndrome(struct mtd_info *mtd,
675 int page) 663 int page)
676{ 664{
677 /* Issue read command */ 665 /* Issue read command */
678 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); 666 nand_read_page_op(chip, page, 0, NULL, 0);
679 667
680 /* Raw reads can just use the FIFO interface */ 668 /* Raw reads can just use the FIFO interface */
681 chip->read_buf(mtd, buf, chip->ecc.size * chip->ecc.steps); 669 chip->read_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
@@ -698,6 +686,8 @@ static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
698 uint8_t *pb; 686 uint8_t *pb;
699 int error; 687 int error;
700 688
689 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
690
701 /* Write data, calculate ECC on outbound data */ 691 /* Write data, calculate ECC on outbound data */
702 error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0); 692 error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
703 if (error) 693 if (error)
@@ -716,7 +706,8 @@ static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
716 706
717 /* Write ECC data to device */ 707 /* Write ECC data to device */
718 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 708 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
719 return 0; 709
710 return nand_prog_page_end_op(chip);
720} 711}
721 712
722/* 713/*
@@ -729,9 +720,11 @@ static int lpc32xx_nand_write_page_raw_syndrome(struct mtd_info *mtd,
729 int oob_required, int page) 720 int oob_required, int page)
730{ 721{
731 /* Raw writes can just use the FIFO interface */ 722 /* Raw writes can just use the FIFO interface */
732 chip->write_buf(mtd, buf, chip->ecc.size * chip->ecc.steps); 723 nand_prog_page_begin_op(chip, page, 0, buf,
724 chip->ecc.size * chip->ecc.steps);
733 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 725 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
734 return 0; 726
727 return nand_prog_page_end_op(chip);
735} 728}
736 729
737static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host) 730static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
diff --git a/drivers/mtd/nand/marvell_nand.c b/drivers/mtd/nand/marvell_nand.c
new file mode 100644
index 000000000000..2196f2a233d6
--- /dev/null
+++ b/drivers/mtd/nand/marvell_nand.c
@@ -0,0 +1,2896 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Marvell NAND flash controller driver
4 *
5 * Copyright (C) 2017 Marvell
6 * Author: Miquel RAYNAL <miquel.raynal@free-electrons.com>
7 *
8 */
9
10#include <linux/module.h>
11#include <linux/clk.h>
12#include <linux/mtd/rawnand.h>
13#include <linux/of_platform.h>
14#include <linux/iopoll.h>
15#include <linux/interrupt.h>
16#include <linux/slab.h>
17#include <linux/mfd/syscon.h>
18#include <linux/regmap.h>
19#include <asm/unaligned.h>
20
21#include <linux/dmaengine.h>
22#include <linux/dma-mapping.h>
23#include <linux/dma/pxa-dma.h>
24#include <linux/platform_data/mtd-nand-pxa3xx.h>
25
26/* Data FIFO granularity, FIFO reads/writes must be a multiple of this length */
27#define FIFO_DEPTH 8
28#define FIFO_REP(x) (x / sizeof(u32))
29#define BCH_SEQ_READS (32 / FIFO_DEPTH)
30/* NFC does not support transfers of larger chunks at a time */
31#define MAX_CHUNK_SIZE 2112
32/* NFCv1 cannot read more that 7 bytes of ID */
33#define NFCV1_READID_LEN 7
34/* Polling is done at a pace of POLL_PERIOD us until POLL_TIMEOUT is reached */
35#define POLL_PERIOD 0
36#define POLL_TIMEOUT 100000
37/* Interrupt maximum wait period in ms */
38#define IRQ_TIMEOUT 1000
39/* Latency in clock cycles between SoC pins and NFC logic */
40#define MIN_RD_DEL_CNT 3
41/* Maximum number of contiguous address cycles */
42#define MAX_ADDRESS_CYC_NFCV1 5
43#define MAX_ADDRESS_CYC_NFCV2 7
44/* System control registers/bits to enable the NAND controller on some SoCs */
45#define GENCONF_SOC_DEVICE_MUX 0x208
46#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
47#define GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST BIT(20)
48#define GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST BIT(21)
49#define GENCONF_SOC_DEVICE_MUX_NFC_INT_EN BIT(25)
50#define GENCONF_CLK_GATING_CTRL 0x220
51#define GENCONF_CLK_GATING_CTRL_ND_GATE BIT(2)
52#define GENCONF_ND_CLK_CTRL 0x700
53#define GENCONF_ND_CLK_CTRL_EN BIT(0)
54
55/* NAND controller data flash control register */
56#define NDCR 0x00
57#define NDCR_ALL_INT GENMASK(11, 0)
58#define NDCR_CS1_CMDDM BIT(7)
59#define NDCR_CS0_CMDDM BIT(8)
60#define NDCR_RDYM BIT(11)
61#define NDCR_ND_ARB_EN BIT(12)
62#define NDCR_RA_START BIT(15)
63#define NDCR_RD_ID_CNT(x) (min_t(unsigned int, x, 0x7) << 16)
64#define NDCR_PAGE_SZ(x) (x >= 2048 ? BIT(24) : 0)
65#define NDCR_DWIDTH_M BIT(26)
66#define NDCR_DWIDTH_C BIT(27)
67#define NDCR_ND_RUN BIT(28)
68#define NDCR_DMA_EN BIT(29)
69#define NDCR_ECC_EN BIT(30)
70#define NDCR_SPARE_EN BIT(31)
71#define NDCR_GENERIC_FIELDS_MASK (~(NDCR_RA_START | NDCR_PAGE_SZ(2048) | \
72 NDCR_DWIDTH_M | NDCR_DWIDTH_C))
73
74/* NAND interface timing parameter 0 register */
75#define NDTR0 0x04
76#define NDTR0_TRP(x) ((min_t(unsigned int, x, 0xF) & 0x7) << 0)
77#define NDTR0_TRH(x) (min_t(unsigned int, x, 0x7) << 3)
78#define NDTR0_ETRP(x) ((min_t(unsigned int, x, 0xF) & 0x8) << 3)
79#define NDTR0_SEL_NRE_EDGE BIT(7)
80#define NDTR0_TWP(x) (min_t(unsigned int, x, 0x7) << 8)
81#define NDTR0_TWH(x) (min_t(unsigned int, x, 0x7) << 11)
82#define NDTR0_TCS(x) (min_t(unsigned int, x, 0x7) << 16)
83#define NDTR0_TCH(x) (min_t(unsigned int, x, 0x7) << 19)
84#define NDTR0_RD_CNT_DEL(x) (min_t(unsigned int, x, 0xF) << 22)
85#define NDTR0_SELCNTR BIT(26)
86#define NDTR0_TADL(x) (min_t(unsigned int, x, 0x1F) << 27)
87
88/* NAND interface timing parameter 1 register */
89#define NDTR1 0x0C
90#define NDTR1_TAR(x) (min_t(unsigned int, x, 0xF) << 0)
91#define NDTR1_TWHR(x) (min_t(unsigned int, x, 0xF) << 4)
92#define NDTR1_TRHW(x) (min_t(unsigned int, x / 16, 0x3) << 8)
93#define NDTR1_PRESCALE BIT(14)
94#define NDTR1_WAIT_MODE BIT(15)
95#define NDTR1_TR(x) (min_t(unsigned int, x, 0xFFFF) << 16)
96
97/* NAND controller status register */
98#define NDSR 0x14
99#define NDSR_WRCMDREQ BIT(0)
100#define NDSR_RDDREQ BIT(1)
101#define NDSR_WRDREQ BIT(2)
102#define NDSR_CORERR BIT(3)
103#define NDSR_UNCERR BIT(4)
104#define NDSR_CMDD(cs) BIT(8 - cs)
105#define NDSR_RDY(rb) BIT(11 + rb)
106#define NDSR_ERRCNT(x) ((x >> 16) & 0x1F)
107
108/* NAND ECC control register */
109#define NDECCCTRL 0x28
110#define NDECCCTRL_BCH_EN BIT(0)
111
112/* NAND controller data buffer register */
113#define NDDB 0x40
114
115/* NAND controller command buffer 0 register */
116#define NDCB0 0x48
117#define NDCB0_CMD1(x) ((x & 0xFF) << 0)
118#define NDCB0_CMD2(x) ((x & 0xFF) << 8)
119#define NDCB0_ADDR_CYC(x) ((x & 0x7) << 16)
120#define NDCB0_ADDR_GET_NUM_CYC(x) (((x) >> 16) & 0x7)
121#define NDCB0_DBC BIT(19)
122#define NDCB0_CMD_TYPE(x) ((x & 0x7) << 21)
123#define NDCB0_CSEL BIT(24)
124#define NDCB0_RDY_BYP BIT(27)
125#define NDCB0_LEN_OVRD BIT(28)
126#define NDCB0_CMD_XTYPE(x) ((x & 0x7) << 29)
127
128/* NAND controller command buffer 1 register */
129#define NDCB1 0x4C
130#define NDCB1_COLS(x) ((x & 0xFFFF) << 0)
131#define NDCB1_ADDRS_PAGE(x) (x << 16)
132
133/* NAND controller command buffer 2 register */
134#define NDCB2 0x50
135#define NDCB2_ADDR5_PAGE(x) (((x >> 16) & 0xFF) << 0)
136#define NDCB2_ADDR5_CYC(x) ((x & 0xFF) << 0)
137
138/* NAND controller command buffer 3 register */
139#define NDCB3 0x54
140#define NDCB3_ADDR6_CYC(x) ((x & 0xFF) << 16)
141#define NDCB3_ADDR7_CYC(x) ((x & 0xFF) << 24)
142
143/* NAND controller command buffer 0 register 'type' and 'xtype' fields */
144#define TYPE_READ 0
145#define TYPE_WRITE 1
146#define TYPE_ERASE 2
147#define TYPE_READ_ID 3
148#define TYPE_STATUS 4
149#define TYPE_RESET 5
150#define TYPE_NAKED_CMD 6
151#define TYPE_NAKED_ADDR 7
152#define TYPE_MASK 7
153#define XTYPE_MONOLITHIC_RW 0
154#define XTYPE_LAST_NAKED_RW 1
155#define XTYPE_FINAL_COMMAND 3
156#define XTYPE_READ 4
157#define XTYPE_WRITE_DISPATCH 4
158#define XTYPE_NAKED_RW 5
159#define XTYPE_COMMAND_DISPATCH 6
160#define XTYPE_MASK 7
161
162/**
163 * Marvell ECC engine works differently than the others, in order to limit the
164 * size of the IP, hardware engineers chose to set a fixed strength at 16 bits
165 * per subpage, and depending on a the desired strength needed by the NAND chip,
166 * a particular layout mixing data/spare/ecc is defined, with a possible last
167 * chunk smaller that the others.
168 *
169 * @writesize: Full page size on which the layout applies
170 * @chunk: Desired ECC chunk size on which the layout applies
171 * @strength: Desired ECC strength (per chunk size bytes) on which the
172 * layout applies
173 * @nchunks: Total number of chunks
174 * @full_chunk_cnt: Number of full-sized chunks, which is the number of
175 * repetitions of the pattern:
176 * (data_bytes + spare_bytes + ecc_bytes).
177 * @data_bytes: Number of data bytes per chunk
178 * @spare_bytes: Number of spare bytes per chunk
179 * @ecc_bytes: Number of ecc bytes per chunk
180 * @last_data_bytes: Number of data bytes in the last chunk
181 * @last_spare_bytes: Number of spare bytes in the last chunk
182 * @last_ecc_bytes: Number of ecc bytes in the last chunk
183 */
184struct marvell_hw_ecc_layout {
185 /* Constraints */
186 int writesize;
187 int chunk;
188 int strength;
189 /* Corresponding layout */
190 int nchunks;
191 int full_chunk_cnt;
192 int data_bytes;
193 int spare_bytes;
194 int ecc_bytes;
195 int last_data_bytes;
196 int last_spare_bytes;
197 int last_ecc_bytes;
198};
199
200#define MARVELL_LAYOUT(ws, dc, ds, nc, fcc, db, sb, eb, ldb, lsb, leb) \
201 { \
202 .writesize = ws, \
203 .chunk = dc, \
204 .strength = ds, \
205 .nchunks = nc, \
206 .full_chunk_cnt = fcc, \
207 .data_bytes = db, \
208 .spare_bytes = sb, \
209 .ecc_bytes = eb, \
210 .last_data_bytes = ldb, \
211 .last_spare_bytes = lsb, \
212 .last_ecc_bytes = leb, \
213 }
214
215/* Layouts explained in AN-379_Marvell_SoC_NFC_ECC */
216static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
217 MARVELL_LAYOUT( 512, 512, 1, 1, 1, 512, 8, 8, 0, 0, 0),
218 MARVELL_LAYOUT( 2048, 512, 1, 1, 1, 2048, 40, 24, 0, 0, 0),
219 MARVELL_LAYOUT( 2048, 512, 4, 1, 1, 2048, 32, 30, 0, 0, 0),
220 MARVELL_LAYOUT( 4096, 512, 4, 2, 2, 2048, 32, 30, 0, 0, 0),
221 MARVELL_LAYOUT( 4096, 512, 8, 5, 4, 1024, 0, 30, 0, 64, 30),
222};
223
224/**
225 * The Nand Flash Controller has up to 4 CE and 2 RB pins. The CE selection
226 * is made by a field in NDCB0 register, and in another field in NDCB2 register.
227 * The datasheet describes the logic with an error: ADDR5 field is once
228 * declared at the beginning of NDCB2, and another time at its end. Because the
229 * ADDR5 field of NDCB2 may be used by other bytes, it would be more logical
230 * to use the last bit of this field instead of the first ones.
231 *
232 * @cs: Wanted CE lane.
233 * @ndcb0_csel: Value of the NDCB0 register with or without the flag
234 * selecting the wanted CE lane. This is set once when
235 * the Device Tree is probed.
236 * @rb: Ready/Busy pin for the flash chip
237 */
238struct marvell_nand_chip_sel {
239 unsigned int cs;
240 u32 ndcb0_csel;
241 unsigned int rb;
242};
243
244/**
245 * NAND chip structure: stores NAND chip device related information
246 *
247 * @chip: Base NAND chip structure
248 * @node: Used to store NAND chips into a list
249 * @layout NAND layout when using hardware ECC
250 * @ndcr: Controller register value for this NAND chip
251 * @ndtr0: Timing registers 0 value for this NAND chip
252 * @ndtr1: Timing registers 1 value for this NAND chip
253 * @selected_die: Current active CS
254 * @nsels: Number of CS lines required by the NAND chip
255 * @sels: Array of CS lines descriptions
256 */
257struct marvell_nand_chip {
258 struct nand_chip chip;
259 struct list_head node;
260 const struct marvell_hw_ecc_layout *layout;
261 u32 ndcr;
262 u32 ndtr0;
263 u32 ndtr1;
264 int addr_cyc;
265 int selected_die;
266 unsigned int nsels;
267 struct marvell_nand_chip_sel sels[0];
268};
269
270static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
271{
272 return container_of(chip, struct marvell_nand_chip, chip);
273}
274
275static inline struct marvell_nand_chip_sel *to_nand_sel(struct marvell_nand_chip
276 *nand)
277{
278 return &nand->sels[nand->selected_die];
279}
280
281/**
282 * NAND controller capabilities for distinction between compatible strings
283 *
284 * @max_cs_nb: Number of Chip Select lines available
285 * @max_rb_nb: Number of Ready/Busy lines available
286 * @need_system_controller: Indicates if the SoC needs to have access to the
287 * system controller (ie. to enable the NAND controller)
288 * @legacy_of_bindings: Indicates if DT parsing must be done using the old
289 * fashion way
290 * @is_nfcv2: NFCv2 has numerous enhancements compared to NFCv1, ie.
291 * BCH error detection and correction algorithm,
292 * NDCB3 register has been added
293 * @use_dma: Use dma for data transfers
294 */
295struct marvell_nfc_caps {
296 unsigned int max_cs_nb;
297 unsigned int max_rb_nb;
298 bool need_system_controller;
299 bool legacy_of_bindings;
300 bool is_nfcv2;
301 bool use_dma;
302};
303
304/**
305 * NAND controller structure: stores Marvell NAND controller information
306 *
307 * @controller: Base controller structure
308 * @dev: Parent device (used to print error messages)
309 * @regs: NAND controller registers
310 * @ecc_clk: ECC block clock, two times the NAND controller clock
311 * @complete: Completion object to wait for NAND controller events
312 * @assigned_cs: Bitmask describing already assigned CS lines
313 * @chips: List containing all the NAND chips attached to
314 * this NAND controller
315 * @caps: NAND controller capabilities for each compatible string
316 * @dma_chan: DMA channel (NFCv1 only)
317 * @dma_buf: 32-bit aligned buffer for DMA transfers (NFCv1 only)
318 */
319struct marvell_nfc {
320 struct nand_hw_control controller;
321 struct device *dev;
322 void __iomem *regs;
323 struct clk *ecc_clk;
324 struct completion complete;
325 unsigned long assigned_cs;
326 struct list_head chips;
327 struct nand_chip *selected_chip;
328 const struct marvell_nfc_caps *caps;
329
330 /* DMA (NFCv1 only) */
331 bool use_dma;
332 struct dma_chan *dma_chan;
333 u8 *dma_buf;
334};
335
336static inline struct marvell_nfc *to_marvell_nfc(struct nand_hw_control *ctrl)
337{
338 return container_of(ctrl, struct marvell_nfc, controller);
339}
340
341/**
342 * NAND controller timings expressed in NAND Controller clock cycles
343 *
344 * @tRP: ND_nRE pulse width
345 * @tRH: ND_nRE high duration
346 * @tWP: ND_nWE pulse time
347 * @tWH: ND_nWE high duration
348 * @tCS: Enable signal setup time
349 * @tCH: Enable signal hold time
350 * @tADL: Address to write data delay
351 * @tAR: ND_ALE low to ND_nRE low delay
352 * @tWHR: ND_nWE high to ND_nRE low for status read
353 * @tRHW: ND_nRE high duration, read to write delay
354 * @tR: ND_nWE high to ND_nRE low for read
355 */
356struct marvell_nfc_timings {
357 /* NDTR0 fields */
358 unsigned int tRP;
359 unsigned int tRH;
360 unsigned int tWP;
361 unsigned int tWH;
362 unsigned int tCS;
363 unsigned int tCH;
364 unsigned int tADL;
365 /* NDTR1 fields */
366 unsigned int tAR;
367 unsigned int tWHR;
368 unsigned int tRHW;
369 unsigned int tR;
370};
371
372/**
373 * Derives a duration in numbers of clock cycles.
374 *
375 * @ps: Duration in pico-seconds
376 * @period_ns: Clock period in nano-seconds
377 *
378 * Convert the duration in nano-seconds, then divide by the period and
379 * return the number of clock periods.
380 */
381#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP(ps / 1000, period_ns))
382
383/**
384 * NAND driver structure filled during the parsing of the ->exec_op() subop
385 * subset of instructions.
386 *
387 * @ndcb: Array of values written to NDCBx registers
388 * @cle_ale_delay_ns: Optional delay after the last CMD or ADDR cycle
389 * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
390 * @rdy_delay_ns: Optional delay after waiting for the RB pin
391 * @data_delay_ns: Optional delay after the data xfer
392 * @data_instr_idx: Index of the data instruction in the subop
393 * @data_instr: Pointer to the data instruction in the subop
394 */
395struct marvell_nfc_op {
396 u32 ndcb[4];
397 unsigned int cle_ale_delay_ns;
398 unsigned int rdy_timeout_ms;
399 unsigned int rdy_delay_ns;
400 unsigned int data_delay_ns;
401 unsigned int data_instr_idx;
402 const struct nand_op_instr *data_instr;
403};
404
405/*
406 * Internal helper to conditionnally apply a delay (from the above structure,
407 * most of the time).
408 */
409static void cond_delay(unsigned int ns)
410{
411 if (!ns)
412 return;
413
414 if (ns < 10000)
415 ndelay(ns);
416 else
417 udelay(DIV_ROUND_UP(ns, 1000));
418}
419
420/*
421 * The controller has many flags that could generate interrupts, most of them
422 * are disabled and polling is used. For the very slow signals, using interrupts
423 * may relax the CPU charge.
424 */
425static void marvell_nfc_disable_int(struct marvell_nfc *nfc, u32 int_mask)
426{
427 u32 reg;
428
429 /* Writing 1 disables the interrupt */
430 reg = readl_relaxed(nfc->regs + NDCR);
431 writel_relaxed(reg | int_mask, nfc->regs + NDCR);
432}
433
434static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask)
435{
436 u32 reg;
437
438 /* Writing 0 enables the interrupt */
439 reg = readl_relaxed(nfc->regs + NDCR);
440 writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
441}
442
/* Acknowledge (clear) the given status bits by writing them back to NDSR */
static void marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
{
	writel_relaxed(int_mask, nfc->regs + NDSR);
}
447
448static void marvell_nfc_force_byte_access(struct nand_chip *chip,
449 bool force_8bit)
450{
451 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
452 u32 ndcr;
453
454 /*
455 * Callers of this function do not verify if the NAND is using a 16-bit
456 * an 8-bit bus for normal operations, so we need to take care of that
457 * here by leaving the configuration unchanged if the NAND does not have
458 * the NAND_BUSWIDTH_16 flag set.
459 */
460 if (!(chip->options & NAND_BUSWIDTH_16))
461 return;
462
463 ndcr = readl_relaxed(nfc->regs + NDCR);
464
465 if (force_8bit)
466 ndcr &= ~(NDCR_DWIDTH_M | NDCR_DWIDTH_C);
467 else
468 ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
469
470 writel_relaxed(ndcr, nfc->regs + NDCR);
471}
472
473static int marvell_nfc_wait_ndrun(struct nand_chip *chip)
474{
475 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
476 u32 val;
477 int ret;
478
479 /*
480 * The command is being processed, wait for the ND_RUN bit to be
481 * cleared by the NFC. If not, we must clear it by hand.
482 */
483 ret = readl_relaxed_poll_timeout(nfc->regs + NDCR, val,
484 (val & NDCR_ND_RUN) == 0,
485 POLL_PERIOD, POLL_TIMEOUT);
486 if (ret) {
487 dev_err(nfc->dev, "Timeout on NAND controller run mode\n");
488 writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
489 nfc->regs + NDCR);
490 return ret;
491 }
492
493 return 0;
494}
495
496/*
497 * Any time a command has to be sent to the controller, the following sequence
498 * has to be followed:
499 * - call marvell_nfc_prepare_cmd()
500 * -> activate the ND_RUN bit that will kind of 'start a job'
501 * -> wait the signal indicating the NFC is waiting for a command
502 * - send the command (cmd and address cycles)
503 * - enventually send or receive the data
504 * - call marvell_nfc_end_cmd() with the corresponding flag
505 * -> wait the flag to be triggered or cancel the job with a timeout
506 *
507 * The following helpers are here to factorize the code a bit so that
508 * specialized functions responsible for executing the actual NAND
509 * operations do not have to replicate the same code blocks.
510 */
511static int marvell_nfc_prepare_cmd(struct nand_chip *chip)
512{
513 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
514 u32 ndcr, val;
515 int ret;
516
517 /* Poll ND_RUN and clear NDSR before issuing any command */
518 ret = marvell_nfc_wait_ndrun(chip);
519 if (ret) {
520 dev_err(nfc->dev, "Last operation did not succeed\n");
521 return ret;
522 }
523
524 ndcr = readl_relaxed(nfc->regs + NDCR);
525 writel_relaxed(readl(nfc->regs + NDSR), nfc->regs + NDSR);
526
527 /* Assert ND_RUN bit and wait the NFC to be ready */
528 writel_relaxed(ndcr | NDCR_ND_RUN, nfc->regs + NDCR);
529 ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
530 val & NDSR_WRCMDREQ,
531 POLL_PERIOD, POLL_TIMEOUT);
532 if (ret) {
533 dev_err(nfc->dev, "Timeout on WRCMDRE\n");
534 return -ETIMEDOUT;
535 }
536
537 /* Command may be written, clear WRCMDREQ status bit */
538 writel_relaxed(NDSR_WRCMDREQ, nfc->regs + NDSR);
539
540 return 0;
541}
542
/*
 * Push a prepared command descriptor to the controller. All NDCBx words are
 * written through the NDCB0 register offset, the first one carrying this
 * chip's chip-select bits.
 */
static void marvell_nfc_send_cmd(struct nand_chip *chip,
				 struct marvell_nfc_op *nfc_op)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);

	dev_dbg(nfc->dev, "\nNDCR: 0x%08x\n"
		"NDCB0: 0x%08x\nNDCB1: 0x%08x\nNDCB2: 0x%08x\nNDCB3: 0x%08x\n",
		(u32)readl_relaxed(nfc->regs + NDCR), nfc_op->ndcb[0],
		nfc_op->ndcb[1], nfc_op->ndcb[2], nfc_op->ndcb[3]);

	writel_relaxed(to_nand_sel(marvell_nand)->ndcb0_csel | nfc_op->ndcb[0],
		       nfc->regs + NDCB0);
	writel_relaxed(nfc_op->ndcb[1], nfc->regs + NDCB0);
	/*
	 * NOTE(review): non-relaxed write here — presumably an intentional
	 * barrier closing the three-word sequence; confirm before changing.
	 */
	writel(nfc_op->ndcb[2], nfc->regs + NDCB0);

	/*
	 * Write NDCB0 four times only if LEN_OVRD is set or if ADDR6 or ADDR7
	 * fields are used (only available on NFCv2).
	 */
	if (nfc_op->ndcb[0] & NDCB0_LEN_OVRD ||
	    NDCB0_ADDR_GET_NUM_CYC(nfc_op->ndcb[0]) >= 6) {
		if (!WARN_ON_ONCE(!nfc->caps->is_nfcv2))
			writel(nfc_op->ndcb[3], nfc->regs + NDCB0);
	}
}
569
/*
 * Wait for one of the NDSR events in @flag, acknowledging it unless the DMA
 * engine is active (see below). @label is used in the timeout error message.
 */
static int marvell_nfc_end_cmd(struct nand_chip *chip, int flag,
			       const char *label)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 val;
	int ret;

	/* Poll NDSR until at least one of the requested status bits is set */
	ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
					 val & flag,
					 POLL_PERIOD, POLL_TIMEOUT);

	if (ret) {
		dev_err(nfc->dev, "Timeout on %s (NDSR: 0x%08x)\n",
			label, val);
		/* Abort a possibly in-flight DMA transfer before bailing out */
		if (nfc->dma_chan)
			dmaengine_terminate_all(nfc->dma_chan);
		return ret;
	}

	/*
	 * DMA function uses this helper to poll on CMDD bits without wanting
	 * them to be cleared.
	 */
	if (nfc->use_dma && (readl_relaxed(nfc->regs + NDCR) & NDCR_DMA_EN))
		return 0;

	/* Acknowledge the event by writing the bits back to NDSR */
	writel_relaxed(flag, nfc->regs + NDSR);

	return 0;
}
600
601static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
602{
603 struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
604 int cs_flag = NDSR_CMDD(to_nand_sel(marvell_nand)->ndcb0_csel);
605
606 return marvell_nfc_end_cmd(chip, cs_flag, "CMDD");
607}
608
609static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
610{
611 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
612 int ret;
613
614 /* Timeout is expressed in ms */
615 if (!timeout_ms)
616 timeout_ms = IRQ_TIMEOUT;
617
618 init_completion(&nfc->complete);
619
620 marvell_nfc_enable_int(nfc, NDCR_RDYM);
621 ret = wait_for_completion_timeout(&nfc->complete,
622 msecs_to_jiffies(timeout_ms));
623 marvell_nfc_disable_int(nfc, NDCR_RDYM);
624 marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
625 if (!ret) {
626 dev_err(nfc->dev, "Timeout waiting for RB signal\n");
627 return -ETIMEDOUT;
628 }
629
630 return 0;
631}
632
/*
 * ->select_chip() hook: program the per-chip timing and control registers for
 * the requested die, caching the selection so repeated calls are cheap.
 */
static void marvell_nfc_select_chip(struct mtd_info *mtd, int die_nr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 ndcr_generic;

	/* Nothing to do if this chip/die is already the selected one */
	if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
		return;

	/* An out-of-range die number deselects everything */
	if (die_nr < 0 || die_nr >= marvell_nand->nsels) {
		nfc->selected_chip = NULL;
		marvell_nand->selected_die = -1;
		return;
	}

	/*
	 * Do not change the timing registers when using the DT property
	 * marvell,nand-keep-config; in that case ->ndtr0 and ->ndtr1 from the
	 * marvell_nand structure are supposedly empty.
	 */
	writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
	writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);

	/*
	 * Reset the NDCR register to a clean state for this particular chip,
	 * also clear ND_RUN bit.
	 */
	ndcr_generic = readl_relaxed(nfc->regs + NDCR) &
		       NDCR_GENERIC_FIELDS_MASK & ~NDCR_ND_RUN;
	writel_relaxed(ndcr_generic | marvell_nand->ndcr, nfc->regs + NDCR);

	/* Also reset the interrupt status register */
	marvell_nfc_clear_int(nfc, NDCR_ALL_INT);

	/* Cache the selection so the fast path above can short-circuit */
	nfc->selected_chip = chip;
	marvell_nand->selected_die = die_nr;
}
671
/*
 * Interrupt handler: mask the sources that fired and wake up the waiter in
 * marvell_nfc_wait_op() when a non-data-request event occurred.
 */
static irqreturn_t marvell_nfc_isr(int irq, void *dev_id)
{
	struct marvell_nfc *nfc = dev_id;
	u32 st = readl_relaxed(nfc->regs + NDSR);
	/* NDCR holds mask bits (1 = masked): invert to get enabled sources */
	u32 ien = (~readl_relaxed(nfc->regs + NDCR)) & NDCR_ALL_INT;

	/*
	 * RDY interrupt mask is one bit in NDCR while there are two status
	 * bit in NDSR (RDY[cs0/cs2] and RDY[cs1/cs3]).
	 */
	if (st & NDSR_RDY(1))
		st |= NDSR_RDY(0);

	/* None of the pending status bits belongs to an enabled source */
	if (!(st & ien))
		return IRQ_NONE;

	marvell_nfc_disable_int(nfc, st & NDCR_ALL_INT);

	/*
	 * NOTE(review): data-request flags (RDDREQ/WRDREQ/WRCMDREQ) do not
	 * complete the waiter — presumably they are serviced by the polling
	 * paths instead; confirm.
	 */
	if (!(st & (NDSR_RDDREQ | NDSR_WRDREQ | NDSR_WRCMDREQ)))
		complete(&nfc->complete);

	return IRQ_HANDLED;
}
695
696/* HW ECC related functions */
697static void marvell_nfc_enable_hw_ecc(struct nand_chip *chip)
698{
699 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
700 u32 ndcr = readl_relaxed(nfc->regs + NDCR);
701
702 if (!(ndcr & NDCR_ECC_EN)) {
703 writel_relaxed(ndcr | NDCR_ECC_EN, nfc->regs + NDCR);
704
705 /*
706 * When enabling BCH, set threshold to 0 to always know the
707 * number of corrected bitflips.
708 */
709 if (chip->ecc.algo == NAND_ECC_BCH)
710 writel_relaxed(NDECCCTRL_BCH_EN, nfc->regs + NDECCCTRL);
711 }
712}
713
714static void marvell_nfc_disable_hw_ecc(struct nand_chip *chip)
715{
716 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
717 u32 ndcr = readl_relaxed(nfc->regs + NDCR);
718
719 if (ndcr & NDCR_ECC_EN) {
720 writel_relaxed(ndcr & ~NDCR_ECC_EN, nfc->regs + NDCR);
721 if (chip->ecc.algo == NAND_ECC_BCH)
722 writel_relaxed(0, nfc->regs + NDECCCTRL);
723 }
724}
725
726/* DMA related helpers */
727static void marvell_nfc_enable_dma(struct marvell_nfc *nfc)
728{
729 u32 reg;
730
731 reg = readl_relaxed(nfc->regs + NDCR);
732 writel_relaxed(reg | NDCR_DMA_EN, nfc->regs + NDCR);
733}
734
735static void marvell_nfc_disable_dma(struct marvell_nfc *nfc)
736{
737 u32 reg;
738
739 reg = readl_relaxed(nfc->regs + NDCR);
740 writel_relaxed(reg & ~NDCR_DMA_EN, nfc->regs + NDCR);
741}
742
743/* Read/write PIO/DMA accessors */
744static int marvell_nfc_xfer_data_dma(struct marvell_nfc *nfc,
745 enum dma_data_direction direction,
746 unsigned int len)
747{
748 unsigned int dma_len = min_t(int, ALIGN(len, 32), MAX_CHUNK_SIZE);
749 struct dma_async_tx_descriptor *tx;
750 struct scatterlist sg;
751 dma_cookie_t cookie;
752 int ret;
753
754 marvell_nfc_enable_dma(nfc);
755 /* Prepare the DMA transfer */
756 sg_init_one(&sg, nfc->dma_buf, dma_len);
757 dma_map_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
758 tx = dmaengine_prep_slave_sg(nfc->dma_chan, &sg, 1,
759 direction == DMA_FROM_DEVICE ?
760 DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
761 DMA_PREP_INTERRUPT);
762 if (!tx) {
763 dev_err(nfc->dev, "Could not prepare DMA S/G list\n");
764 return -ENXIO;
765 }
766
767 /* Do the task and wait for it to finish */
768 cookie = dmaengine_submit(tx);
769 ret = dma_submit_error(cookie);
770 if (ret)
771 return -EIO;
772
773 dma_async_issue_pending(nfc->dma_chan);
774 ret = marvell_nfc_wait_cmdd(nfc->selected_chip);
775 dma_unmap_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
776 marvell_nfc_disable_dma(nfc);
777 if (ret) {
778 dev_err(nfc->dev, "Timeout waiting for DMA (status: %d)\n",
779 dmaengine_tx_status(nfc->dma_chan, cookie, NULL));
780 dmaengine_terminate_all(nfc->dma_chan);
781 return -ETIMEDOUT;
782 }
783
784 return 0;
785}
786
787static int marvell_nfc_xfer_data_in_pio(struct marvell_nfc *nfc, u8 *in,
788 unsigned int len)
789{
790 unsigned int last_len = len % FIFO_DEPTH;
791 unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
792 int i;
793
794 for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
795 ioread32_rep(nfc->regs + NDDB, in + i, FIFO_REP(FIFO_DEPTH));
796
797 if (last_len) {
798 u8 tmp_buf[FIFO_DEPTH];
799
800 ioread32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
801 memcpy(in + last_full_offset, tmp_buf, last_len);
802 }
803
804 return 0;
805}
806
807static int marvell_nfc_xfer_data_out_pio(struct marvell_nfc *nfc, const u8 *out,
808 unsigned int len)
809{
810 unsigned int last_len = len % FIFO_DEPTH;
811 unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
812 int i;
813
814 for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
815 iowrite32_rep(nfc->regs + NDDB, out + i, FIFO_REP(FIFO_DEPTH));
816
817 if (last_len) {
818 u8 tmp_buf[FIFO_DEPTH];
819
820 memcpy(tmp_buf, out + last_full_offset, last_len);
821 iowrite32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
822 }
823
824 return 0;
825}
826
827static void marvell_nfc_check_empty_chunk(struct nand_chip *chip,
828 u8 *data, int data_len,
829 u8 *spare, int spare_len,
830 u8 *ecc, int ecc_len,
831 unsigned int *max_bitflips)
832{
833 struct mtd_info *mtd = nand_to_mtd(chip);
834 int bf;
835
836 /*
837 * Blank pages (all 0xFF) that have not been written may be recognized
838 * as bad if bitflips occur, so whenever an uncorrectable error occurs,
839 * check if the entire page (with ECC bytes) is actually blank or not.
840 */
841 if (!data)
842 data_len = 0;
843 if (!spare)
844 spare_len = 0;
845 if (!ecc)
846 ecc_len = 0;
847
848 bf = nand_check_erased_ecc_chunk(data, data_len, ecc, ecc_len,
849 spare, spare_len, chip->ecc.strength);
850 if (bf < 0) {
851 mtd->ecc_stats.failed++;
852 return;
853 }
854
855 /* Update the stats and max_bitflips */
856 mtd->ecc_stats.corrected += bf;
857 *max_bitflips = max_t(unsigned int, *max_bitflips, bf);
858}
859
/*
 * Check a chunk is correct or not according to the hardware ECC engine.
 * mtd->ecc_stats.corrected is updated, as well as max_bitflips, however
 * mtd->ecc_stats.failed is not: the function will instead return a non-zero
 * value indicating that a check on the emptiness of the subpage must be
 * performed before declaring the subpage corrupted.
 */
static int marvell_nfc_hw_ecc_correct(struct nand_chip *chip,
				      unsigned int *max_bitflips)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	int bf = 0;
	u32 ndsr;

	ndsr = readl_relaxed(nfc->regs + NDSR);

	/* Check uncorrectable error flag */
	if (ndsr & NDSR_UNCERR) {
		/* Acknowledge the status bits before reporting */
		writel_relaxed(ndsr, nfc->regs + NDSR);

		/*
		 * Do not increment ->ecc_stats.failed now; instead, return a
		 * non-zero value to indicate that this chunk was apparently
		 * bad, and it should be checked to see if it is empty or not.
		 * If the chunk (with ECC bytes) is not declared empty, the
		 * calling function must increment the failure count.
		 */
		return -EBADMSG;
	}

	/* Check correctable error flag */
	if (ndsr & NDSR_CORERR) {
		writel_relaxed(ndsr, nfc->regs + NDSR);

		/* Only BCH reports an actual bitflip count */
		if (chip->ecc.algo == NAND_ECC_BCH)
			bf = NDSR_ERRCNT(ndsr);
		else
			bf = 1;
	}

	/* Update the stats and max_bitflips */
	mtd->ecc_stats.corrected += bf;
	*max_bitflips = max_t(unsigned int, *max_bitflips, bf);

	return 0;
}
907
/* Hamming read helpers */
/*
 * Read a full page (data then OOB) with a single monolithic READ0/READSTART
 * operation, draining the result either through DMA or PIO. In raw mode the
 * ECC bytes are transferred as part of the OOB area.
 */
static int marvell_nfc_hw_ecc_hmg_do_read_page(struct nand_chip *chip,
					       u8 *data_buf, u8 *oob_buf,
					       bool raw, int page)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	struct marvell_nfc_op nfc_op = {
		.ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
			   NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
			   NDCB0_DBC |
			   NDCB0_CMD1(NAND_CMD_READ0) |
			   NDCB0_CMD2(NAND_CMD_READSTART),
		.ndcb[1] = NDCB1_ADDRS_PAGE(page),
		.ndcb[2] = NDCB2_ADDR5_PAGE(page),
	};
	/* Raw mode reads the ECC bytes out along with the spare bytes */
	unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
	int ret;

	/* NFCv2 needs more information about the operation being executed */
	if (nfc->caps->is_nfcv2)
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
				  "RDDREQ while draining FIFO (data/oob)");
	if (ret)
		return ret;

	/*
	 * Read the page then the OOB area. Unlike what is shown in current
	 * documentation, spare bytes are protected by the ECC engine, and must
	 * be at the beginning of the OOB area or running this driver on legacy
	 * systems will prevent the discovery of the BBM/BBT.
	 */
	if (nfc->use_dma) {
		marvell_nfc_xfer_data_dma(nfc, DMA_FROM_DEVICE,
					  lt->data_bytes + oob_bytes);
		memcpy(data_buf, nfc->dma_buf, lt->data_bytes);
		memcpy(oob_buf, nfc->dma_buf + lt->data_bytes, oob_bytes);
	} else {
		marvell_nfc_xfer_data_in_pio(nfc, data_buf, lt->data_bytes);
		marvell_nfc_xfer_data_in_pio(nfc, oob_buf, oob_bytes);
	}

	/* Wait for the whole command (CMDD) to complete */
	ret = marvell_nfc_wait_cmdd(chip);

	return ret;
}
962
963static int marvell_nfc_hw_ecc_hmg_read_page_raw(struct mtd_info *mtd,
964 struct nand_chip *chip, u8 *buf,
965 int oob_required, int page)
966{
967 return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
968 true, page);
969}
970
/*
 * ECC-corrected page read for the Hamming layouts. On an uncorrectable
 * error, re-read the page raw and check whether it is simply erased before
 * counting a real ECC failure.
 */
static int marvell_nfc_hw_ecc_hmg_read_page(struct mtd_info *mtd,
					    struct nand_chip *chip,
					    u8 *buf, int oob_required,
					    int page)
{
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	unsigned int full_sz = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
	int max_bitflips = 0, ret;
	u8 *raw_buf;

	/*
	 * NOTE(review): the return value of the read itself is ignored —
	 * presumably the ->correct() status below covers the failure cases;
	 * confirm.
	 */
	marvell_nfc_enable_hw_ecc(chip);
	marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi, false,
					    page);
	ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
	marvell_nfc_disable_hw_ecc(chip);

	if (!ret)
		return max_bitflips;

	/*
	 * When ECC failures are detected, check if the full page has been
	 * written or not. Ignore the failure if it is actually empty.
	 */
	raw_buf = kmalloc(full_sz, GFP_KERNEL);
	if (!raw_buf)
		return -ENOMEM;

	/* Re-read raw: data first, then spare + ECC, in one flat buffer */
	marvell_nfc_hw_ecc_hmg_do_read_page(chip, raw_buf, raw_buf +
					    lt->data_bytes, true, page);
	marvell_nfc_check_empty_chunk(chip, raw_buf, full_sz, NULL, 0, NULL, 0,
				      &max_bitflips);
	kfree(raw_buf);

	return max_bitflips;
}
1006
1007/*
1008 * Spare area in Hamming layouts is not protected by the ECC engine (even if
1009 * it appears before the ECC bytes when reading), the ->read_oob_raw() function
1010 * also stands for ->read_oob().
1011 */
1012static int marvell_nfc_hw_ecc_hmg_read_oob_raw(struct mtd_info *mtd,
1013 struct nand_chip *chip, int page)
1014{
1015 /* Invalidate page cache */
1016 chip->pagebuf = -1;
1017
1018 return marvell_nfc_hw_ecc_hmg_do_read_page(chip, chip->data_buf,
1019 chip->oob_poi, true, page);
1020}
1021
/* Hamming write helpers */
/*
 * Program a full page (data then OOB) with a single SEQIN/PAGEPROG operation,
 * pushing the content either through DMA or PIO, then wait for tPROG.
 */
static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
						const u8 *data_buf,
						const u8 *oob_buf, bool raw,
						int page)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	struct marvell_nfc_op nfc_op = {
		.ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) |
			   NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
			   NDCB0_CMD1(NAND_CMD_SEQIN) |
			   NDCB0_CMD2(NAND_CMD_PAGEPROG) |
			   NDCB0_DBC,
		.ndcb[1] = NDCB1_ADDRS_PAGE(page),
		.ndcb[2] = NDCB2_ADDR5_PAGE(page),
	};
	/* Raw mode writes the ECC bytes along with the spare bytes */
	unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
	int ret;

	/* NFCv2 needs more information about the operation being executed */
	if (nfc->caps->is_nfcv2)
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
				  "WRDREQ while loading FIFO (data)");
	if (ret)
		return ret;

	/*
	 * Write the page then the OOB area.
	 * NOTE(review): the DMA length always covers data + ECC + spare even
	 * when !raw, where only oob_bytes (without ECC) were copied into the
	 * bounce buffer — presumably the engine supplies the ECC bytes
	 * itself; confirm the extra transferred bytes are harmless.
	 */
	if (nfc->use_dma) {
		memcpy(nfc->dma_buf, data_buf, lt->data_bytes);
		memcpy(nfc->dma_buf + lt->data_bytes, oob_buf, oob_bytes);
		marvell_nfc_xfer_data_dma(nfc, DMA_TO_DEVICE, lt->data_bytes +
					  lt->ecc_bytes + lt->spare_bytes);
	} else {
		marvell_nfc_xfer_data_out_pio(nfc, data_buf, lt->data_bytes);
		marvell_nfc_xfer_data_out_pio(nfc, oob_buf, oob_bytes);
	}

	ret = marvell_nfc_wait_cmdd(chip);
	if (ret)
		return ret;

	/* Wait for the NAND to actually complete the program operation */
	ret = marvell_nfc_wait_op(chip,
				  chip->data_interface.timings.sdr.tPROG_max);
	return ret;
}
1076
1077static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct mtd_info *mtd,
1078 struct nand_chip *chip,
1079 const u8 *buf,
1080 int oob_required, int page)
1081{
1082 return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
1083 true, page);
1084}
1085
1086static int marvell_nfc_hw_ecc_hmg_write_page(struct mtd_info *mtd,
1087 struct nand_chip *chip,
1088 const u8 *buf,
1089 int oob_required, int page)
1090{
1091 int ret;
1092
1093 marvell_nfc_enable_hw_ecc(chip);
1094 ret = marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
1095 false, page);
1096 marvell_nfc_disable_hw_ecc(chip);
1097
1098 return ret;
1099}
1100
1101/*
1102 * Spare area in Hamming layouts is not protected by the ECC engine (even if
1103 * it appears before the ECC bytes when reading), the ->write_oob_raw() function
1104 * also stands for ->write_oob().
1105 */
1106static int marvell_nfc_hw_ecc_hmg_write_oob_raw(struct mtd_info *mtd,
1107 struct nand_chip *chip,
1108 int page)
1109{
1110 /* Invalidate page cache */
1111 chip->pagebuf = -1;
1112
1113 memset(chip->data_buf, 0xFF, mtd->writesize);
1114
1115 return marvell_nfc_hw_ecc_hmg_do_write_page(chip, chip->data_buf,
1116 chip->oob_poi, true, page);
1117}
1118
/* BCH read helpers */
/*
 * Raw (no correction) page read for the BCH layouts: the page is read chunk
 * by chunk, splitting each chunk into its data, spare and ECC parts so data
 * lands contiguously in @buf while spare then ECC land in chip->oob_poi.
 */
static int marvell_nfc_hw_ecc_bch_read_page_raw(struct mtd_info *mtd,
						struct nand_chip *chip, u8 *buf,
						int oob_required, int page)
{
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	u8 *oob = chip->oob_poi;
	int chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
	/* ECC bytes are stored after all the spare bytes in the OOB buffer */
	int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
		lt->last_spare_bytes;
	int data_len = lt->data_bytes;
	int spare_len = lt->spare_bytes;
	int ecc_len = lt->ecc_bytes;
	int chunk;

	/* Unused OOB bytes must read as 0xFF rather than stale content */
	if (oob_required)
		memset(chip->oob_poi, 0xFF, mtd->oobsize);

	nand_read_page_op(chip, page, 0, NULL, 0);

	for (chunk = 0; chunk < lt->nchunks; chunk++) {
		/* Update last chunk length */
		if (chunk >= lt->full_chunk_cnt) {
			data_len = lt->last_data_bytes;
			spare_len = lt->last_spare_bytes;
			ecc_len = lt->last_ecc_bytes;
		}

		/* Read data bytes*/
		nand_change_read_column_op(chip, chunk * chunk_size,
					   buf + (lt->data_bytes * chunk),
					   data_len, false);

		/* Read spare bytes */
		nand_read_data_op(chip, oob + (lt->spare_bytes * chunk),
				  spare_len, false);

		/* Read ECC bytes */
		nand_read_data_op(chip, oob + ecc_offset +
				  (ALIGN(lt->ecc_bytes, 32) * chunk),
				  ecc_len, false);
	}

	return 0;
}
1164
/*
 * Read one ECC chunk (data + spare) of a page into @data/@spare using the
 * controller's naked-read primitives. Chunk 0 also carries the actual
 * READ0/READSTART command and address cycles.
 */
static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
					      u8 *data, unsigned int data_len,
					      u8 *spare, unsigned int spare_len,
					      int page)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	int i, ret;
	struct marvell_nfc_op nfc_op = {
		.ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
			   NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
			   NDCB0_LEN_OVRD,
		.ndcb[1] = NDCB1_ADDRS_PAGE(page),
		.ndcb[2] = NDCB2_ADDR5_PAGE(page),
		.ndcb[3] = data_len + spare_len,
	};

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return;

	/* Only the first chunk issues the READ command cycles */
	if (chunk == 0)
		nfc_op.ndcb[0] |= NDCB0_DBC |
				  NDCB0_CMD1(NAND_CMD_READ0) |
				  NDCB0_CMD2(NAND_CMD_READSTART);

	/*
	 * Trigger the naked read operation only on the last chunk.
	 * Otherwise, use monolithic read.
	 */
	if (lt->nchunks == 1 || (chunk < lt->nchunks - 1))
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
	else
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);

	marvell_nfc_send_cmd(chip, &nfc_op);

	/*
	 * According to the datasheet, when reading from NDDB
	 * with BCH enabled, after each 32 bytes reads, we
	 * have to make sure that the NDSR.RDDREQ bit is set.
	 *
	 * Drain the FIFO, 8 32-bit reads at a time, and skip
	 * the polling on the last read.
	 *
	 * Length is a multiple of 32 bytes, hence it is a multiple of 8 too.
	 */
	for (i = 0; i < data_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
		marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
				    "RDDREQ while draining FIFO (data)");
		marvell_nfc_xfer_data_in_pio(nfc, data,
					     FIFO_DEPTH * BCH_SEQ_READS);
		data += FIFO_DEPTH * BCH_SEQ_READS;
	}

	/* Same draining protocol for the spare bytes */
	for (i = 0; i < spare_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
		marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
				    "RDDREQ while draining FIFO (OOB)");
		marvell_nfc_xfer_data_in_pio(nfc, spare,
					     FIFO_DEPTH * BCH_SEQ_READS);
		spare += FIFO_DEPTH * BCH_SEQ_READS;
	}
}
1229
/*
 * ECC-corrected page read for the BCH layouts: read chunk by chunk, and for
 * every chunk flagged as uncorrectable, re-read its ECC bytes raw and check
 * whether the chunk is simply erased before counting a real ECC failure.
 */
static int marvell_nfc_hw_ecc_bch_read_page(struct mtd_info *mtd,
					    struct nand_chip *chip,
					    u8 *buf, int oob_required,
					    int page)
{
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	int data_len = lt->data_bytes, spare_len = lt->spare_bytes, ecc_len;
	u8 *data = buf, *spare = chip->oob_poi, *ecc;
	int max_bitflips = 0;
	u32 failure_mask = 0;
	int chunk, ecc_offset_in_page, ret;

	/*
	 * With BCH, OOB is not fully used (and thus not read entirely), not
	 * expected bytes could show up at the end of the OOB buffer if not
	 * explicitly erased.
	 */
	if (oob_required)
		memset(chip->oob_poi, 0xFF, mtd->oobsize);

	marvell_nfc_enable_hw_ecc(chip);

	for (chunk = 0; chunk < lt->nchunks; chunk++) {
		/* Update length for the last chunk */
		if (chunk >= lt->full_chunk_cnt) {
			data_len = lt->last_data_bytes;
			spare_len = lt->last_spare_bytes;
		}

		/* Read the chunk and detect number of bitflips */
		marvell_nfc_hw_ecc_bch_read_chunk(chip, chunk, data, data_len,
						  spare, spare_len, page);
		ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
		if (ret)
			failure_mask |= BIT(chunk);

		data += data_len;
		spare += spare_len;
	}

	marvell_nfc_disable_hw_ecc(chip);

	if (!failure_mask)
		return max_bitflips;

	/*
	 * Please note that dumping the ECC bytes during a normal read with OOB
	 * area would add a significant overhead as ECC bytes are "consumed" by
	 * the controller in normal mode and must be re-read in raw mode. To
	 * avoid dropping the performances, we prefer not to include them. The
	 * user should re-read the page in raw mode if ECC bytes are required.
	 *
	 * However, for any subpage read error reported by ->correct(), the ECC
	 * bytes must be read in raw mode and the full subpage must be checked
	 * to see if it is entirely empty or if there was an actual error.
	 */
	for (chunk = 0; chunk < lt->nchunks; chunk++) {
		/* No failure reported for this chunk, move to the next one */
		if (!(failure_mask & BIT(chunk)))
			continue;

		/* Derive ECC bytes positions (in page/buffer) and length */
		ecc = chip->oob_poi +
			(lt->full_chunk_cnt * lt->spare_bytes) +
			lt->last_spare_bytes +
			(chunk * ALIGN(lt->ecc_bytes, 32));
		ecc_offset_in_page =
			(chunk * (lt->data_bytes + lt->spare_bytes +
				  lt->ecc_bytes)) +
			(chunk < lt->full_chunk_cnt ?
			 lt->data_bytes + lt->spare_bytes :
			 lt->last_data_bytes + lt->last_spare_bytes);
		ecc_len = chunk < lt->full_chunk_cnt ?
			  lt->ecc_bytes : lt->last_ecc_bytes;

		/* Do the actual raw read of the ECC bytes */
		nand_change_read_column_op(chip, ecc_offset_in_page,
					   ecc, ecc_len, false);

		/* Derive data/spare bytes positions (in buffer) and length */
		data = buf + (chunk * lt->data_bytes);
		data_len = chunk < lt->full_chunk_cnt ?
			   lt->data_bytes : lt->last_data_bytes;
		spare = chip->oob_poi + (chunk * (lt->spare_bytes +
						  lt->ecc_bytes));
		spare_len = chunk < lt->full_chunk_cnt ?
			    lt->spare_bytes : lt->last_spare_bytes;

		/* Check the entire chunk (data + spare + ecc) for emptiness */
		marvell_nfc_check_empty_chunk(chip, data, data_len, spare,
					      spare_len, ecc, ecc_len,
					      &max_bitflips);
	}

	return max_bitflips;
}
1326
1327static int marvell_nfc_hw_ecc_bch_read_oob_raw(struct mtd_info *mtd,
1328 struct nand_chip *chip, int page)
1329{
1330 /* Invalidate page cache */
1331 chip->pagebuf = -1;
1332
1333 return chip->ecc.read_page_raw(mtd, chip, chip->data_buf, true, page);
1334}
1335
1336static int marvell_nfc_hw_ecc_bch_read_oob(struct mtd_info *mtd,
1337 struct nand_chip *chip, int page)
1338{
1339 /* Invalidate page cache */
1340 chip->pagebuf = -1;
1341
1342 return chip->ecc.read_page(mtd, chip, chip->data_buf, true, page);
1343}
1344
/* BCH write helpers */
/*
 * Raw (no ECC computation) page program for the BCH layouts: write the page
 * chunk by chunk, interleaving data, spare and ECC bytes from @buf and
 * chip->oob_poi at the positions the controller would have used.
 */
static int marvell_nfc_hw_ecc_bch_write_page_raw(struct mtd_info *mtd,
						 struct nand_chip *chip,
						 const u8 *buf,
						 int oob_required, int page)
{
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	int full_chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
	int data_len = lt->data_bytes;
	int spare_len = lt->spare_bytes;
	int ecc_len = lt->ecc_bytes;
	int spare_offset = 0;
	/* ECC bytes are stored after all the spare bytes in the OOB buffer */
	int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
		lt->last_spare_bytes;
	int chunk;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	for (chunk = 0; chunk < lt->nchunks; chunk++) {
		/* Trailing chunks may use the shorter 'last' lengths */
		if (chunk >= lt->full_chunk_cnt) {
			data_len = lt->last_data_bytes;
			spare_len = lt->last_spare_bytes;
			ecc_len = lt->last_ecc_bytes;
		}

		/* Point to the column of the next chunk */
		nand_change_write_column_op(chip, chunk * full_chunk_size,
					    NULL, 0, false);

		/* Write the data */
		nand_write_data_op(chip, buf + (chunk * lt->data_bytes),
				   data_len, false);

		if (!oob_required)
			continue;

		/* Write the spare bytes */
		if (spare_len)
			nand_write_data_op(chip, chip->oob_poi + spare_offset,
					   spare_len, false);

		/* Write the ECC bytes */
		if (ecc_len)
			nand_write_data_op(chip, chip->oob_poi + ecc_offset,
					   ecc_len, false);

		spare_offset += spare_len;
		ecc_offset += ALIGN(ecc_len, 32);
	}

	return nand_prog_page_end_op(chip);
}
1397
/*
 * Program one ECC chunk (data + spare) using the controller's naked-write
 * primitives. Chunk 0 carries the SEQIN command and address cycles, the last
 * chunk carries PAGEPROG.
 */
static int
marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
				   const u8 *data, unsigned int data_len,
				   const u8 *spare, unsigned int spare_len,
				   int page)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	int ret;
	struct marvell_nfc_op nfc_op = {
		.ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
		.ndcb[3] = data_len + spare_len,
	};

	/*
	 * First operation dispatches the CMD_SEQIN command, issue the address
	 * cycles and asks for the first chunk of data.
	 * All operations in the middle (if any) will issue a naked write and
	 * also ask for data.
	 * Last operation (if any) asks for the last chunk of data through a
	 * last naked write.
	 */
	if (chunk == 0) {
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_WRITE_DISPATCH) |
				  NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
				  NDCB0_CMD1(NAND_CMD_SEQIN);
		nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
		nfc_op.ndcb[2] |= NDCB2_ADDR5_PAGE(page);
	} else if (chunk < lt->nchunks - 1) {
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
	} else {
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
	}

	/* Always dispatch the PAGEPROG command on the last chunk */
	if (chunk == lt->nchunks - 1)
		nfc_op.ndcb[0] |= NDCB0_CMD2(NAND_CMD_PAGEPROG) | NDCB0_DBC;

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
				  "WRDREQ while loading FIFO (data)");
	if (ret)
		return ret;

	/* Transfer the contents */
	iowrite32_rep(nfc->regs + NDDB, data, FIFO_REP(data_len));
	iowrite32_rep(nfc->regs + NDDB, spare, FIFO_REP(spare_len));

	return 0;
}
1453
1454static int marvell_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd,
1455 struct nand_chip *chip,
1456 const u8 *buf,
1457 int oob_required, int page)
1458{
1459 const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
1460 const u8 *data = buf;
1461 const u8 *spare = chip->oob_poi;
1462 int data_len = lt->data_bytes;
1463 int spare_len = lt->spare_bytes;
1464 int chunk, ret;
1465
1466 /* Spare data will be written anyway, so clear it to avoid garbage */
1467 if (!oob_required)
1468 memset(chip->oob_poi, 0xFF, mtd->oobsize);
1469
1470 marvell_nfc_enable_hw_ecc(chip);
1471
1472 for (chunk = 0; chunk < lt->nchunks; chunk++) {
1473 if (chunk >= lt->full_chunk_cnt) {
1474 data_len = lt->last_data_bytes;
1475 spare_len = lt->last_spare_bytes;
1476 }
1477
1478 marvell_nfc_hw_ecc_bch_write_chunk(chip, chunk, data, data_len,
1479 spare, spare_len, page);
1480 data += data_len;
1481 spare += spare_len;
1482
1483 /*
1484 * Waiting only for CMDD or PAGED is not enough, ECC are
1485 * partially written. No flag is set once the operation is
1486 * really finished but the ND_RUN bit is cleared, so wait for it
1487 * before stepping into the next command.
1488 */
1489 marvell_nfc_wait_ndrun(chip);
1490 }
1491
1492 ret = marvell_nfc_wait_op(chip,
1493 chip->data_interface.timings.sdr.tPROG_max);
1494
1495 marvell_nfc_disable_hw_ecc(chip);
1496
1497 if (ret)
1498 return ret;
1499
1500 return 0;
1501}
1502
1503static int marvell_nfc_hw_ecc_bch_write_oob_raw(struct mtd_info *mtd,
1504 struct nand_chip *chip,
1505 int page)
1506{
1507 /* Invalidate page cache */
1508 chip->pagebuf = -1;
1509
1510 memset(chip->data_buf, 0xFF, mtd->writesize);
1511
1512 return chip->ecc.write_page_raw(mtd, chip, chip->data_buf, true, page);
1513}
1514
1515static int marvell_nfc_hw_ecc_bch_write_oob(struct mtd_info *mtd,
1516 struct nand_chip *chip, int page)
1517{
1518 /* Invalidate page cache */
1519 chip->pagebuf = -1;
1520
1521 memset(chip->data_buf, 0xFF, mtd->writesize);
1522
1523 return chip->ecc.write_page(mtd, chip, chip->data_buf, true, page);
1524}
1525
1526/* NAND framework ->exec_op() hooks and related helpers */
1527static void marvell_nfc_parse_instructions(struct nand_chip *chip,
1528 const struct nand_subop *subop,
1529 struct marvell_nfc_op *nfc_op)
1530{
1531 const struct nand_op_instr *instr = NULL;
1532 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
1533 bool first_cmd = true;
1534 unsigned int op_id;
1535 int i;
1536
1537 /* Reset the input structure as most of its fields will be OR'ed */
1538 memset(nfc_op, 0, sizeof(struct marvell_nfc_op));
1539
1540 for (op_id = 0; op_id < subop->ninstrs; op_id++) {
1541 unsigned int offset, naddrs;
1542 const u8 *addrs;
1543 int len = nand_subop_get_data_len(subop, op_id);
1544
1545 instr = &subop->instrs[op_id];
1546
1547 switch (instr->type) {
1548 case NAND_OP_CMD_INSTR:
1549 if (first_cmd)
1550 nfc_op->ndcb[0] |=
1551 NDCB0_CMD1(instr->ctx.cmd.opcode);
1552 else
1553 nfc_op->ndcb[0] |=
1554 NDCB0_CMD2(instr->ctx.cmd.opcode) |
1555 NDCB0_DBC;
1556
1557 nfc_op->cle_ale_delay_ns = instr->delay_ns;
1558 first_cmd = false;
1559 break;
1560
1561 case NAND_OP_ADDR_INSTR:
1562 offset = nand_subop_get_addr_start_off(subop, op_id);
1563 naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
1564 addrs = &instr->ctx.addr.addrs[offset];
1565
1566 nfc_op->ndcb[0] |= NDCB0_ADDR_CYC(naddrs);
1567
1568 for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
1569 nfc_op->ndcb[1] |= addrs[i] << (8 * i);
1570
1571 if (naddrs >= 5)
1572 nfc_op->ndcb[2] |= NDCB2_ADDR5_CYC(addrs[4]);
1573 if (naddrs >= 6)
1574 nfc_op->ndcb[3] |= NDCB3_ADDR6_CYC(addrs[5]);
1575 if (naddrs == 7)
1576 nfc_op->ndcb[3] |= NDCB3_ADDR7_CYC(addrs[6]);
1577
1578 nfc_op->cle_ale_delay_ns = instr->delay_ns;
1579 break;
1580
1581 case NAND_OP_DATA_IN_INSTR:
1582 nfc_op->data_instr = instr;
1583 nfc_op->data_instr_idx = op_id;
1584 nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ);
1585 if (nfc->caps->is_nfcv2) {
1586 nfc_op->ndcb[0] |=
1587 NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
1588 NDCB0_LEN_OVRD;
1589 nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
1590 }
1591 nfc_op->data_delay_ns = instr->delay_ns;
1592 break;
1593
1594 case NAND_OP_DATA_OUT_INSTR:
1595 nfc_op->data_instr = instr;
1596 nfc_op->data_instr_idx = op_id;
1597 nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE);
1598 if (nfc->caps->is_nfcv2) {
1599 nfc_op->ndcb[0] |=
1600 NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
1601 NDCB0_LEN_OVRD;
1602 nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
1603 }
1604 nfc_op->data_delay_ns = instr->delay_ns;
1605 break;
1606
1607 case NAND_OP_WAITRDY_INSTR:
1608 nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
1609 nfc_op->rdy_delay_ns = instr->delay_ns;
1610 break;
1611 }
1612 }
1613}
1614
1615static int marvell_nfc_xfer_data_pio(struct nand_chip *chip,
1616 const struct nand_subop *subop,
1617 struct marvell_nfc_op *nfc_op)
1618{
1619 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
1620 const struct nand_op_instr *instr = nfc_op->data_instr;
1621 unsigned int op_id = nfc_op->data_instr_idx;
1622 unsigned int len = nand_subop_get_data_len(subop, op_id);
1623 unsigned int offset = nand_subop_get_data_start_off(subop, op_id);
1624 bool reading = (instr->type == NAND_OP_DATA_IN_INSTR);
1625 int ret;
1626
1627 if (instr->ctx.data.force_8bit)
1628 marvell_nfc_force_byte_access(chip, true);
1629
1630 if (reading) {
1631 u8 *in = instr->ctx.data.buf.in + offset;
1632
1633 ret = marvell_nfc_xfer_data_in_pio(nfc, in, len);
1634 } else {
1635 const u8 *out = instr->ctx.data.buf.out + offset;
1636
1637 ret = marvell_nfc_xfer_data_out_pio(nfc, out, len);
1638 }
1639
1640 if (instr->ctx.data.force_8bit)
1641 marvell_nfc_force_byte_access(chip, false);
1642
1643 return ret;
1644}
1645
/*
 * Execute a monolithic access (command + address cycles + optional second
 * command + data transfer) as a single controller operation.
 */
static int marvell_nfc_monolithic_access_exec(struct nand_chip *chip,
					      const struct nand_subop *subop)
{
	struct marvell_nfc_op nfc_op;
	bool reading;
	int ret;

	marvell_nfc_parse_instructions(chip, subop, &nfc_op);
	reading = (nfc_op.data_instr->type == NAND_OP_DATA_IN_INSTR);

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
				  "RDDREQ/WRDREQ while draining raw data");
	if (ret)
		return ret;

	cond_delay(nfc_op.cle_ale_delay_ns);

	/* For reads, the device must be ready before draining the FIFO */
	if (reading) {
		if (nfc_op.rdy_timeout_ms) {
			ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
			if (ret)
				return ret;
		}

		cond_delay(nfc_op.rdy_delay_ns);
	}

	marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
	ret = marvell_nfc_wait_cmdd(chip);
	if (ret)
		return ret;

	cond_delay(nfc_op.data_delay_ns);

	/* For writes, readiness only matters after the data has been sent */
	if (!reading) {
		if (nfc_op.rdy_timeout_ms) {
			ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
			if (ret)
				return ret;
		}

		cond_delay(nfc_op.rdy_delay_ns);
	}

	/*
	 * NDCR ND_RUN bit should be cleared automatically at the end of each
	 * operation but experience shows that the behavior is buggy when it
	 * comes to writes (with LEN_OVRD). Clear it by hand in this case.
	 */
	if (!reading) {
		struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);

		writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
			       nfc->regs + NDCR);
	}

	return 0;
}
1709
/*
 * Execute a single naked instruction (bare command, bare address cycles, or
 * a final data transfer) on NFCv2 controllers.
 */
static int marvell_nfc_naked_access_exec(struct nand_chip *chip,
					 const struct nand_subop *subop)
{
	struct marvell_nfc_op nfc_op;
	int ret;

	marvell_nfc_parse_instructions(chip, subop, &nfc_op);

	/*
	 * Naked access are different in that they need to be flagged as naked
	 * by the controller. Reset the controller registers fields that inform
	 * on the type and refill them according to the ongoing operation.
	 */
	nfc_op.ndcb[0] &= ~(NDCB0_CMD_TYPE(TYPE_MASK) |
			    NDCB0_CMD_XTYPE(XTYPE_MASK));
	switch (subop->instrs[0].type) {
	case NAND_OP_CMD_INSTR:
		nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_CMD);
		break;
	case NAND_OP_ADDR_INSTR:
		nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_ADDR);
		break;
	case NAND_OP_DATA_IN_INSTR:
		nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ) |
				  NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
		break;
	case NAND_OP_DATA_OUT_INSTR:
		nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE) |
				  NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
		break;
	default:
		/* This should never happen */
		break;
	}

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);

	/* Pure command/address cycles: just wait for command completion */
	if (!nfc_op.data_instr) {
		ret = marvell_nfc_wait_cmdd(chip);
		cond_delay(nfc_op.cle_ale_delay_ns);
		return ret;
	}

	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
				  "RDDREQ/WRDREQ while draining raw data");
	if (ret)
		return ret;

	marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
	ret = marvell_nfc_wait_cmdd(chip);
	if (ret)
		return ret;

	/*
	 * NDCR ND_RUN bit should be cleared automatically at the end of each
	 * operation but experience shows that the behavior is buggy when it
	 * comes to writes (with LEN_OVRD). Clear it by hand in this case.
	 */
	if (subop->instrs[0].type == NAND_OP_DATA_OUT_INSTR) {
		struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);

		writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
			       nfc->regs + NDCR);
	}

	return 0;
}
1781
1782static int marvell_nfc_naked_waitrdy_exec(struct nand_chip *chip,
1783 const struct nand_subop *subop)
1784{
1785 struct marvell_nfc_op nfc_op;
1786 int ret;
1787
1788 marvell_nfc_parse_instructions(chip, subop, &nfc_op);
1789
1790 ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
1791 cond_delay(nfc_op.rdy_delay_ns);
1792
1793 return ret;
1794}
1795
/*
 * NFCv1 READ ID pattern: issue the command/address with the READ_ID command
 * type and drain the returned ID bytes in PIO mode.
 */
static int marvell_nfc_read_id_type_exec(struct nand_chip *chip,
					 const struct nand_subop *subop)
{
	struct marvell_nfc_op nfc_op;
	int ret;

	marvell_nfc_parse_instructions(chip, subop, &nfc_op);
	/* parse_instructions() flagged a generic read: patch the cmd type */
	nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
	nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ_ID);

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
				  "RDDREQ while reading ID");
	if (ret)
		return ret;

	cond_delay(nfc_op.cle_ale_delay_ns);

	if (nfc_op.rdy_timeout_ms) {
		ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
		if (ret)
			return ret;
	}

	cond_delay(nfc_op.rdy_delay_ns);

	marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
	ret = marvell_nfc_wait_cmdd(chip);
	if (ret)
		return ret;

	cond_delay(nfc_op.data_delay_ns);

	return 0;
}
1835
/*
 * NFCv1 READ STATUS pattern: issue the command with the STATUS command type
 * and drain the status byte in PIO mode.
 */
static int marvell_nfc_read_status_exec(struct nand_chip *chip,
					const struct nand_subop *subop)
{
	struct marvell_nfc_op nfc_op;
	int ret;

	marvell_nfc_parse_instructions(chip, subop, &nfc_op);
	/* parse_instructions() flagged a generic read: patch the cmd type */
	nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
	nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_STATUS);

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
				  "RDDREQ while reading status");
	if (ret)
		return ret;

	cond_delay(nfc_op.cle_ale_delay_ns);

	if (nfc_op.rdy_timeout_ms) {
		ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
		if (ret)
			return ret;
	}

	cond_delay(nfc_op.rdy_delay_ns);

	marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
	ret = marvell_nfc_wait_cmdd(chip);
	if (ret)
		return ret;

	cond_delay(nfc_op.data_delay_ns);

	return 0;
}
1875
/*
 * NFCv1 RESET pattern: issue the command with the RESET command type, then
 * wait for the device to become ready again.
 */
static int marvell_nfc_reset_cmd_type_exec(struct nand_chip *chip,
					   const struct nand_subop *subop)
{
	struct marvell_nfc_op nfc_op;
	int ret;

	marvell_nfc_parse_instructions(chip, subop, &nfc_op);
	nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_RESET);

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_wait_cmdd(chip);
	if (ret)
		return ret;

	cond_delay(nfc_op.cle_ale_delay_ns);

	ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
	if (ret)
		return ret;

	cond_delay(nfc_op.rdy_delay_ns);

	return 0;
}
1904
/*
 * NFCv1 ERASE pattern: issue the command/address with the ERASE command
 * type, then wait for the erase to complete.
 */
static int marvell_nfc_erase_cmd_type_exec(struct nand_chip *chip,
					   const struct nand_subop *subop)
{
	struct marvell_nfc_op nfc_op;
	int ret;

	marvell_nfc_parse_instructions(chip, subop, &nfc_op);
	nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_ERASE);

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_wait_cmdd(chip);
	if (ret)
		return ret;

	cond_delay(nfc_op.cle_ale_delay_ns);

	ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
	if (ret)
		return ret;

	cond_delay(nfc_op.rdy_delay_ns);

	return 0;
}
1933
/*
 * Instruction patterns supported by NFCv2 controllers: full read/write
 * sequences map onto a single monolithic operation, and any leftover
 * instruction can be executed as a naked operation.
 */
static const struct nand_op_parser marvell_nfcv2_op_parser = NAND_OP_PARSER(
	/* Monolithic reads/writes */
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_monolithic_access_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC_NFCV2),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_monolithic_access_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2),
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	/* Naked commands */
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_naked_access_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_naked_access_exec,
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_naked_access_exec,
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_naked_access_exec,
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_naked_waitrdy_exec,
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	);
1967
/*
 * Instruction patterns supported by NFCv1 controllers: no naked operations,
 * so each full command sequence needs its own dedicated exec function.
 */
static const struct nand_op_parser marvell_nfcv1_op_parser = NAND_OP_PARSER(
	/* Naked commands not supported, use a function for each pattern */
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_read_id_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_erase_cmd_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_read_status_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_reset_cmd_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_naked_waitrdy_exec,
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	);
1993
1994static int marvell_nfc_exec_op(struct nand_chip *chip,
1995 const struct nand_operation *op,
1996 bool check_only)
1997{
1998 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
1999
2000 if (nfc->caps->is_nfcv2)
2001 return nand_op_parser_exec_op(chip, &marvell_nfcv2_op_parser,
2002 op, check_only);
2003 else
2004 return nand_op_parser_exec_op(chip, &marvell_nfcv1_op_parser,
2005 op, check_only);
2006}
2007
2008/*
2009 * Layouts were broken in old pxa3xx_nand driver, these are supposed to be
2010 * usable.
2011 */
2012static int marvell_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2013 struct mtd_oob_region *oobregion)
2014{
2015 struct nand_chip *chip = mtd_to_nand(mtd);
2016 const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
2017
2018 if (section)
2019 return -ERANGE;
2020
2021 oobregion->length = (lt->full_chunk_cnt * lt->ecc_bytes) +
2022 lt->last_ecc_bytes;
2023 oobregion->offset = mtd->oobsize - oobregion->length;
2024
2025 return 0;
2026}
2027
2028static int marvell_nand_ooblayout_free(struct mtd_info *mtd, int section,
2029 struct mtd_oob_region *oobregion)
2030{
2031 struct nand_chip *chip = mtd_to_nand(mtd);
2032 const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
2033
2034 if (section)
2035 return -ERANGE;
2036
2037 /*
2038 * Bootrom looks in bytes 0 & 5 for bad blocks for the
2039 * 4KB page / 4bit BCH combination.
2040 */
2041 if (mtd->writesize == SZ_4K && lt->data_bytes == SZ_2K)
2042 oobregion->offset = 6;
2043 else
2044 oobregion->offset = 2;
2045
2046 oobregion->length = (lt->full_chunk_cnt * lt->spare_bytes) +
2047 lt->last_spare_bytes - oobregion->offset;
2048
2049 return 0;
2050}
2051
/* OOB layout (ECC and free regions) exposed to MTD users */
static const struct mtd_ooblayout_ops marvell_nand_ooblayout_ops = {
	.ecc = marvell_nand_ooblayout_ecc,
	.free = marvell_nand_ooblayout_free,
};
2056
/*
 * Pick the HW ECC layout matching the page size, ECC step size and strength
 * requested, then install the page/OOB accessors: Hamming helpers for
 * 1-bit strength, BCH helpers otherwise.
 */
static int marvell_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
					 struct nand_ecc_ctrl *ecc)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct marvell_hw_ecc_layout *l;
	int i;

	/* NFCv1 cannot split a page: data + OOB must fit in one chunk */
	if (!nfc->caps->is_nfcv2 &&
	    (mtd->writesize + mtd->oobsize > MAX_CHUNK_SIZE)) {
		dev_err(nfc->dev,
			"NFCv1: writesize (%d) cannot be bigger than a chunk (%d)\n",
			mtd->writesize, MAX_CHUNK_SIZE - mtd->oobsize);
		return -ENOTSUPP;
	}

	/* Look up a predefined layout matching the requested geometry */
	to_marvell_nand(chip)->layout = NULL;
	for (i = 0; i < ARRAY_SIZE(marvell_nfc_layouts); i++) {
		l = &marvell_nfc_layouts[i];
		if (mtd->writesize == l->writesize &&
		    ecc->size == l->chunk && ecc->strength == l->strength) {
			to_marvell_nand(chip)->layout = l;
			break;
		}
	}

	if (!to_marvell_nand(chip)->layout ||
	    (!nfc->caps->is_nfcv2 && ecc->strength > 1)) {
		dev_err(nfc->dev,
			"ECC strength %d at page size %d is not supported\n",
			ecc->strength, mtd->writesize);
		return -ENOTSUPP;
	}

	mtd_set_ooblayout(mtd, &marvell_nand_ooblayout_ops);
	ecc->steps = l->nchunks;
	ecc->size = l->data_bytes;

	if (ecc->strength == 1) {
		chip->ecc.algo = NAND_ECC_HAMMING;
		ecc->read_page_raw = marvell_nfc_hw_ecc_hmg_read_page_raw;
		ecc->read_page = marvell_nfc_hw_ecc_hmg_read_page;
		ecc->read_oob_raw = marvell_nfc_hw_ecc_hmg_read_oob_raw;
		ecc->read_oob = ecc->read_oob_raw;
		ecc->write_page_raw = marvell_nfc_hw_ecc_hmg_write_page_raw;
		ecc->write_page = marvell_nfc_hw_ecc_hmg_write_page;
		ecc->write_oob_raw = marvell_nfc_hw_ecc_hmg_write_oob_raw;
		ecc->write_oob = ecc->write_oob_raw;
	} else {
		chip->ecc.algo = NAND_ECC_BCH;
		/*
		 * The reported strength is forced to 16 for every BCH layout,
		 * presumably the engine's fixed correction capability per
		 * chunk — TODO confirm against the NFC datasheet.
		 */
		ecc->strength = 16;
		ecc->read_page_raw = marvell_nfc_hw_ecc_bch_read_page_raw;
		ecc->read_page = marvell_nfc_hw_ecc_bch_read_page;
		ecc->read_oob_raw = marvell_nfc_hw_ecc_bch_read_oob_raw;
		ecc->read_oob = marvell_nfc_hw_ecc_bch_read_oob;
		ecc->write_page_raw = marvell_nfc_hw_ecc_bch_write_page_raw;
		ecc->write_page = marvell_nfc_hw_ecc_bch_write_page;
		ecc->write_oob_raw = marvell_nfc_hw_ecc_bch_write_oob_raw;
		ecc->write_oob = marvell_nfc_hw_ecc_bch_write_oob;
	}

	return 0;
}
2120
2121static int marvell_nand_ecc_init(struct mtd_info *mtd,
2122 struct nand_ecc_ctrl *ecc)
2123{
2124 struct nand_chip *chip = mtd_to_nand(mtd);
2125 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
2126 int ret;
2127
2128 if (ecc->mode != NAND_ECC_NONE && (!ecc->size || !ecc->strength)) {
2129 if (chip->ecc_step_ds && chip->ecc_strength_ds) {
2130 ecc->size = chip->ecc_step_ds;
2131 ecc->strength = chip->ecc_strength_ds;
2132 } else {
2133 dev_info(nfc->dev,
2134 "No minimum ECC strength, using 1b/512B\n");
2135 ecc->size = 512;
2136 ecc->strength = 1;
2137 }
2138 }
2139
2140 switch (ecc->mode) {
2141 case NAND_ECC_HW:
2142 ret = marvell_nand_hw_ecc_ctrl_init(mtd, ecc);
2143 if (ret)
2144 return ret;
2145 break;
2146 case NAND_ECC_NONE:
2147 case NAND_ECC_SOFT:
2148 if (!nfc->caps->is_nfcv2 && mtd->writesize != SZ_512 &&
2149 mtd->writesize != SZ_2K) {
2150 dev_err(nfc->dev, "NFCv1 cannot write %d bytes pages\n",
2151 mtd->writesize);
2152 return -EINVAL;
2153 }
2154 break;
2155 default:
2156 return -EINVAL;
2157 }
2158
2159 return 0;
2160}
2161
/* On-flash signatures identifying the bad block table and its mirror */
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

/* Main BBT: stored in the last blocks, pattern/version kept in OOB */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
		   NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,	/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

/* Mirror BBT: same placement rules, reversed signature */
static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
		   NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,	/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
2184
2185static int marvell_nfc_setup_data_interface(struct mtd_info *mtd, int chipnr,
2186 const struct nand_data_interface
2187 *conf)
2188{
2189 struct nand_chip *chip = mtd_to_nand(mtd);
2190 struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
2191 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
2192 unsigned int period_ns = 1000000000 / clk_get_rate(nfc->ecc_clk) * 2;
2193 const struct nand_sdr_timings *sdr;
2194 struct marvell_nfc_timings nfc_tmg;
2195 int read_delay;
2196
2197 sdr = nand_get_sdr_timings(conf);
2198 if (IS_ERR(sdr))
2199 return PTR_ERR(sdr);
2200
2201 /*
2202 * SDR timings are given in pico-seconds while NFC timings must be
2203 * expressed in NAND controller clock cycles, which is half of the
2204 * frequency of the accessible ECC clock retrieved by clk_get_rate().
2205 * This is not written anywhere in the datasheet but was observed
2206 * with an oscilloscope.
2207 *
2208 * NFC datasheet gives equations from which thoses calculations
2209 * are derived, they tend to be slightly more restrictives than the
2210 * given core timings and may improve the overall speed.
2211 */
2212 nfc_tmg.tRP = TO_CYCLES(DIV_ROUND_UP(sdr->tRC_min, 2), period_ns) - 1;
2213 nfc_tmg.tRH = nfc_tmg.tRP;
2214 nfc_tmg.tWP = TO_CYCLES(DIV_ROUND_UP(sdr->tWC_min, 2), period_ns) - 1;
2215 nfc_tmg.tWH = nfc_tmg.tWP;
2216 nfc_tmg.tCS = TO_CYCLES(sdr->tCS_min, period_ns);
2217 nfc_tmg.tCH = TO_CYCLES(sdr->tCH_min, period_ns) - 1;
2218 nfc_tmg.tADL = TO_CYCLES(sdr->tADL_min, period_ns);
2219 /*
2220 * Read delay is the time of propagation from SoC pins to NFC internal
2221 * logic. With non-EDO timings, this is MIN_RD_DEL_CNT clock cycles. In
2222 * EDO mode, an additional delay of tRH must be taken into account so
2223 * the data is sampled on the falling edge instead of the rising edge.
2224 */
2225 read_delay = sdr->tRC_min >= 30000 ?
2226 MIN_RD_DEL_CNT : MIN_RD_DEL_CNT + nfc_tmg.tRH;
2227
2228 nfc_tmg.tAR = TO_CYCLES(sdr->tAR_min, period_ns);
2229 /*
2230 * tWHR and tRHW are supposed to be read to write delays (and vice
2231 * versa) but in some cases, ie. when doing a change column, they must
2232 * be greater than that to be sure tCCS delay is respected.
2233 */
2234 nfc_tmg.tWHR = TO_CYCLES(max_t(int, sdr->tWHR_min, sdr->tCCS_min),
2235 period_ns) - 2,
2236 nfc_tmg.tRHW = TO_CYCLES(max_t(int, sdr->tRHW_min, sdr->tCCS_min),
2237 period_ns);
2238
2239 /* Use WAIT_MODE (wait for RB line) instead of only relying on delays */
2240 nfc_tmg.tR = TO_CYCLES(sdr->tWB_max, period_ns);
2241
2242 if (chipnr < 0)
2243 return 0;
2244
2245 marvell_nand->ndtr0 =
2246 NDTR0_TRP(nfc_tmg.tRP) |
2247 NDTR0_TRH(nfc_tmg.tRH) |
2248 NDTR0_ETRP(nfc_tmg.tRP) |
2249 NDTR0_TWP(nfc_tmg.tWP) |
2250 NDTR0_TWH(nfc_tmg.tWH) |
2251 NDTR0_TCS(nfc_tmg.tCS) |
2252 NDTR0_TCH(nfc_tmg.tCH) |
2253 NDTR0_RD_CNT_DEL(read_delay) |
2254 NDTR0_SELCNTR |
2255 NDTR0_TADL(nfc_tmg.tADL);
2256
2257 marvell_nand->ndtr1 =
2258 NDTR1_TAR(nfc_tmg.tAR) |
2259 NDTR1_TWHR(nfc_tmg.tWHR) |
2260 NDTR1_TRHW(nfc_tmg.tRHW) |
2261 NDTR1_WAIT_MODE |
2262 NDTR1_TR(nfc_tmg.tR);
2263
2264 return 0;
2265}
2266
/*
 * Discover, configure and register one NAND chip (possibly spanning several
 * chip selects), described either by platform data (legacy) or by the DT
 * node @np. Returns 0 on success or a negative error code.
 */
static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
				  struct device_node *np)
{
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(dev);
	struct marvell_nand_chip *marvell_nand;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int nsels, ret, i;
	u32 cs, rb;

	/*
	 * The legacy "num-cs" property indicates the number of CS on the only
	 * chip connected to the controller (legacy bindings does not support
	 * more than one chip). CS are only incremented one by one while the RB
	 * pin is always the #0.
	 *
	 * When not using legacy bindings, a couple of "reg" and "nand-rb"
	 * properties must be filled. For each chip, expressed as a subnode,
	 * "reg" points to the CS lines and "nand-rb" to the RB line.
	 */
	if (pdata) {
		nsels = 1;
	} else if (nfc->caps->legacy_of_bindings &&
		   !of_get_property(np, "num-cs", &nsels)) {
		dev_err(dev, "missing num-cs property\n");
		return -EINVAL;
	} else if (!of_get_property(np, "reg", &nsels)) {
		dev_err(dev, "missing reg property\n");
		return -EINVAL;
	}

	/* of_get_property() reported a size in bytes: convert to a count */
	if (!pdata)
		nsels /= sizeof(u32);
	if (!nsels) {
		dev_err(dev, "invalid reg property size\n");
		return -EINVAL;
	}

	/* Alloc the nand chip structure */
	marvell_nand = devm_kzalloc(dev, sizeof(*marvell_nand) +
				    (nsels *
				     sizeof(struct marvell_nand_chip_sel)),
				    GFP_KERNEL);
	if (!marvell_nand) {
		dev_err(dev, "could not allocate chip structure\n");
		return -ENOMEM;
	}

	marvell_nand->nsels = nsels;
	marvell_nand->selected_die = -1;

	/* Resolve the CS and RB line of each chip select */
	for (i = 0; i < nsels; i++) {
		if (pdata || nfc->caps->legacy_of_bindings) {
			/*
			 * Legacy bindings use the CS lines in natural
			 * order (0, 1, ...)
			 */
			cs = i;
		} else {
			/* Retrieve CS id */
			ret = of_property_read_u32_index(np, "reg", i, &cs);
			if (ret) {
				dev_err(dev, "could not retrieve reg property: %d\n",
					ret);
				return ret;
			}
		}

		if (cs >= nfc->caps->max_cs_nb) {
			dev_err(dev, "invalid reg value: %u (max CS = %d)\n",
				cs, nfc->caps->max_cs_nb);
			return -EINVAL;
		}

		/* Each CS line may only be claimed once across all chips */
		if (test_and_set_bit(cs, &nfc->assigned_cs)) {
			dev_err(dev, "CS %d already assigned\n", cs);
			return -EINVAL;
		}

		/*
		 * The cs variable represents the chip select id, which must be
		 * converted in bit fields for NDCB0 and NDCB2 to select the
		 * right chip. Unfortunately, due to a lack of information on
		 * the subject and incoherent documentation, the user should not
		 * use CS1 and CS3 at all as asserting them is not supported in
		 * a reliable way (due to multiplexing inside ADDR5 field).
		 */
		marvell_nand->sels[i].cs = cs;
		switch (cs) {
		case 0:
		case 2:
			marvell_nand->sels[i].ndcb0_csel = 0;
			break;
		case 1:
		case 3:
			marvell_nand->sels[i].ndcb0_csel = NDCB0_CSEL;
			break;
		default:
			return -EINVAL;
		}

		/* Retrieve RB id */
		if (pdata || nfc->caps->legacy_of_bindings) {
			/* Legacy bindings always use RB #0 */
			rb = 0;
		} else {
			ret = of_property_read_u32_index(np, "nand-rb", i,
							 &rb);
			if (ret) {
				dev_err(dev,
					"could not retrieve RB property: %d\n",
					ret);
				return ret;
			}
		}

		if (rb >= nfc->caps->max_rb_nb) {
			dev_err(dev, "invalid reg value: %u (max RB = %d)\n",
				rb, nfc->caps->max_rb_nb);
			return -EINVAL;
		}

		marvell_nand->sels[i].rb = rb;
	}

	chip = &marvell_nand->chip;
	chip->controller = &nfc->controller;
	nand_set_flash_node(chip, np);

	/* Install the controller hooks */
	chip->exec_op = marvell_nfc_exec_op;
	chip->select_chip = marvell_nfc_select_chip;
	if (nfc->caps->is_nfcv2 &&
	    !of_property_read_bool(np, "marvell,nand-keep-config"))
		chip->setup_data_interface = marvell_nfc_setup_data_interface;

	mtd = nand_to_mtd(chip);
	mtd->dev.parent = dev;

	/*
	 * Default to HW ECC engine mode. If the nand-ecc-mode property is given
	 * in the DT node, this entry will be overwritten in nand_scan_ident().
	 */
	chip->ecc.mode = NAND_ECC_HW;

	/*
	 * Save a reference value for timing registers before
	 * ->setup_data_interface() is called.
	 */
	marvell_nand->ndtr0 = readl_relaxed(nfc->regs + NDTR0);
	marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1);

	chip->options |= NAND_BUSWIDTH_AUTO;
	ret = nand_scan_ident(mtd, marvell_nand->nsels, NULL);
	if (ret) {
		dev_err(dev, "could not identify the nand chip\n");
		return ret;
	}

	if (pdata && pdata->flash_bbt)
		chip->bbt_options |= NAND_BBT_USE_FLASH;

	if (chip->bbt_options & NAND_BBT_USE_FLASH) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/* Save the chip-specific fields of NDCR */
	marvell_nand->ndcr = NDCR_PAGE_SZ(mtd->writesize);
	if (chip->options & NAND_BUSWIDTH_16)
		marvell_nand->ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;

	/*
	 * On small page NANDs, only one cycle is needed to pass the
	 * column address.
	 */
	if (mtd->writesize <= 512) {
		marvell_nand->addr_cyc = 1;
	} else {
		marvell_nand->addr_cyc = 2;
		marvell_nand->ndcr |= NDCR_RA_START;
	}

	/*
	 * Now add the number of cycles needed to pass the row
	 * address.
	 *
	 * Addressing a chip using CS 2 or 3 should also need the third row
	 * cycle but due to inconsistance in the documentation and lack of
	 * hardware to test this situation, this case is not supported.
	 */
	if (chip->options & NAND_ROW_ADDR_3)
		marvell_nand->addr_cyc += 3;
	else
		marvell_nand->addr_cyc += 2;

	if (pdata) {
		chip->ecc.size = pdata->ecc_step_size;
		chip->ecc.strength = pdata->ecc_strength;
	}

	ret = marvell_nand_ecc_init(mtd, &chip->ecc);
	if (ret) {
		dev_err(dev, "ECC init failed: %d\n", ret);
		return ret;
	}

	if (chip->ecc.mode == NAND_ECC_HW) {
		/*
		 * Subpage write not available with hardware ECC, prohibit also
		 * subpage read as in userspace subpage access would still be
		 * allowed and subpage write, if used, would lead to numerous
		 * uncorrectable ECC errors.
		 */
		chip->options |= NAND_NO_SUBPAGE_WRITE;
	}

	if (pdata || nfc->caps->legacy_of_bindings) {
		/*
		 * We keep the MTD name unchanged to avoid breaking platforms
		 * where the MTD cmdline parser is used and the bootloader
		 * has not been updated to use the new naming scheme.
		 */
		mtd->name = "pxa3xx_nand-0";
	} else if (!mtd->name) {
		/*
		 * If the new bindings are used and the bootloader has not been
		 * updated to pass a new mtdparts parameter on the cmdline, you
		 * should define the following property in your NAND node, ie:
		 *
		 * label = "main-storage";
		 *
		 * This way, mtd->name will be set by the core when
		 * nand_set_flash_node() is called.
		 */
		mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
					   "%s:nand.%d", dev_name(nfc->dev),
					   marvell_nand->sels[0].cs);
		if (!mtd->name) {
			dev_err(nfc->dev, "Failed to allocate mtd->name\n");
			return -ENOMEM;
		}
	}

	ret = nand_scan_tail(mtd);
	if (ret) {
		dev_err(dev, "nand_scan_tail failed: %d\n", ret);
		return ret;
	}

	if (pdata)
		/* Legacy bindings support only one chip */
		ret = mtd_device_register(mtd, pdata->parts[0],
					  pdata->nr_parts[0]);
	else
		ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(dev, "failed to register mtd device: %d\n", ret);
		nand_release(mtd);
		return ret;
	}

	list_add_tail(&marvell_nand->node, &nfc->chips);

	return 0;
}
2537
2538static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
2539{
2540 struct device_node *np = dev->of_node;
2541 struct device_node *nand_np;
2542 int max_cs = nfc->caps->max_cs_nb;
2543 int nchips;
2544 int ret;
2545
2546 if (!np)
2547 nchips = 1;
2548 else
2549 nchips = of_get_child_count(np);
2550
2551 if (nchips > max_cs) {
2552 dev_err(dev, "too many NAND chips: %d (max = %d CS)\n", nchips,
2553 max_cs);
2554 return -EINVAL;
2555 }
2556
2557 /*
2558 * Legacy bindings do not use child nodes to exhibit NAND chip
2559 * properties and layout. Instead, NAND properties are mixed with the
2560 * controller ones, and partitions are defined as direct subnodes of the
2561 * NAND controller node.
2562 */
2563 if (nfc->caps->legacy_of_bindings) {
2564 ret = marvell_nand_chip_init(dev, nfc, np);
2565 return ret;
2566 }
2567
2568 for_each_child_of_node(np, nand_np) {
2569 ret = marvell_nand_chip_init(dev, nfc, nand_np);
2570 if (ret) {
2571 of_node_put(nand_np);
2572 return ret;
2573 }
2574 }
2575
2576 return 0;
2577}
2578
2579static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
2580{
2581 struct marvell_nand_chip *entry, *temp;
2582
2583 list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
2584 nand_release(nand_to_mtd(&entry->chip));
2585 list_del(&entry->node);
2586 }
2587}
2588
2589static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
2590{
2591 struct platform_device *pdev = container_of(nfc->dev,
2592 struct platform_device,
2593 dev);
2594 struct dma_slave_config config = {};
2595 struct resource *r;
2596 dma_cap_mask_t mask;
2597 struct pxad_param param;
2598 int ret;
2599
2600 if (!IS_ENABLED(CONFIG_PXA_DMA)) {
2601 dev_warn(nfc->dev,
2602 "DMA not enabled in configuration\n");
2603 return -ENOTSUPP;
2604 }
2605
2606 ret = dma_set_mask_and_coherent(nfc->dev, DMA_BIT_MASK(32));
2607 if (ret)
2608 return ret;
2609
2610 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
2611 if (!r) {
2612 dev_err(nfc->dev, "No resource defined for data DMA\n");
2613 return -ENXIO;
2614 }
2615
2616 param.drcmr = r->start;
2617 param.prio = PXAD_PRIO_LOWEST;
2618 dma_cap_zero(mask);
2619 dma_cap_set(DMA_SLAVE, mask);
2620 nfc->dma_chan =
2621 dma_request_slave_channel_compat(mask, pxad_filter_fn,
2622 &param, nfc->dev,
2623 "data");
2624 if (!nfc->dma_chan) {
2625 dev_err(nfc->dev,
2626 "Unable to request data DMA channel\n");
2627 return -ENODEV;
2628 }
2629
2630 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2631 if (!r)
2632 return -ENXIO;
2633
2634 config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2635 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2636 config.src_addr = r->start + NDDB;
2637 config.dst_addr = r->start + NDDB;
2638 config.src_maxburst = 32;
2639 config.dst_maxburst = 32;
2640 ret = dmaengine_slave_config(nfc->dma_chan, &config);
2641 if (ret < 0) {
2642 dev_err(nfc->dev, "Failed to configure DMA channel\n");
2643 return ret;
2644 }
2645
2646 /*
2647 * DMA must act on length multiple of 32 and this length may be
2648 * bigger than the destination buffer. Use this buffer instead
2649 * for DMA transfers and then copy the desired amount of data to
2650 * the provided buffer.
2651 */
2652 nfc->dma_buf = kmalloc(MAX_CHUNK_SIZE, GFP_KERNEL | GFP_DMA);
2653 if (!nfc->dma_buf)
2654 return -ENOMEM;
2655
2656 nfc->use_dma = true;
2657
2658 return 0;
2659}
2660
2661static int marvell_nfc_init(struct marvell_nfc *nfc)
2662{
2663 struct device_node *np = nfc->dev->of_node;
2664
2665 /*
2666 * Some SoCs like A7k/A8k need to enable manually the NAND
2667 * controller, gated clocks and reset bits to avoid being bootloader
2668 * dependent. This is done through the use of the System Functions
2669 * registers.
2670 */
2671 if (nfc->caps->need_system_controller) {
2672 struct regmap *sysctrl_base =
2673 syscon_regmap_lookup_by_phandle(np,
2674 "marvell,system-controller");
2675 u32 reg;
2676
2677 if (IS_ERR(sysctrl_base))
2678 return PTR_ERR(sysctrl_base);
2679
2680 reg = GENCONF_SOC_DEVICE_MUX_NFC_EN |
2681 GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST |
2682 GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST |
2683 GENCONF_SOC_DEVICE_MUX_NFC_INT_EN;
2684 regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
2685
2686 regmap_read(sysctrl_base, GENCONF_CLK_GATING_CTRL, &reg);
2687 reg |= GENCONF_CLK_GATING_CTRL_ND_GATE;
2688 regmap_write(sysctrl_base, GENCONF_CLK_GATING_CTRL, reg);
2689
2690 regmap_read(sysctrl_base, GENCONF_ND_CLK_CTRL, &reg);
2691 reg |= GENCONF_ND_CLK_CTRL_EN;
2692 regmap_write(sysctrl_base, GENCONF_ND_CLK_CTRL, reg);
2693 }
2694
2695 /* Configure the DMA if appropriate */
2696 if (!nfc->caps->is_nfcv2)
2697 marvell_nfc_init_dma(nfc);
2698
2699 /*
2700 * ECC operations and interruptions are only enabled when specifically
2701 * needed. ECC shall not be activated in the early stages (fails probe).
2702 * Arbiter flag, even if marked as "reserved", must be set (empirical).
2703 * SPARE_EN bit must always be set or ECC bytes will not be at the same
2704 * offset in the read page and this will fail the protection.
2705 */
2706 writel_relaxed(NDCR_ALL_INT | NDCR_ND_ARB_EN | NDCR_SPARE_EN |
2707 NDCR_RD_ID_CNT(NFCV1_READID_LEN), nfc->regs + NDCR);
2708 writel_relaxed(0xFFFFFFFF, nfc->regs + NDSR);
2709 writel_relaxed(0, nfc->regs + NDECCCTRL);
2710
2711 return 0;
2712}
2713
2714static int marvell_nfc_probe(struct platform_device *pdev)
2715{
2716 struct device *dev = &pdev->dev;
2717 struct resource *r;
2718 struct marvell_nfc *nfc;
2719 int ret;
2720 int irq;
2721
2722 nfc = devm_kzalloc(&pdev->dev, sizeof(struct marvell_nfc),
2723 GFP_KERNEL);
2724 if (!nfc)
2725 return -ENOMEM;
2726
2727 nfc->dev = dev;
2728 nand_hw_control_init(&nfc->controller);
2729 INIT_LIST_HEAD(&nfc->chips);
2730
2731 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2732 nfc->regs = devm_ioremap_resource(dev, r);
2733 if (IS_ERR(nfc->regs))
2734 return PTR_ERR(nfc->regs);
2735
2736 irq = platform_get_irq(pdev, 0);
2737 if (irq < 0) {
2738 dev_err(dev, "failed to retrieve irq\n");
2739 return irq;
2740 }
2741
2742 nfc->ecc_clk = devm_clk_get(&pdev->dev, NULL);
2743 if (IS_ERR(nfc->ecc_clk))
2744 return PTR_ERR(nfc->ecc_clk);
2745
2746 ret = clk_prepare_enable(nfc->ecc_clk);
2747 if (ret)
2748 return ret;
2749
2750 marvell_nfc_disable_int(nfc, NDCR_ALL_INT);
2751 marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
2752 ret = devm_request_irq(dev, irq, marvell_nfc_isr,
2753 0, "marvell-nfc", nfc);
2754 if (ret)
2755 goto unprepare_clk;
2756
2757 /* Get NAND controller capabilities */
2758 if (pdev->id_entry)
2759 nfc->caps = (void *)pdev->id_entry->driver_data;
2760 else
2761 nfc->caps = of_device_get_match_data(&pdev->dev);
2762
2763 if (!nfc->caps) {
2764 dev_err(dev, "Could not retrieve NFC caps\n");
2765 ret = -EINVAL;
2766 goto unprepare_clk;
2767 }
2768
2769 /* Init the controller and then probe the chips */
2770 ret = marvell_nfc_init(nfc);
2771 if (ret)
2772 goto unprepare_clk;
2773
2774 platform_set_drvdata(pdev, nfc);
2775
2776 ret = marvell_nand_chips_init(dev, nfc);
2777 if (ret)
2778 goto unprepare_clk;
2779
2780 return 0;
2781
2782unprepare_clk:
2783 clk_disable_unprepare(nfc->ecc_clk);
2784
2785 return ret;
2786}
2787
2788static int marvell_nfc_remove(struct platform_device *pdev)
2789{
2790 struct marvell_nfc *nfc = platform_get_drvdata(pdev);
2791
2792 marvell_nand_chips_cleanup(nfc);
2793
2794 if (nfc->use_dma) {
2795 dmaengine_terminate_all(nfc->dma_chan);
2796 dma_release_channel(nfc->dma_chan);
2797 }
2798
2799 clk_disable_unprepare(nfc->ecc_clk);
2800
2801 return 0;
2802}
2803
2804static const struct marvell_nfc_caps marvell_armada_8k_nfc_caps = {
2805 .max_cs_nb = 4,
2806 .max_rb_nb = 2,
2807 .need_system_controller = true,
2808 .is_nfcv2 = true,
2809};
2810
2811static const struct marvell_nfc_caps marvell_armada370_nfc_caps = {
2812 .max_cs_nb = 4,
2813 .max_rb_nb = 2,
2814 .is_nfcv2 = true,
2815};
2816
2817static const struct marvell_nfc_caps marvell_pxa3xx_nfc_caps = {
2818 .max_cs_nb = 2,
2819 .max_rb_nb = 1,
2820 .use_dma = true,
2821};
2822
2823static const struct marvell_nfc_caps marvell_armada_8k_nfc_legacy_caps = {
2824 .max_cs_nb = 4,
2825 .max_rb_nb = 2,
2826 .need_system_controller = true,
2827 .legacy_of_bindings = true,
2828 .is_nfcv2 = true,
2829};
2830
2831static const struct marvell_nfc_caps marvell_armada370_nfc_legacy_caps = {
2832 .max_cs_nb = 4,
2833 .max_rb_nb = 2,
2834 .legacy_of_bindings = true,
2835 .is_nfcv2 = true,
2836};
2837
2838static const struct marvell_nfc_caps marvell_pxa3xx_nfc_legacy_caps = {
2839 .max_cs_nb = 2,
2840 .max_rb_nb = 1,
2841 .legacy_of_bindings = true,
2842 .use_dma = true,
2843};
2844
2845static const struct platform_device_id marvell_nfc_platform_ids[] = {
2846 {
2847 .name = "pxa3xx-nand",
2848 .driver_data = (kernel_ulong_t)&marvell_pxa3xx_nfc_legacy_caps,
2849 },
2850 { /* sentinel */ },
2851};
2852MODULE_DEVICE_TABLE(platform, marvell_nfc_platform_ids);
2853
2854static const struct of_device_id marvell_nfc_of_ids[] = {
2855 {
2856 .compatible = "marvell,armada-8k-nand-controller",
2857 .data = &marvell_armada_8k_nfc_caps,
2858 },
2859 {
2860 .compatible = "marvell,armada370-nand-controller",
2861 .data = &marvell_armada370_nfc_caps,
2862 },
2863 {
2864 .compatible = "marvell,pxa3xx-nand-controller",
2865 .data = &marvell_pxa3xx_nfc_caps,
2866 },
2867 /* Support for old/deprecated bindings: */
2868 {
2869 .compatible = "marvell,armada-8k-nand",
2870 .data = &marvell_armada_8k_nfc_legacy_caps,
2871 },
2872 {
2873 .compatible = "marvell,armada370-nand",
2874 .data = &marvell_armada370_nfc_legacy_caps,
2875 },
2876 {
2877 .compatible = "marvell,pxa3xx-nand",
2878 .data = &marvell_pxa3xx_nfc_legacy_caps,
2879 },
2880 { /* sentinel */ },
2881};
2882MODULE_DEVICE_TABLE(of, marvell_nfc_of_ids);
2883
2884static struct platform_driver marvell_nfc_driver = {
2885 .driver = {
2886 .name = "marvell-nfc",
2887 .of_match_table = marvell_nfc_of_ids,
2888 },
2889 .id_table = marvell_nfc_platform_ids,
2890 .probe = marvell_nfc_probe,
2891 .remove = marvell_nfc_remove,
2892};
2893module_platform_driver(marvell_nfc_driver);
2894
2895MODULE_LICENSE("GPL");
2896MODULE_DESCRIPTION("Marvell NAND controller driver");
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index c51d214d169e..40d86a861a70 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -34,34 +34,28 @@
34 34
35#define ECC_ENCCON (0x00) 35#define ECC_ENCCON (0x00)
36#define ECC_ENCCNFG (0x04) 36#define ECC_ENCCNFG (0x04)
37#define ECC_MODE_SHIFT (5)
38#define ECC_MS_SHIFT (16) 37#define ECC_MS_SHIFT (16)
39#define ECC_ENCDIADDR (0x08) 38#define ECC_ENCDIADDR (0x08)
40#define ECC_ENCIDLE (0x0C) 39#define ECC_ENCIDLE (0x0C)
41#define ECC_ENCIRQ_EN (0x80)
42#define ECC_ENCIRQ_STA (0x84)
43#define ECC_DECCON (0x100) 40#define ECC_DECCON (0x100)
44#define ECC_DECCNFG (0x104) 41#define ECC_DECCNFG (0x104)
45#define DEC_EMPTY_EN BIT(31) 42#define DEC_EMPTY_EN BIT(31)
46#define DEC_CNFG_CORRECT (0x3 << 12) 43#define DEC_CNFG_CORRECT (0x3 << 12)
47#define ECC_DECIDLE (0x10C) 44#define ECC_DECIDLE (0x10C)
48#define ECC_DECENUM0 (0x114) 45#define ECC_DECENUM0 (0x114)
49#define ECC_DECDONE (0x124)
50#define ECC_DECIRQ_EN (0x200)
51#define ECC_DECIRQ_STA (0x204)
52 46
53#define ECC_TIMEOUT (500000) 47#define ECC_TIMEOUT (500000)
54 48
55#define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE) 49#define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
56#define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON) 50#define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)
57#define ECC_IRQ_REG(op) ((op) == ECC_ENCODE ? \
58 ECC_ENCIRQ_EN : ECC_DECIRQ_EN)
59 51
60struct mtk_ecc_caps { 52struct mtk_ecc_caps {
61 u32 err_mask; 53 u32 err_mask;
62 const u8 *ecc_strength; 54 const u8 *ecc_strength;
55 const u32 *ecc_regs;
63 u8 num_ecc_strength; 56 u8 num_ecc_strength;
64 u32 encode_parity_reg0; 57 u8 ecc_mode_shift;
58 u32 parity_bits;
65 int pg_irq_sel; 59 int pg_irq_sel;
66}; 60};
67 61
@@ -89,6 +83,46 @@ static const u8 ecc_strength_mt2712[] = {
89 40, 44, 48, 52, 56, 60, 68, 72, 80 83 40, 44, 48, 52, 56, 60, 68, 72, 80
90}; 84};
91 85
86static const u8 ecc_strength_mt7622[] = {
87 4, 6, 8, 10, 12, 14, 16
88};
89
90enum mtk_ecc_regs {
91 ECC_ENCPAR00,
92 ECC_ENCIRQ_EN,
93 ECC_ENCIRQ_STA,
94 ECC_DECDONE,
95 ECC_DECIRQ_EN,
96 ECC_DECIRQ_STA,
97};
98
99static int mt2701_ecc_regs[] = {
100 [ECC_ENCPAR00] = 0x10,
101 [ECC_ENCIRQ_EN] = 0x80,
102 [ECC_ENCIRQ_STA] = 0x84,
103 [ECC_DECDONE] = 0x124,
104 [ECC_DECIRQ_EN] = 0x200,
105 [ECC_DECIRQ_STA] = 0x204,
106};
107
108static int mt2712_ecc_regs[] = {
109 [ECC_ENCPAR00] = 0x300,
110 [ECC_ENCIRQ_EN] = 0x80,
111 [ECC_ENCIRQ_STA] = 0x84,
112 [ECC_DECDONE] = 0x124,
113 [ECC_DECIRQ_EN] = 0x200,
114 [ECC_DECIRQ_STA] = 0x204,
115};
116
117static int mt7622_ecc_regs[] = {
118 [ECC_ENCPAR00] = 0x10,
119 [ECC_ENCIRQ_EN] = 0x30,
120 [ECC_ENCIRQ_STA] = 0x34,
121 [ECC_DECDONE] = 0x11c,
122 [ECC_DECIRQ_EN] = 0x140,
123 [ECC_DECIRQ_STA] = 0x144,
124};
125
92static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc, 126static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
93 enum mtk_ecc_operation op) 127 enum mtk_ecc_operation op)
94{ 128{
@@ -107,32 +141,30 @@ static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
107static irqreturn_t mtk_ecc_irq(int irq, void *id) 141static irqreturn_t mtk_ecc_irq(int irq, void *id)
108{ 142{
109 struct mtk_ecc *ecc = id; 143 struct mtk_ecc *ecc = id;
110 enum mtk_ecc_operation op;
111 u32 dec, enc; 144 u32 dec, enc;
112 145
113 dec = readw(ecc->regs + ECC_DECIRQ_STA) & ECC_IRQ_EN; 146 dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA])
147 & ECC_IRQ_EN;
114 if (dec) { 148 if (dec) {
115 op = ECC_DECODE; 149 dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
116 dec = readw(ecc->regs + ECC_DECDONE);
117 if (dec & ecc->sectors) { 150 if (dec & ecc->sectors) {
118 /* 151 /*
119 * Clear decode IRQ status once again to ensure that 152 * Clear decode IRQ status once again to ensure that
120 * there will be no extra IRQ. 153 * there will be no extra IRQ.
121 */ 154 */
122 readw(ecc->regs + ECC_DECIRQ_STA); 155 readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]);
123 ecc->sectors = 0; 156 ecc->sectors = 0;
124 complete(&ecc->done); 157 complete(&ecc->done);
125 } else { 158 } else {
126 return IRQ_HANDLED; 159 return IRQ_HANDLED;
127 } 160 }
128 } else { 161 } else {
129 enc = readl(ecc->regs + ECC_ENCIRQ_STA) & ECC_IRQ_EN; 162 enc = readl(ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_STA])
130 if (enc) { 163 & ECC_IRQ_EN;
131 op = ECC_ENCODE; 164 if (enc)
132 complete(&ecc->done); 165 complete(&ecc->done);
133 } else { 166 else
134 return IRQ_NONE; 167 return IRQ_NONE;
135 }
136 } 168 }
137 169
138 return IRQ_HANDLED; 170 return IRQ_HANDLED;
@@ -160,7 +192,7 @@ static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
160 /* configure ECC encoder (in bits) */ 192 /* configure ECC encoder (in bits) */
161 enc_sz = config->len << 3; 193 enc_sz = config->len << 3;
162 194
163 reg = ecc_bit | (config->mode << ECC_MODE_SHIFT); 195 reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
164 reg |= (enc_sz << ECC_MS_SHIFT); 196 reg |= (enc_sz << ECC_MS_SHIFT);
165 writel(reg, ecc->regs + ECC_ENCCNFG); 197 writel(reg, ecc->regs + ECC_ENCCNFG);
166 198
@@ -171,9 +203,9 @@ static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
171 } else { 203 } else {
172 /* configure ECC decoder (in bits) */ 204 /* configure ECC decoder (in bits) */
173 dec_sz = (config->len << 3) + 205 dec_sz = (config->len << 3) +
174 config->strength * ECC_PARITY_BITS; 206 config->strength * ecc->caps->parity_bits;
175 207
176 reg = ecc_bit | (config->mode << ECC_MODE_SHIFT); 208 reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
177 reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT; 209 reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
178 reg |= DEC_EMPTY_EN; 210 reg |= DEC_EMPTY_EN;
179 writel(reg, ecc->regs + ECC_DECCNFG); 211 writel(reg, ecc->regs + ECC_DECCNFG);
@@ -291,7 +323,12 @@ int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
291 */ 323 */
292 if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE) 324 if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE)
293 reg_val |= ECC_PG_IRQ_SEL; 325 reg_val |= ECC_PG_IRQ_SEL;
294 writew(reg_val, ecc->regs + ECC_IRQ_REG(op)); 326 if (op == ECC_ENCODE)
327 writew(reg_val, ecc->regs +
328 ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
329 else
330 writew(reg_val, ecc->regs +
331 ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
295 } 332 }
296 333
297 writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op)); 334 writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));
@@ -310,13 +347,17 @@ void mtk_ecc_disable(struct mtk_ecc *ecc)
310 347
311 /* disable it */ 348 /* disable it */
312 mtk_ecc_wait_idle(ecc, op); 349 mtk_ecc_wait_idle(ecc, op);
313 if (op == ECC_DECODE) 350 if (op == ECC_DECODE) {
314 /* 351 /*
315 * Clear decode IRQ status in case there is a timeout to wait 352 * Clear decode IRQ status in case there is a timeout to wait
316 * decode IRQ. 353 * decode IRQ.
317 */ 354 */
318 readw(ecc->regs + ECC_DECIRQ_STA); 355 readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
319 writew(0, ecc->regs + ECC_IRQ_REG(op)); 356 writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
357 } else {
358 writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
359 }
360
320 writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op)); 361 writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
321 362
322 mutex_unlock(&ecc->lock); 363 mutex_unlock(&ecc->lock);
@@ -367,11 +408,11 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
367 mtk_ecc_wait_idle(ecc, ECC_ENCODE); 408 mtk_ecc_wait_idle(ecc, ECC_ENCODE);
368 409
369 /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */ 410 /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
370 len = (config->strength * ECC_PARITY_BITS + 7) >> 3; 411 len = (config->strength * ecc->caps->parity_bits + 7) >> 3;
371 412
372 /* write the parity bytes generated by the ECC back to temp buffer */ 413 /* write the parity bytes generated by the ECC back to temp buffer */
373 __ioread32_copy(ecc->eccdata, 414 __ioread32_copy(ecc->eccdata,
374 ecc->regs + ecc->caps->encode_parity_reg0, 415 ecc->regs + ecc->caps->ecc_regs[ECC_ENCPAR00],
375 round_up(len, 4)); 416 round_up(len, 4));
376 417
377 /* copy into possibly unaligned OOB region with actual length */ 418 /* copy into possibly unaligned OOB region with actual length */
@@ -404,22 +445,42 @@ void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p)
404} 445}
405EXPORT_SYMBOL(mtk_ecc_adjust_strength); 446EXPORT_SYMBOL(mtk_ecc_adjust_strength);
406 447
448unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc)
449{
450 return ecc->caps->parity_bits;
451}
452EXPORT_SYMBOL(mtk_ecc_get_parity_bits);
453
407static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = { 454static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
408 .err_mask = 0x3f, 455 .err_mask = 0x3f,
409 .ecc_strength = ecc_strength_mt2701, 456 .ecc_strength = ecc_strength_mt2701,
457 .ecc_regs = mt2701_ecc_regs,
410 .num_ecc_strength = 20, 458 .num_ecc_strength = 20,
411 .encode_parity_reg0 = 0x10, 459 .ecc_mode_shift = 5,
460 .parity_bits = 14,
412 .pg_irq_sel = 0, 461 .pg_irq_sel = 0,
413}; 462};
414 463
415static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = { 464static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
416 .err_mask = 0x7f, 465 .err_mask = 0x7f,
417 .ecc_strength = ecc_strength_mt2712, 466 .ecc_strength = ecc_strength_mt2712,
467 .ecc_regs = mt2712_ecc_regs,
418 .num_ecc_strength = 23, 468 .num_ecc_strength = 23,
419 .encode_parity_reg0 = 0x300, 469 .ecc_mode_shift = 5,
470 .parity_bits = 14,
420 .pg_irq_sel = 1, 471 .pg_irq_sel = 1,
421}; 472};
422 473
474static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = {
475 .err_mask = 0x3f,
476 .ecc_strength = ecc_strength_mt7622,
477 .ecc_regs = mt7622_ecc_regs,
478 .num_ecc_strength = 7,
479 .ecc_mode_shift = 4,
480 .parity_bits = 13,
481 .pg_irq_sel = 0,
482};
483
423static const struct of_device_id mtk_ecc_dt_match[] = { 484static const struct of_device_id mtk_ecc_dt_match[] = {
424 { 485 {
425 .compatible = "mediatek,mt2701-ecc", 486 .compatible = "mediatek,mt2701-ecc",
@@ -427,6 +488,9 @@ static const struct of_device_id mtk_ecc_dt_match[] = {
427 }, { 488 }, {
428 .compatible = "mediatek,mt2712-ecc", 489 .compatible = "mediatek,mt2712-ecc",
429 .data = &mtk_ecc_caps_mt2712, 490 .data = &mtk_ecc_caps_mt2712,
491 }, {
492 .compatible = "mediatek,mt7622-ecc",
493 .data = &mtk_ecc_caps_mt7622,
430 }, 494 },
431 {}, 495 {},
432}; 496};
@@ -452,7 +516,7 @@ static int mtk_ecc_probe(struct platform_device *pdev)
452 516
453 max_eccdata_size = ecc->caps->num_ecc_strength - 1; 517 max_eccdata_size = ecc->caps->num_ecc_strength - 1;
454 max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size]; 518 max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size];
455 max_eccdata_size = (max_eccdata_size * ECC_PARITY_BITS + 7) >> 3; 519 max_eccdata_size = (max_eccdata_size * ecc->caps->parity_bits + 7) >> 3;
456 max_eccdata_size = round_up(max_eccdata_size, 4); 520 max_eccdata_size = round_up(max_eccdata_size, 4);
457 ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL); 521 ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL);
458 if (!ecc->eccdata) 522 if (!ecc->eccdata)
diff --git a/drivers/mtd/nand/mtk_ecc.h b/drivers/mtd/nand/mtk_ecc.h
index d245c14f1b80..a455df080952 100644
--- a/drivers/mtd/nand/mtk_ecc.h
+++ b/drivers/mtd/nand/mtk_ecc.h
@@ -14,8 +14,6 @@
14 14
15#include <linux/types.h> 15#include <linux/types.h>
16 16
17#define ECC_PARITY_BITS (14)
18
19enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1}; 17enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
20enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE}; 18enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
21 19
@@ -43,6 +41,7 @@ int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
43int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *); 41int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
44void mtk_ecc_disable(struct mtk_ecc *); 42void mtk_ecc_disable(struct mtk_ecc *);
45void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p); 43void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p);
44unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc);
46 45
47struct mtk_ecc *of_mtk_ecc_get(struct device_node *); 46struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
48void mtk_ecc_release(struct mtk_ecc *); 47void mtk_ecc_release(struct mtk_ecc *);
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
index d86a7d131cc0..6977da3a26aa 100644
--- a/drivers/mtd/nand/mtk_nand.c
+++ b/drivers/mtd/nand/mtk_nand.c
@@ -97,7 +97,6 @@
97 97
98#define MTK_TIMEOUT (500000) 98#define MTK_TIMEOUT (500000)
99#define MTK_RESET_TIMEOUT (1000000) 99#define MTK_RESET_TIMEOUT (1000000)
100#define MTK_MAX_SECTOR (16)
101#define MTK_NAND_MAX_NSELS (2) 100#define MTK_NAND_MAX_NSELS (2)
102#define MTK_NFC_MIN_SPARE (16) 101#define MTK_NFC_MIN_SPARE (16)
103#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \ 102#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
@@ -109,6 +108,8 @@ struct mtk_nfc_caps {
109 u8 num_spare_size; 108 u8 num_spare_size;
110 u8 pageformat_spare_shift; 109 u8 pageformat_spare_shift;
111 u8 nfi_clk_div; 110 u8 nfi_clk_div;
111 u8 max_sector;
112 u32 max_sector_size;
112}; 113};
113 114
114struct mtk_nfc_bad_mark_ctl { 115struct mtk_nfc_bad_mark_ctl {
@@ -173,6 +174,10 @@ static const u8 spare_size_mt2712[] = {
173 74 174 74
174}; 175};
175 176
177static const u8 spare_size_mt7622[] = {
178 16, 26, 27, 28
179};
180
176static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand) 181static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
177{ 182{
178 return container_of(nand, struct mtk_nfc_nand_chip, nand); 183 return container_of(nand, struct mtk_nfc_nand_chip, nand);
@@ -450,7 +455,7 @@ static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd)
450 * set to max sector to allow the HW to continue reading over 455 * set to max sector to allow the HW to continue reading over
451 * unaligned accesses 456 * unaligned accesses
452 */ 457 */
453 reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD; 458 reg = (nfc->caps->max_sector << CON_SEC_SHIFT) | CON_BRD;
454 nfi_writel(nfc, reg, NFI_CON); 459 nfi_writel(nfc, reg, NFI_CON);
455 460
456 /* trigger to fetch data */ 461 /* trigger to fetch data */
@@ -481,7 +486,7 @@ static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte)
481 reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW; 486 reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
482 nfi_writew(nfc, reg, NFI_CNFG); 487 nfi_writew(nfc, reg, NFI_CNFG);
483 488
484 reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR; 489 reg = nfc->caps->max_sector << CON_SEC_SHIFT | CON_BWR;
485 nfi_writel(nfc, reg, NFI_CON); 490 nfi_writel(nfc, reg, NFI_CON);
486 491
487 nfi_writew(nfc, STAR_EN, NFI_STRDATA); 492 nfi_writew(nfc, STAR_EN, NFI_STRDATA);
@@ -761,6 +766,8 @@ static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
761 u32 reg; 766 u32 reg;
762 int ret; 767 int ret;
763 768
769 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
770
764 if (!raw) { 771 if (!raw) {
765 /* OOB => FDM: from register, ECC: from HW */ 772 /* OOB => FDM: from register, ECC: from HW */
766 reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN; 773 reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
@@ -794,7 +801,10 @@ static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
794 if (!raw) 801 if (!raw)
795 mtk_ecc_disable(nfc->ecc); 802 mtk_ecc_disable(nfc->ecc);
796 803
797 return ret; 804 if (ret)
805 return ret;
806
807 return nand_prog_page_end_op(chip);
798} 808}
799 809
800static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd, 810static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd,
@@ -832,18 +842,7 @@ static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd,
832static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, 842static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
833 int page) 843 int page)
834{ 844{
835 int ret; 845 return mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
836
837 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
838
839 ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
840 if (ret < 0)
841 return -EIO;
842
843 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
844 ret = chip->waitfunc(mtd, chip);
845
846 return ret & NAND_STATUS_FAIL ? -EIO : 0;
847} 846}
848 847
849static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors) 848static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
@@ -892,8 +891,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
892 len = sectors * chip->ecc.size + (raw ? sectors * spare : 0); 891 len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
893 buf = bufpoi + start * chip->ecc.size; 892 buf = bufpoi + start * chip->ecc.size;
894 893
895 if (column != 0) 894 nand_read_page_op(chip, page, column, NULL, 0);
896 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1);
897 895
898 addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE); 896 addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
899 rc = dma_mapping_error(nfc->dev, addr); 897 rc = dma_mapping_error(nfc->dev, addr);
@@ -1016,8 +1014,6 @@ static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1016static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, 1014static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1017 int page) 1015 int page)
1018{ 1016{
1019 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1020
1021 return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page); 1017 return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page);
1022} 1018}
1023 1019
@@ -1126,9 +1122,11 @@ static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
1126{ 1122{
1127 struct nand_chip *nand = mtd_to_nand(mtd); 1123 struct nand_chip *nand = mtd_to_nand(mtd);
1128 struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand); 1124 struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
1125 struct mtk_nfc *nfc = nand_get_controller_data(nand);
1129 u32 ecc_bytes; 1126 u32 ecc_bytes;
1130 1127
1131 ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8); 1128 ecc_bytes = DIV_ROUND_UP(nand->ecc.strength *
1129 mtk_ecc_get_parity_bits(nfc->ecc), 8);
1132 1130
1133 fdm->reg_size = chip->spare_per_sector - ecc_bytes; 1131 fdm->reg_size = chip->spare_per_sector - ecc_bytes;
1134 if (fdm->reg_size > NFI_FDM_MAX_SIZE) 1132 if (fdm->reg_size > NFI_FDM_MAX_SIZE)
@@ -1208,7 +1206,8 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
1208 * this controller only supports 512 and 1024 sizes 1206 * this controller only supports 512 and 1024 sizes
1209 */ 1207 */
1210 if (nand->ecc.size < 1024) { 1208 if (nand->ecc.size < 1024) {
1211 if (mtd->writesize > 512) { 1209 if (mtd->writesize > 512 &&
1210 nfc->caps->max_sector_size > 512) {
1212 nand->ecc.size = 1024; 1211 nand->ecc.size = 1024;
1213 nand->ecc.strength <<= 1; 1212 nand->ecc.strength <<= 1;
1214 } else { 1213 } else {
@@ -1223,7 +1222,8 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
1223 return ret; 1222 return ret;
1224 1223
1225 /* calculate oob bytes except ecc parity data */ 1224 /* calculate oob bytes except ecc parity data */
1226 free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3; 1225 free = (nand->ecc.strength * mtk_ecc_get_parity_bits(nfc->ecc)
1226 + 7) >> 3;
1227 free = spare - free; 1227 free = spare - free;
1228 1228
1229 /* 1229 /*
@@ -1233,10 +1233,12 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
1233 */ 1233 */
1234 if (free > NFI_FDM_MAX_SIZE) { 1234 if (free > NFI_FDM_MAX_SIZE) {
1235 spare -= NFI_FDM_MAX_SIZE; 1235 spare -= NFI_FDM_MAX_SIZE;
1236 nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS; 1236 nand->ecc.strength = (spare << 3) /
1237 mtk_ecc_get_parity_bits(nfc->ecc);
1237 } else if (free < 0) { 1238 } else if (free < 0) {
1238 spare -= NFI_FDM_MIN_SIZE; 1239 spare -= NFI_FDM_MIN_SIZE;
1239 nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS; 1240 nand->ecc.strength = (spare << 3) /
1241 mtk_ecc_get_parity_bits(nfc->ecc);
1240 } 1242 }
1241 } 1243 }
1242 1244
@@ -1389,6 +1391,8 @@ static const struct mtk_nfc_caps mtk_nfc_caps_mt2701 = {
1389 .num_spare_size = 16, 1391 .num_spare_size = 16,
1390 .pageformat_spare_shift = 4, 1392 .pageformat_spare_shift = 4,
1391 .nfi_clk_div = 1, 1393 .nfi_clk_div = 1,
1394 .max_sector = 16,
1395 .max_sector_size = 1024,
1392}; 1396};
1393 1397
1394static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = { 1398static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = {
@@ -1396,6 +1400,17 @@ static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = {
1396 .num_spare_size = 19, 1400 .num_spare_size = 19,
1397 .pageformat_spare_shift = 16, 1401 .pageformat_spare_shift = 16,
1398 .nfi_clk_div = 2, 1402 .nfi_clk_div = 2,
1403 .max_sector = 16,
1404 .max_sector_size = 1024,
1405};
1406
1407static const struct mtk_nfc_caps mtk_nfc_caps_mt7622 = {
1408 .spare_size = spare_size_mt7622,
1409 .num_spare_size = 4,
1410 .pageformat_spare_shift = 4,
1411 .nfi_clk_div = 1,
1412 .max_sector = 8,
1413 .max_sector_size = 512,
1399}; 1414};
1400 1415
1401static const struct of_device_id mtk_nfc_id_table[] = { 1416static const struct of_device_id mtk_nfc_id_table[] = {
@@ -1405,6 +1420,9 @@ static const struct of_device_id mtk_nfc_id_table[] = {
1405 }, { 1420 }, {
1406 .compatible = "mediatek,mt2712-nfc", 1421 .compatible = "mediatek,mt2712-nfc",
1407 .data = &mtk_nfc_caps_mt2712, 1422 .data = &mtk_nfc_caps_mt2712,
1423 }, {
1424 .compatible = "mediatek,mt7622-nfc",
1425 .data = &mtk_nfc_caps_mt7622,
1408 }, 1426 },
1409 {} 1427 {}
1410}; 1428};
@@ -1540,7 +1558,6 @@ static int mtk_nfc_resume(struct device *dev)
1540 struct mtk_nfc *nfc = dev_get_drvdata(dev); 1558 struct mtk_nfc *nfc = dev_get_drvdata(dev);
1541 struct mtk_nfc_nand_chip *chip; 1559 struct mtk_nfc_nand_chip *chip;
1542 struct nand_chip *nand; 1560 struct nand_chip *nand;
1543 struct mtd_info *mtd;
1544 int ret; 1561 int ret;
1545 u32 i; 1562 u32 i;
1546 1563
@@ -1553,11 +1570,8 @@ static int mtk_nfc_resume(struct device *dev)
1553 /* reset NAND chip if VCC was powered off */ 1570 /* reset NAND chip if VCC was powered off */
1554 list_for_each_entry(chip, &nfc->chips, node) { 1571 list_for_each_entry(chip, &nfc->chips, node) {
1555 nand = &chip->nand; 1572 nand = &chip->nand;
1556 mtd = nand_to_mtd(nand); 1573 for (i = 0; i < chip->nsels; i++)
1557 for (i = 0; i < chip->nsels; i++) { 1574 nand_reset(nand, i);
1558 nand->select_chip(mtd, i);
1559 nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
1560 }
1561 } 1575 }
1562 1576
1563 return 0; 1577 return 0;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index e7ec55b1d368..e70ca16a5118 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -561,14 +561,19 @@ static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
561static int nand_check_wp(struct mtd_info *mtd) 561static int nand_check_wp(struct mtd_info *mtd)
562{ 562{
563 struct nand_chip *chip = mtd_to_nand(mtd); 563 struct nand_chip *chip = mtd_to_nand(mtd);
564 u8 status;
565 int ret;
564 566
565 /* Broken xD cards report WP despite being writable */ 567 /* Broken xD cards report WP despite being writable */
566 if (chip->options & NAND_BROKEN_XD) 568 if (chip->options & NAND_BROKEN_XD)
567 return 0; 569 return 0;
568 570
569 /* Check the WP bit */ 571 /* Check the WP bit */
570 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); 572 ret = nand_status_op(chip, &status);
571 return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1; 573 if (ret)
574 return ret;
575
576 return status & NAND_STATUS_WP ? 0 : 1;
572} 577}
573 578
574/** 579/**
@@ -667,16 +672,83 @@ EXPORT_SYMBOL_GPL(nand_wait_ready);
667static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo) 672static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
668{ 673{
669 register struct nand_chip *chip = mtd_to_nand(mtd); 674 register struct nand_chip *chip = mtd_to_nand(mtd);
675 int ret;
670 676
671 timeo = jiffies + msecs_to_jiffies(timeo); 677 timeo = jiffies + msecs_to_jiffies(timeo);
672 do { 678 do {
673 if ((chip->read_byte(mtd) & NAND_STATUS_READY)) 679 u8 status;
680
681 ret = nand_read_data_op(chip, &status, sizeof(status), true);
682 if (ret)
683 return;
684
685 if (status & NAND_STATUS_READY)
674 break; 686 break;
675 touch_softlockup_watchdog(); 687 touch_softlockup_watchdog();
676 } while (time_before(jiffies, timeo)); 688 } while (time_before(jiffies, timeo));
677}; 689};
678 690
679/** 691/**
692 * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
693 * @chip: NAND chip structure
694 * @timeout_ms: Timeout in ms
695 *
696 * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
697 * If that does not happen whitin the specified timeout, -ETIMEDOUT is
698 * returned.
699 *
700 * This helper is intended to be used when the controller does not have access
701 * to the NAND R/B pin.
702 *
703 * Be aware that calling this helper from an ->exec_op() implementation means
704 * ->exec_op() must be re-entrant.
705 *
706 * Return 0 if the NAND chip is ready, a negative error otherwise.
707 */
708int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
709{
710 u8 status = 0;
711 int ret;
712
713 if (!chip->exec_op)
714 return -ENOTSUPP;
715
716 ret = nand_status_op(chip, NULL);
717 if (ret)
718 return ret;
719
720 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
721 do {
722 ret = nand_read_data_op(chip, &status, sizeof(status), true);
723 if (ret)
724 break;
725
726 if (status & NAND_STATUS_READY)
727 break;
728
729 /*
730 * Typical lowest execution time for a tR on most NANDs is 10us,
731 * use this as polling delay before doing something smarter (ie.
732 * deriving a delay from the timeout value, timeout_ms/ratio).
733 */
734 udelay(10);
735 } while (time_before(jiffies, timeout_ms));
736
737 /*
738 * We have to exit READ_STATUS mode in order to read real data on the
739 * bus in case the WAITRDY instruction is preceding a DATA_IN
740 * instruction.
741 */
742 nand_exit_status_op(chip);
743
744 if (ret)
745 return ret;
746
747 return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
748};
749EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
750
751/**
680 * nand_command - [DEFAULT] Send command to NAND device 752 * nand_command - [DEFAULT] Send command to NAND device
681 * @mtd: MTD device structure 753 * @mtd: MTD device structure
682 * @command: the command to be sent 754 * @command: the command to be sent
@@ -710,7 +782,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
710 chip->cmd_ctrl(mtd, readcmd, ctrl); 782 chip->cmd_ctrl(mtd, readcmd, ctrl);
711 ctrl &= ~NAND_CTRL_CHANGE; 783 ctrl &= ~NAND_CTRL_CHANGE;
712 } 784 }
713 chip->cmd_ctrl(mtd, command, ctrl); 785 if (command != NAND_CMD_NONE)
786 chip->cmd_ctrl(mtd, command, ctrl);
714 787
715 /* Address cycle, when necessary */ 788 /* Address cycle, when necessary */
716 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE; 789 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
@@ -738,6 +811,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
738 */ 811 */
739 switch (command) { 812 switch (command) {
740 813
814 case NAND_CMD_NONE:
741 case NAND_CMD_PAGEPROG: 815 case NAND_CMD_PAGEPROG:
742 case NAND_CMD_ERASE1: 816 case NAND_CMD_ERASE1:
743 case NAND_CMD_ERASE2: 817 case NAND_CMD_ERASE2:
@@ -802,8 +876,8 @@ static void nand_ccs_delay(struct nand_chip *chip)
802 * Wait tCCS_min if it is correctly defined, otherwise wait 500ns 876 * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
803 * (which should be safe for all NANDs). 877 * (which should be safe for all NANDs).
804 */ 878 */
805 if (chip->data_interface && chip->data_interface->timings.sdr.tCCS_min) 879 if (chip->setup_data_interface)
806 ndelay(chip->data_interface->timings.sdr.tCCS_min / 1000); 880 ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000);
807 else 881 else
808 ndelay(500); 882 ndelay(500);
809} 883}
@@ -831,7 +905,9 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
831 } 905 }
832 906
833 /* Command latch cycle */ 907 /* Command latch cycle */
834 chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); 908 if (command != NAND_CMD_NONE)
909 chip->cmd_ctrl(mtd, command,
910 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
835 911
836 if (column != -1 || page_addr != -1) { 912 if (column != -1 || page_addr != -1) {
837 int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE; 913 int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
@@ -866,6 +942,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
866 */ 942 */
867 switch (command) { 943 switch (command) {
868 944
945 case NAND_CMD_NONE:
869 case NAND_CMD_CACHEDPROG: 946 case NAND_CMD_CACHEDPROG:
870 case NAND_CMD_PAGEPROG: 947 case NAND_CMD_PAGEPROG:
871 case NAND_CMD_ERASE1: 948 case NAND_CMD_ERASE1:
@@ -1014,7 +1091,15 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
1014 if (chip->dev_ready(mtd)) 1091 if (chip->dev_ready(mtd))
1015 break; 1092 break;
1016 } else { 1093 } else {
1017 if (chip->read_byte(mtd) & NAND_STATUS_READY) 1094 int ret;
1095 u8 status;
1096
1097 ret = nand_read_data_op(chip, &status, sizeof(status),
1098 true);
1099 if (ret)
1100 return;
1101
1102 if (status & NAND_STATUS_READY)
1018 break; 1103 break;
1019 } 1104 }
1020 mdelay(1); 1105 mdelay(1);
@@ -1031,8 +1116,9 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
1031static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) 1116static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
1032{ 1117{
1033 1118
1034 int status;
1035 unsigned long timeo = 400; 1119 unsigned long timeo = 400;
1120 u8 status;
1121 int ret;
1036 1122
1037 /* 1123 /*
1038 * Apply this short delay always to ensure that we do wait tWB in any 1124 * Apply this short delay always to ensure that we do wait tWB in any
@@ -1040,7 +1126,9 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
1040 */ 1126 */
1041 ndelay(100); 1127 ndelay(100);
1042 1128
1043 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); 1129 ret = nand_status_op(chip, NULL);
1130 if (ret)
1131 return ret;
1044 1132
1045 if (in_interrupt() || oops_in_progress) 1133 if (in_interrupt() || oops_in_progress)
1046 panic_nand_wait(mtd, chip, timeo); 1134 panic_nand_wait(mtd, chip, timeo);
@@ -1051,14 +1139,22 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
1051 if (chip->dev_ready(mtd)) 1139 if (chip->dev_ready(mtd))
1052 break; 1140 break;
1053 } else { 1141 } else {
1054 if (chip->read_byte(mtd) & NAND_STATUS_READY) 1142 ret = nand_read_data_op(chip, &status,
1143 sizeof(status), true);
1144 if (ret)
1145 return ret;
1146
1147 if (status & NAND_STATUS_READY)
1055 break; 1148 break;
1056 } 1149 }
1057 cond_resched(); 1150 cond_resched();
1058 } while (time_before(jiffies, timeo)); 1151 } while (time_before(jiffies, timeo));
1059 } 1152 }
1060 1153
1061 status = (int)chip->read_byte(mtd); 1154 ret = nand_read_data_op(chip, &status, sizeof(status), true);
1155 if (ret)
1156 return ret;
1157
1062 /* This can happen in case of timeout or buggy dev_ready */ 1158
1063 WARN_ON(!(status & NAND_STATUS_READY)); 1159 WARN_ON(!(status & NAND_STATUS_READY));
1064 return status; 1160 return status;
@@ -1076,7 +1172,6 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
1076static int nand_reset_data_interface(struct nand_chip *chip, int chipnr) 1172static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
1077{ 1173{
1078 struct mtd_info *mtd = nand_to_mtd(chip); 1174 struct mtd_info *mtd = nand_to_mtd(chip);
1079 const struct nand_data_interface *conf;
1080 int ret; 1175 int ret;
1081 1176
1082 if (!chip->setup_data_interface) 1177 if (!chip->setup_data_interface)
@@ -1096,8 +1191,8 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
1096 * timings to timing mode 0. 1191 * timings to timing mode 0.
1097 */ 1192 */
1098 1193
1099 conf = nand_get_default_data_interface(); 1194 onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
1100 ret = chip->setup_data_interface(mtd, chipnr, conf); 1195 ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
1101 if (ret) 1196 if (ret)
1102 pr_err("Failed to configure data interface to SDR timing mode 0\n"); 1197 pr_err("Failed to configure data interface to SDR timing mode 0\n");
1103 1198
@@ -1122,7 +1217,7 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
1122 struct mtd_info *mtd = nand_to_mtd(chip); 1217 struct mtd_info *mtd = nand_to_mtd(chip);
1123 int ret; 1218 int ret;
1124 1219
1125 if (!chip->setup_data_interface || !chip->data_interface) 1220 if (!chip->setup_data_interface)
1126 return 0; 1221 return 0;
1127 1222
1128 /* 1223 /*
@@ -1143,7 +1238,7 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
1143 goto err; 1238 goto err;
1144 } 1239 }
1145 1240
1146 ret = chip->setup_data_interface(mtd, chipnr, chip->data_interface); 1241 ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
1147err: 1242err:
1148 return ret; 1243 return ret;
1149} 1244}
@@ -1183,21 +1278,19 @@ static int nand_init_data_interface(struct nand_chip *chip)
1183 modes = GENMASK(chip->onfi_timing_mode_default, 0); 1278 modes = GENMASK(chip->onfi_timing_mode_default, 0);
1184 } 1279 }
1185 1280
1186 chip->data_interface = kzalloc(sizeof(*chip->data_interface),
1187 GFP_KERNEL);
1188 if (!chip->data_interface)
1189 return -ENOMEM;
1190 1281
1191 for (mode = fls(modes) - 1; mode >= 0; mode--) { 1282 for (mode = fls(modes) - 1; mode >= 0; mode--) {
1192 ret = onfi_init_data_interface(chip, chip->data_interface, 1283 ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
1193 NAND_SDR_IFACE, mode);
1194 if (ret) 1284 if (ret)
1195 continue; 1285 continue;
1196 1286
1197 /* Pass -1 to only */ 1287 /*
1288 * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
1289 * controller supports the requested timings.
1290 */
1198 ret = chip->setup_data_interface(mtd, 1291 ret = chip->setup_data_interface(mtd,
1199 NAND_DATA_IFACE_CHECK_ONLY, 1292 NAND_DATA_IFACE_CHECK_ONLY,
1200 chip->data_interface); 1293 &chip->data_interface);
1201 if (!ret) { 1294 if (!ret) {
1202 chip->onfi_timing_mode_default = mode; 1295 chip->onfi_timing_mode_default = mode;
1203 break; 1296 break;
@@ -1207,21 +1300,1429 @@ static int nand_init_data_interface(struct nand_chip *chip)
1207 return 0; 1300 return 0;
1208} 1301}
1209 1302
1210static void nand_release_data_interface(struct nand_chip *chip) 1303/**
1304 * nand_fill_column_cycles - fill the column cycles of an address
1305 * @chip: The NAND chip
1306 * @addrs: Array of address cycles to fill
1307 * @offset_in_page: The offset in the page
1308 *
1309 * Fills the first or the first two bytes of the @addrs field depending
1310 * on the NAND bus width and the page size.
1311 *
1312 * Returns the number of cycles needed to encode the column, or a negative
1313 * error code in case one of the arguments is invalid.
1314 */
1315static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
1316 unsigned int offset_in_page)
1317{
1318 struct mtd_info *mtd = nand_to_mtd(chip);
1319
1320 /* Make sure the offset is less than the actual page size. */
1321 if (offset_in_page > mtd->writesize + mtd->oobsize)
1322 return -EINVAL;
1323
1324 /*
1325 * On small page NANDs, there's a dedicated command to access the OOB
1326 * area, and the column address is relative to the start of the OOB
1327 * area, not the start of the page. Adjust the address accordingly.
1328 */
1329 if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
1330 offset_in_page -= mtd->writesize;
1331
1332 /*
1333 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
1334 * wide, then it must be divided by 2.
1335 */
1336 if (chip->options & NAND_BUSWIDTH_16) {
1337 if (WARN_ON(offset_in_page % 2))
1338 return -EINVAL;
1339
1340 offset_in_page /= 2;
1341 }
1342
1343 addrs[0] = offset_in_page;
1344
1345 /*
1346 * Small page NANDs use 1 cycle for the columns, while large page NANDs
1347 * need 2
1348 */
1349 if (mtd->writesize <= 512)
1350 return 1;
1351
1352 addrs[1] = offset_in_page >> 8;
1353
1354 return 2;
1355}
1356
1357static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
1358 unsigned int offset_in_page, void *buf,
1359 unsigned int len)
1360{
1361 struct mtd_info *mtd = nand_to_mtd(chip);
1362 const struct nand_sdr_timings *sdr =
1363 nand_get_sdr_timings(&chip->data_interface);
1364 u8 addrs[4];
1365 struct nand_op_instr instrs[] = {
1366 NAND_OP_CMD(NAND_CMD_READ0, 0),
1367 NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
1368 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1369 PSEC_TO_NSEC(sdr->tRR_min)),
1370 NAND_OP_DATA_IN(len, buf, 0),
1371 };
1372 struct nand_operation op = NAND_OPERATION(instrs);
1373 int ret;
1374
1375 /* Drop the DATA_IN instruction if len is set to 0. */
1376 if (!len)
1377 op.ninstrs--;
1378
1379 if (offset_in_page >= mtd->writesize)
1380 instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
1381 else if (offset_in_page >= 256 &&
1382 !(chip->options & NAND_BUSWIDTH_16))
1383 instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
1384
1385 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1386 if (ret < 0)
1387 return ret;
1388
1389 addrs[1] = page;
1390 addrs[2] = page >> 8;
1391
1392 if (chip->options & NAND_ROW_ADDR_3) {
1393 addrs[3] = page >> 16;
1394 instrs[1].ctx.addr.naddrs++;
1395 }
1396
1397 return nand_exec_op(chip, &op);
1398}
1399
1400static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
1401 unsigned int offset_in_page, void *buf,
1402 unsigned int len)
1403{
1404 const struct nand_sdr_timings *sdr =
1405 nand_get_sdr_timings(&chip->data_interface);
1406 u8 addrs[5];
1407 struct nand_op_instr instrs[] = {
1408 NAND_OP_CMD(NAND_CMD_READ0, 0),
1409 NAND_OP_ADDR(4, addrs, 0),
1410 NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
1411 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1412 PSEC_TO_NSEC(sdr->tRR_min)),
1413 NAND_OP_DATA_IN(len, buf, 0),
1414 };
1415 struct nand_operation op = NAND_OPERATION(instrs);
1416 int ret;
1417
1418 /* Drop the DATA_IN instruction if len is set to 0. */
1419 if (!len)
1420 op.ninstrs--;
1421
1422 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1423 if (ret < 0)
1424 return ret;
1425
1426 addrs[2] = page;
1427 addrs[3] = page >> 8;
1428
1429 if (chip->options & NAND_ROW_ADDR_3) {
1430 addrs[4] = page >> 16;
1431 instrs[1].ctx.addr.naddrs++;
1432 }
1433
1434 return nand_exec_op(chip, &op);
1435}
1436
1437/**
1438 * nand_read_page_op - Do a READ PAGE operation
1439 * @chip: The NAND chip
1440 * @page: page to read
1441 * @offset_in_page: offset within the page
1442 * @buf: buffer used to store the data
1443 * @len: length of the buffer
1444 *
1445 * This function issues a READ PAGE operation.
1446 * This function does not select/unselect the CS line.
1447 *
1448 * Returns 0 on success, a negative error code otherwise.
1449 */
1450int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1451 unsigned int offset_in_page, void *buf, unsigned int len)
1452{
1453 struct mtd_info *mtd = nand_to_mtd(chip);
1454
1455 if (len && !buf)
1456 return -EINVAL;
1457
1458 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1459 return -EINVAL;
1460
1461 if (chip->exec_op) {
1462 if (mtd->writesize > 512)
1463 return nand_lp_exec_read_page_op(chip, page,
1464 offset_in_page, buf,
1465 len);
1466
1467 return nand_sp_exec_read_page_op(chip, page, offset_in_page,
1468 buf, len);
1469 }
1470
1471 chip->cmdfunc(mtd, NAND_CMD_READ0, offset_in_page, page);
1472 if (len)
1473 chip->read_buf(mtd, buf, len);
1474
1475 return 0;
1476}
1477EXPORT_SYMBOL_GPL(nand_read_page_op);
1478
1479/**
1480 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
1481 * @chip: The NAND chip
1482 * @page: parameter page to read
1483 * @buf: buffer used to store the data
1484 * @len: length of the buffer
1485 *
1486 * This function issues a READ PARAMETER PAGE operation.
1487 * This function does not select/unselect the CS line.
1488 *
1489 * Returns 0 on success, a negative error code otherwise.
1490 */
1491static int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
1492 unsigned int len)
1493{
1494 struct mtd_info *mtd = nand_to_mtd(chip);
1495 unsigned int i;
1496 u8 *p = buf;
1497
1498 if (len && !buf)
1499 return -EINVAL;
1500
1501 if (chip->exec_op) {
1502 const struct nand_sdr_timings *sdr =
1503 nand_get_sdr_timings(&chip->data_interface);
1504 struct nand_op_instr instrs[] = {
1505 NAND_OP_CMD(NAND_CMD_PARAM, 0),
1506 NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
1507 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1508 PSEC_TO_NSEC(sdr->tRR_min)),
1509 NAND_OP_8BIT_DATA_IN(len, buf, 0),
1510 };
1511 struct nand_operation op = NAND_OPERATION(instrs);
1512
1513 /* Drop the DATA_IN instruction if len is set to 0. */
1514 if (!len)
1515 op.ninstrs--;
1516
1517 return nand_exec_op(chip, &op);
1518 }
1519
1520 chip->cmdfunc(mtd, NAND_CMD_PARAM, page, -1);
1521 for (i = 0; i < len; i++)
1522 p[i] = chip->read_byte(mtd);
1523
1524 return 0;
1525}
1526
1527/**
1528 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
1529 * @chip: The NAND chip
1530 * @offset_in_page: offset within the page
1531 * @buf: buffer used to store the data
1532 * @len: length of the buffer
1533 * @force_8bit: force 8-bit bus access
1534 *
1535 * This function issues a CHANGE READ COLUMN operation.
1536 * This function does not select/unselect the CS line.
1537 *
1538 * Returns 0 on success, a negative error code otherwise.
1539 */
1540int nand_change_read_column_op(struct nand_chip *chip,
1541 unsigned int offset_in_page, void *buf,
1542 unsigned int len, bool force_8bit)
1543{
1544 struct mtd_info *mtd = nand_to_mtd(chip);
1545
1546 if (len && !buf)
1547 return -EINVAL;
1548
1549 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1550 return -EINVAL;
1551
1552 /* Small page NANDs do not support column change. */
1553 if (mtd->writesize <= 512)
1554 return -ENOTSUPP;
1555
1556 if (chip->exec_op) {
1557 const struct nand_sdr_timings *sdr =
1558 nand_get_sdr_timings(&chip->data_interface);
1559 u8 addrs[2] = {};
1560 struct nand_op_instr instrs[] = {
1561 NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
1562 NAND_OP_ADDR(2, addrs, 0),
1563 NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
1564 PSEC_TO_NSEC(sdr->tCCS_min)),
1565 NAND_OP_DATA_IN(len, buf, 0),
1566 };
1567 struct nand_operation op = NAND_OPERATION(instrs);
1568 int ret;
1569
1570 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1571 if (ret < 0)
1572 return ret;
1573
1574 /* Drop the DATA_IN instruction if len is set to 0. */
1575 if (!len)
1576 op.ninstrs--;
1577
1578 instrs[3].ctx.data.force_8bit = force_8bit;
1579
1580 return nand_exec_op(chip, &op);
1581 }
1582
1583 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset_in_page, -1);
1584 if (len)
1585 chip->read_buf(mtd, buf, len);
1586
1587 return 0;
1588}
1589EXPORT_SYMBOL_GPL(nand_change_read_column_op);
1590
1591/**
1592 * nand_read_oob_op - Do a READ OOB operation
1593 * @chip: The NAND chip
1594 * @page: page to read
1595 * @offset_in_oob: offset within the OOB area
1596 * @buf: buffer used to store the data
1597 * @len: length of the buffer
1598 *
1599 * This function issues a READ OOB operation.
1600 * This function does not select/unselect the CS line.
1601 *
1602 * Returns 0 on success, a negative error code otherwise.
1603 */
1604int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1605 unsigned int offset_in_oob, void *buf, unsigned int len)
1606{
1607 struct mtd_info *mtd = nand_to_mtd(chip);
1608
1609 if (len && !buf)
1610 return -EINVAL;
1611
1612 if (offset_in_oob + len > mtd->oobsize)
1613 return -EINVAL;
1614
1615 if (chip->exec_op)
1616 return nand_read_page_op(chip, page,
1617 mtd->writesize + offset_in_oob,
1618 buf, len);
1619
1620 chip->cmdfunc(mtd, NAND_CMD_READOOB, offset_in_oob, page);
1621 if (len)
1622 chip->read_buf(mtd, buf, len);
1623
1624 return 0;
1625}
1626EXPORT_SYMBOL_GPL(nand_read_oob_op);
1627
1628static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
1629 unsigned int offset_in_page, const void *buf,
1630 unsigned int len, bool prog)
1631{
1632 struct mtd_info *mtd = nand_to_mtd(chip);
1633 const struct nand_sdr_timings *sdr =
1634 nand_get_sdr_timings(&chip->data_interface);
1635 u8 addrs[5] = {};
1636 struct nand_op_instr instrs[] = {
1637 /*
1638 * The first instruction will be dropped if we're dealing
1639 * with a large page NAND and adjusted if we're dealing
1640 * with a small page NAND and the page offset is > 255.
1641 */
1642 NAND_OP_CMD(NAND_CMD_READ0, 0),
1643 NAND_OP_CMD(NAND_CMD_SEQIN, 0),
1644 NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
1645 NAND_OP_DATA_OUT(len, buf, 0),
1646 NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
1647 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
1648 };
1649 struct nand_operation op = NAND_OPERATION(instrs);
1650 int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
1651 int ret;
1652 u8 status;
1653
1654 if (naddrs < 0)
1655 return naddrs;
1656
1657 addrs[naddrs++] = page;
1658 addrs[naddrs++] = page >> 8;
1659 if (chip->options & NAND_ROW_ADDR_3)
1660 addrs[naddrs++] = page >> 16;
1661
1662 instrs[2].ctx.addr.naddrs = naddrs;
1663
1664 /* Drop the last two instructions if we're not programming the page. */
1665 if (!prog) {
1666 op.ninstrs -= 2;
1667 /* Also drop the DATA_OUT instruction if empty. */
1668 if (!len)
1669 op.ninstrs--;
1670 }
1671
1672 if (mtd->writesize <= 512) {
1673 /*
1674 * Small pages need some more tweaking: we have to adjust the
1675 * first instruction depending on the page offset we're trying
1676 * to access.
1677 */
1678 if (offset_in_page >= mtd->writesize)
1679 instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
1680 else if (offset_in_page >= 256 &&
1681 !(chip->options & NAND_BUSWIDTH_16))
1682 instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
1683 } else {
1684 /*
1685 * Drop the first command if we're dealing with a large page
1686 * NAND.
1687 */
1688 op.instrs++;
1689 op.ninstrs--;
1690 }
1691
1692 ret = nand_exec_op(chip, &op);
1693 if (!prog || ret)
1694 return ret;
1695
1696 ret = nand_status_op(chip, &status);
1697 if (ret)
1698 return ret;
1699
1700 return status;
1701}
1702
1703/**
1704 * nand_prog_page_begin_op - starts a PROG PAGE operation
1705 * @chip: The NAND chip
1706 * @page: page to write
1707 * @offset_in_page: offset within the page
1708 * @buf: buffer containing the data to write to the page
1709 * @len: length of the buffer
1710 *
1711 * This function issues the first half of a PROG PAGE operation.
1712 * This function does not select/unselect the CS line.
1713 *
1714 * Returns 0 on success, a negative error code otherwise.
1715 */
1716int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1717 unsigned int offset_in_page, const void *buf,
1718 unsigned int len)
1719{
1720 struct mtd_info *mtd = nand_to_mtd(chip);
1721
1722 if (len && !buf)
1723 return -EINVAL;
1724
1725 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1726 return -EINVAL;
1727
1728 if (chip->exec_op)
1729 return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1730 len, false);
1731
1732 chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
1733
1734 if (buf)
1735 chip->write_buf(mtd, buf, len);
1736
1737 return 0;
1738}
1739EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1740
1741/**
1742 * nand_prog_page_end_op - ends a PROG PAGE operation
1743 * @chip: The NAND chip
1744 *
1745 * This function issues the second half of a PROG PAGE operation.
1746 * This function does not select/unselect the CS line.
1747 *
1748 * Returns 0 on success, a negative error code otherwise.
1749 */
1750int nand_prog_page_end_op(struct nand_chip *chip)
1751{
1752 struct mtd_info *mtd = nand_to_mtd(chip);
1753 int ret;
1754 u8 status;
1755
1756 if (chip->exec_op) {
1757 const struct nand_sdr_timings *sdr =
1758 nand_get_sdr_timings(&chip->data_interface);
1759 struct nand_op_instr instrs[] = {
1760 NAND_OP_CMD(NAND_CMD_PAGEPROG,
1761 PSEC_TO_NSEC(sdr->tWB_max)),
1762 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
1763 };
1764 struct nand_operation op = NAND_OPERATION(instrs);
1765
1766 ret = nand_exec_op(chip, &op);
1767 if (ret)
1768 return ret;
1769
1770 ret = nand_status_op(chip, &status);
1771 if (ret)
1772 return ret;
1773 } else {
1774 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1775 ret = chip->waitfunc(mtd, chip);
1776 if (ret < 0)
1777 return ret;
1778
1779 status = ret;
1780 }
1781
1782 if (status & NAND_STATUS_FAIL)
1783 return -EIO;
1784
1785 return 0;
1786}
1787EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
1788
1789/**
1790 * nand_prog_page_op - Do a full PROG PAGE operation
1791 * @chip: The NAND chip
1792 * @page: page to write
1793 * @offset_in_page: offset within the page
1794 * @buf: buffer containing the data to write to the page
1795 * @len: length of the buffer
1796 *
1797 * This function issues a full PROG PAGE operation.
1798 * This function does not select/unselect the CS line.
1799 *
1800 * Returns 0 on success, a negative error code otherwise.
1801 */
1802int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1803 unsigned int offset_in_page, const void *buf,
1804 unsigned int len)
1805{
1806 struct mtd_info *mtd = nand_to_mtd(chip);
1807 int status;
1808
1809 if (!len || !buf)
1810 return -EINVAL;
1811
1812 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1813 return -EINVAL;
1814
1815 if (chip->exec_op) {
1816 status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1817 len, true);
1818 } else {
1819 chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
1820 chip->write_buf(mtd, buf, len);
1821 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1822 status = chip->waitfunc(mtd, chip);
1823 }
1824
1825 if (status & NAND_STATUS_FAIL)
1826 return -EIO;
1827
1828 return 0;
1829}
1830EXPORT_SYMBOL_GPL(nand_prog_page_op);
1831
1832/**
1833 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
1834 * @chip: The NAND chip
1835 * @offset_in_page: offset within the page
1836 * @buf: buffer containing the data to send to the NAND
1837 * @len: length of the buffer
1838 * @force_8bit: force 8-bit bus access
1839 *
1840 * This function issues a CHANGE WRITE COLUMN operation.
1841 * This function does not select/unselect the CS line.
1842 *
1843 * Returns 0 on success, a negative error code otherwise.
1844 */
1845int nand_change_write_column_op(struct nand_chip *chip,
1846 unsigned int offset_in_page,
1847 const void *buf, unsigned int len,
1848 bool force_8bit)
1849{
1850 struct mtd_info *mtd = nand_to_mtd(chip);
1851
1852 if (len && !buf)
1853 return -EINVAL;
1854
1855 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1856 return -EINVAL;
1857
1858 /* Small page NANDs do not support column change. */
1859 if (mtd->writesize <= 512)
1860 return -ENOTSUPP;
1861
1862 if (chip->exec_op) {
1863 const struct nand_sdr_timings *sdr =
1864 nand_get_sdr_timings(&chip->data_interface);
1865 u8 addrs[2];
1866 struct nand_op_instr instrs[] = {
1867 NAND_OP_CMD(NAND_CMD_RNDIN, 0),
1868 NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
1869 NAND_OP_DATA_OUT(len, buf, 0),
1870 };
1871 struct nand_operation op = NAND_OPERATION(instrs);
1872 int ret;
1873
1874 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1875 if (ret < 0)
1876 return ret;
1877
1878 instrs[2].ctx.data.force_8bit = force_8bit;
1879
1880 /* Drop the DATA_OUT instruction if len is set to 0. */
1881 if (!len)
1882 op.ninstrs--;
1883
1884 return nand_exec_op(chip, &op);
1885 }
1886
1887 chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset_in_page, -1);
1888 if (len)
1889 chip->write_buf(mtd, buf, len);
1890
1891 return 0;
1892}
1893EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1894
1895/**
1896 * nand_readid_op - Do a READID operation
1897 * @chip: The NAND chip
1898 * @addr: address cycle to pass after the READID command
1899 * @buf: buffer used to store the ID
1900 * @len: length of the buffer
1901 *
1902 * This function sends a READID command and reads back the ID returned by the
1903 * NAND.
1904 * This function does not select/unselect the CS line.
1905 *
1906 * Returns 0 on success, a negative error code otherwise.
1907 */
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
		   unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int i;
	u8 *id = buf;

	/* A destination buffer is mandatory as soon as bytes are requested. */
	if (len && !buf)
		return -EINVAL;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READID, 0),
			NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
			/* ID bytes are always transferred on an 8-bit bus. */
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: use the ->cmdfunc()/->read_byte() hooks. */
	chip->cmdfunc(mtd, NAND_CMD_READID, addr, -1);

	for (i = 0; i < len; i++)
		id[i] = chip->read_byte(mtd);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_readid_op);
1943
1944/**
1945 * nand_status_op - Do a STATUS operation
1946 * @chip: The NAND chip
1947 * @status: out variable to store the NAND status
1948 *
1949 * This function sends a STATUS command and reads back the status returned by
1950 * the NAND.
1951 * This function does not select/unselect the CS line.
1952 *
1953 * Returns 0 on success, a negative error code otherwise.
1954 */
int nand_status_op(struct nand_chip *chip, u8 *status)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_STATUS,
				    PSEC_TO_NSEC(sdr->tADL_min)),
			/* The status byte is always read on an 8-bit bus. */
			NAND_OP_8BIT_DATA_IN(1, status, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		/*
		 * A NULL status pointer means the caller only wants to put
		 * the chip in status mode: drop the DATA_IN instruction.
		 */
		if (!status)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: use the ->cmdfunc()/->read_byte() hooks. */
	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->read_byte(mtd);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_status_op);
1982
1983/**
1984 * nand_exit_status_op - Exit a STATUS operation
1985 * @chip: The NAND chip
1986 *
1987 * This function sends a READ0 command to cancel the effect of the STATUS
1988 * command to avoid reading only the status until a new read command is sent.
1989 *
1990 * This function does not select/unselect the CS line.
1991 *
1992 * Returns 0 on success, a negative error code otherwise.
1993 */
1994int nand_exit_status_op(struct nand_chip *chip)
1995{
1996 struct mtd_info *mtd = nand_to_mtd(chip);
1997
1998 if (chip->exec_op) {
1999 struct nand_op_instr instrs[] = {
2000 NAND_OP_CMD(NAND_CMD_READ0, 0),
2001 };
2002 struct nand_operation op = NAND_OPERATION(instrs);
2003
2004 return nand_exec_op(chip, &op);
2005 }
2006
2007 chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);
2008
2009 return 0;
2010}
2011EXPORT_SYMBOL_GPL(nand_exit_status_op);
2012
2013/**
2014 * nand_erase_op - Do an erase operation
2015 * @chip: The NAND chip
2016 * @eraseblock: block to erase
2017 *
2018 * This function sends an ERASE command and waits for the NAND to be ready
2019 * before returning.
2020 * This function does not select/unselect the CS line.
2021 *
2022 * Returns 0 on success, a negative error code otherwise.
2023 */
2024int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
2025{
2026 struct mtd_info *mtd = nand_to_mtd(chip);
2027 unsigned int page = eraseblock <<
2028 (chip->phys_erase_shift - chip->page_shift);
2029 int ret;
2030 u8 status;
2031
2032 if (chip->exec_op) {
2033 const struct nand_sdr_timings *sdr =
2034 nand_get_sdr_timings(&chip->data_interface);
2035 u8 addrs[3] = { page, page >> 8, page >> 16 };
2036 struct nand_op_instr instrs[] = {
2037 NAND_OP_CMD(NAND_CMD_ERASE1, 0),
2038 NAND_OP_ADDR(2, addrs, 0),
2039 NAND_OP_CMD(NAND_CMD_ERASE2,
2040 PSEC_TO_MSEC(sdr->tWB_max)),
2041 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
2042 };
2043 struct nand_operation op = NAND_OPERATION(instrs);
2044
2045 if (chip->options & NAND_ROW_ADDR_3)
2046 instrs[1].ctx.addr.naddrs++;
2047
2048 ret = nand_exec_op(chip, &op);
2049 if (ret)
2050 return ret;
2051
2052 ret = nand_status_op(chip, &status);
2053 if (ret)
2054 return ret;
2055 } else {
2056 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
2057 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
2058
2059 ret = chip->waitfunc(mtd, chip);
2060 if (ret < 0)
2061 return ret;
2062
2063 status = ret;
2064 }
2065
2066 if (status & NAND_STATUS_FAIL)
2067 return -EIO;
2068
2069 return 0;
2070}
2071EXPORT_SYMBOL_GPL(nand_erase_op);
2072
2073/**
2074 * nand_set_features_op - Do a SET FEATURES operation
2075 * @chip: The NAND chip
2076 * @feature: feature id
2077 * @data: 4 bytes of data
2078 *
2079 * This function sends a SET FEATURES command and waits for the NAND to be
2080 * ready before returning.
2081 * This function does not select/unselect the CS line.
2082 *
2083 * Returns 0 on success, a negative error code otherwise.
2084 */
static int nand_set_features_op(struct nand_chip *chip, u8 feature,
				const void *data)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const u8 *params = data;
	int i, ret;
	u8 status;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
			/* Feature parameters always go over an 8-bit bus. */
			NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
					      PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		/* Read the status register to detect a failed SET FEATURES. */
		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		/* Legacy path: ->waitfunc() returns the status byte. */
		chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
		for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
			chip->write_byte(mtd, params[i]);

		ret = chip->waitfunc(mtd, chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
2129
2130/**
2131 * nand_get_features_op - Do a GET FEATURES operation
2132 * @chip: The NAND chip
2133 * @feature: feature id
2134 * @data: 4 bytes of data
2135 *
2136 * This function sends a GET FEATURES command and waits for the NAND to be
2137 * ready before returning.
2138 * This function does not select/unselect the CS line.
2139 *
2140 * Returns 0 on success, a negative error code otherwise.
2141 */
static int nand_get_features_op(struct nand_chip *chip, u8 feature,
				void *data)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 *params = data;
	int i;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
			/* Wait tFEAT, then respect tRR before reading. */
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
					 PSEC_TO_NSEC(sdr->tRR_min)),
			/* Feature parameters always come over an 8-bit bus. */
			NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
					     data, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: use the ->cmdfunc()/->read_byte() hooks. */
	chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		params[i] = chip->read_byte(mtd);

	return 0;
}
2171
2172/**
2173 * nand_reset_op - Do a reset operation
2174 * @chip: The NAND chip
2175 *
2176 * This function sends a RESET command and waits for the NAND to be ready
2177 * before returning.
2178 * This function does not select/unselect the CS line.
2179 *
2180 * Returns 0 on success, a negative error code otherwise.
2181 */
int nand_reset_op(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
			/* tRST: maximum device reset time. */
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: issue RESET through the ->cmdfunc() hook. */
	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset_op);
2203
2204/**
2205 * nand_read_data_op - Read data from the NAND
2206 * @chip: The NAND chip
2207 * @buf: buffer used to store the data
2208 * @len: length of the buffer
2209 * @force_8bit: force 8-bit bus access
2210 *
2211 * This function does a raw data read on the bus. Usually used after launching
2212 * another NAND operation like nand_read_page_op().
2213 * This function does not select/unselect the CS line.
2214 *
2215 * Returns 0 on success, a negative error code otherwise.
2216 */
2217int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
2218 bool force_8bit)
2219{
2220 struct mtd_info *mtd = nand_to_mtd(chip);
2221
2222 if (!len || !buf)
2223 return -EINVAL;
2224
2225 if (chip->exec_op) {
2226 struct nand_op_instr instrs[] = {
2227 NAND_OP_DATA_IN(len, buf, 0),
2228 };
2229 struct nand_operation op = NAND_OPERATION(instrs);
2230
2231 instrs[0].ctx.data.force_8bit = force_8bit;
2232
2233 return nand_exec_op(chip, &op);
2234 }
2235
2236 if (force_8bit) {
2237 u8 *p = buf;
2238 unsigned int i;
2239
2240 for (i = 0; i < len; i++)
2241 p[i] = chip->read_byte(mtd);
2242 } else {
2243 chip->read_buf(mtd, buf, len);
2244 }
2245
2246 return 0;
2247}
2248EXPORT_SYMBOL_GPL(nand_read_data_op);
2249
2250/**
2251 * nand_write_data_op - Write data from the NAND
2252 * @chip: The NAND chip
2253 * @buf: buffer containing the data to send on the bus
2254 * @len: length of the buffer
2255 * @force_8bit: force 8-bit bus access
2256 *
2257 * This function does a raw data write on the bus. Usually used after launching
2258 * another NAND operation like nand_write_page_begin_op().
2259 * This function does not select/unselect the CS line.
2260 *
2261 * Returns 0 on success, a negative error code otherwise.
2262 */
2263int nand_write_data_op(struct nand_chip *chip, const void *buf,
2264 unsigned int len, bool force_8bit)
2265{
2266 struct mtd_info *mtd = nand_to_mtd(chip);
2267
2268 if (!len || !buf)
2269 return -EINVAL;
2270
2271 if (chip->exec_op) {
2272 struct nand_op_instr instrs[] = {
2273 NAND_OP_DATA_OUT(len, buf, 0),
2274 };
2275 struct nand_operation op = NAND_OPERATION(instrs);
2276
2277 instrs[0].ctx.data.force_8bit = force_8bit;
2278
2279 return nand_exec_op(chip, &op);
2280 }
2281
2282 if (force_8bit) {
2283 const u8 *p = buf;
2284 unsigned int i;
2285
2286 for (i = 0; i < len; i++)
2287 chip->write_byte(mtd, p[i]);
2288 } else {
2289 chip->write_buf(mtd, buf, len);
2290 }
2291
2292 return 0;
2293}
2294EXPORT_SYMBOL_GPL(nand_write_data_op);
2295
/**
 * struct nand_op_parser_ctx - Context used by the parser
 * @instrs: array of all the instructions that must be addressed
 * @ninstrs: length of the @instrs array
 * @subop: current sub-operation to be passed to the NAND controller
 *
 * This structure is used by the core to split NAND operations into
 * sub-operations that can be handled by the NAND controller.
 */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;
	unsigned int ninstrs;
	struct nand_subop subop;
};
2310
2311/**
2312 * nand_op_parser_must_split_instr - Checks if an instruction must be split
2313 * @pat: the parser pattern element that matches @instr
2314 * @instr: pointer to the instruction to check
2315 * @start_offset: this is an in/out parameter. If @instr has already been
2316 * split, then @start_offset is the offset from which to start
2317 * (either an address cycle or an offset in the data buffer).
2318 * Conversely, if the function returns true (ie. instr must be
2319 * split), this parameter is updated to point to the first
2320 * data/address cycle that has not been taken care of.
2321 *
2322 * Some NAND controllers are limited and cannot send X address cycles with a
2323 * unique operation, or cannot read/write more than Y bytes at the same time.
2324 * In this case, split the instruction that does not fit in a single
2325 * controller-operation into two or more chunks.
2326 *
2327 * Returns true if the instruction must be split, false otherwise.
2328 * The @start_offset parameter is also updated to the offset at which the next
2329 * bundle of instruction must start (if an address or a data instruction).
2330 */
2331static bool
2332nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
2333 const struct nand_op_instr *instr,
2334 unsigned int *start_offset)
2335{
2336 switch (pat->type) {
2337 case NAND_OP_ADDR_INSTR:
2338 if (!pat->ctx.addr.maxcycles)
2339 break;
2340
2341 if (instr->ctx.addr.naddrs - *start_offset >
2342 pat->ctx.addr.maxcycles) {
2343 *start_offset += pat->ctx.addr.maxcycles;
2344 return true;
2345 }
2346 break;
2347
2348 case NAND_OP_DATA_IN_INSTR:
2349 case NAND_OP_DATA_OUT_INSTR:
2350 if (!pat->ctx.data.maxlen)
2351 break;
2352
2353 if (instr->ctx.data.len - *start_offset >
2354 pat->ctx.data.maxlen) {
2355 *start_offset += pat->ctx.data.maxlen;
2356 return true;
2357 }
2358 break;
2359
2360 default:
2361 break;
2362 }
2363
2364 return false;
2365}
2366
/**
 * nand_op_parser_match_pat - Checks if a pattern matches the instructions
 *			      remaining in the parser context
 * @pat: the pattern to test
 * @ctx: the parser context structure to match with the pattern @pat
 *
 * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
 * Returns true if this is the case, false otherwise. When true is returned,
 * @ctx->subop is updated with the set of instructions to be passed to the
 * controller driver.
 */
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
			 struct nand_op_parser_ctx *ctx)
{
	unsigned int instr_offset = ctx->subop.first_instr_start_off;
	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
	const struct nand_op_instr *instr = ctx->subop.instrs;
	unsigned int i, ninstrs;

	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
		/*
		 * The pattern instruction does not match the operation
		 * instruction. If the instruction is marked optional in the
		 * pattern definition, we skip the pattern element and continue
		 * to the next one. If the element is mandatory, there's no
		 * match and we can return false directly.
		 */
		if (instr->type != pat->elems[i].type) {
			if (!pat->elems[i].optional)
				return false;

			continue;
		}

		/*
		 * Now check the pattern element constraints. If the pattern is
		 * not able to handle the whole instruction in a single step,
		 * we have to split it.
		 * instr_offset comes back updated to point to the position
		 * where the instruction must be split (the start of the next
		 * subop chunk); it is saved into subop.last_instr_end_off
		 * below.
		 */
		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
						    &instr_offset)) {
			ninstrs++;
			i++;
			break;
		}

		instr++;
		ninstrs++;
		instr_offset = 0;
	}

	/*
	 * This can happen if all instructions of a pattern are optional.
	 * Still, if there's not at least one instruction handled by this
	 * pattern, this is not a match, and we should try the next one (if
	 * any).
	 */
	if (!ninstrs)
		return false;

	/*
	 * We had a match on the pattern head, but the pattern may be longer
	 * than the instructions we're asked to execute. We need to make sure
	 * there's no mandatory elements in the pattern tail.
	 */
	for (; i < pat->nelems; i++) {
		if (!pat->elems[i].optional)
			return false;
	}

	/*
	 * We have a match: update the subop structure accordingly and return
	 * true.
	 */
	ctx->subop.ninstrs = ninstrs;
	ctx->subop.last_instr_end_off = instr_offset;

	return true;
}
2450
#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
/* Dump the whole operation, highlighting the current sub-op with "->". */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = " ";
	unsigned int i;

	pr_debug("executing subop:\n");

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		/* Switch to the arrow prefix when entering the sub-op. */
		if (instr == &ctx->subop.instrs[0])
			prefix = " ->";

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			pr_debug("%sCMD [0x%02x]\n", prefix,
				 instr->ctx.cmd.opcode);
			break;
		case NAND_OP_ADDR_INSTR:
			/* %*ph prints at most 64 bytes: clamp the count. */
			pr_debug("%sADDR [%d cyc: %*ph]\n", prefix,
				 instr->ctx.addr.naddrs,
				 instr->ctx.addr.naddrs < 64 ?
				 instr->ctx.addr.naddrs : 64,
				 instr->ctx.addr.addrs);
			break;
		case NAND_OP_DATA_IN_INSTR:
			pr_debug("%sDATA_IN [%d B%s]\n", prefix,
				 instr->ctx.data.len,
				 instr->ctx.data.force_8bit ?
				 ", force 8-bit" : "");
			break;
		case NAND_OP_DATA_OUT_INSTR:
			pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
				 instr->ctx.data.len,
				 instr->ctx.data.force_8bit ?
				 ", force 8-bit" : "");
			break;
		case NAND_OP_WAITRDY_INSTR:
			pr_debug("%sWAITRDY [max %d ms]\n", prefix,
				 instr->ctx.waitrdy.timeout_ms);
			break;
		}

		/* Back to the plain prefix once past the sub-op. */
		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = " ";
	}
}
#else
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	/* NOP */
}
#endif
2506
/**
 * nand_op_parser_exec_op - exec_op parser
 * @chip: the NAND chip
 * @parser: patterns description provided by the controller driver
 * @op: the NAND operation to address
 * @check_only: when true, the function only checks if @op can be handled but
 *		does not execute the operation
 *
 * Helper function designed to ease integration of NAND controller drivers that
 * only support a limited set of instruction sequences. The supported sequences
 * are described in @parser, and the framework takes care of splitting @op into
 * multiple sub-operations (if required) and passes them back to the ->exec()
 * callback of the matching pattern if @check_only is set to false.
 *
 * NAND controller drivers should call this function from their own ->exec_op()
 * implementation.
 *
 * Returns 0 on success, a negative error code otherwise. A failure can be
 * caused by an unsupported operation (none of the supported patterns is able
 * to handle the requested operation), or an error returned by one of the
 * matching pattern->exec() hooks.
 */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only)
{
	struct nand_op_parser_ctx ctx = {
		.subop.instrs = op->instrs,
		.instrs = op->instrs,
		.ninstrs = op->ninstrs,
	};
	unsigned int i;

	/* Loop until every instruction of @op has been consumed. */
	while (ctx.subop.instrs < op->instrs + op->ninstrs) {
		int ret;

		/* Try each pattern in order; first match wins. */
		for (i = 0; i < parser->npatterns; i++) {
			const struct nand_op_parser_pattern *pattern;

			pattern = &parser->patterns[i];
			if (!nand_op_parser_match_pat(pattern, &ctx))
				continue;

			nand_op_parser_trace(&ctx);

			if (check_only)
				break;

			ret = pattern->exec(chip, &ctx.subop);
			if (ret)
				return ret;

			break;
		}

		/* No pattern could handle the next sub-operation. */
		if (i == parser->npatterns) {
			pr_debug("->exec_op() parser: pattern not found!\n");
			return -ENOTSUPP;
		}

		/*
		 * Update the context structure by pointing to the start of the
		 * next subop.
		 */
		ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
		/*
		 * A non-zero last_instr_end_off means the last instruction was
		 * split: the next sub-op must resume inside that instruction.
		 */
		if (ctx.subop.last_instr_end_off)
			ctx.subop.instrs -= 1;

		ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
2581
2582static bool nand_instr_is_data(const struct nand_op_instr *instr)
2583{
2584 return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2585 instr->type == NAND_OP_DATA_OUT_INSTR);
2586}
2587
2588static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2589 unsigned int instr_idx)
1211{ 2590{
1212 kfree(chip->data_interface); 2591 return subop && instr_idx < subop->ninstrs;
1213} 2592}
1214 2593
2594static int nand_subop_get_start_off(const struct nand_subop *subop,
2595 unsigned int instr_idx)
2596{
2597 if (instr_idx)
2598 return 0;
2599
2600 return subop->first_instr_start_off;
2601}
2602
2603/**
2604 * nand_subop_get_addr_start_off - Get the start offset in an address array
2605 * @subop: The entire sub-operation
2606 * @instr_idx: Index of the instruction inside the sub-operation
2607 *
2608 * During driver development, one could be tempted to directly use the
2609 * ->addr.addrs field of address instructions. This is wrong as address
2610 * instructions might be split.
2611 *
2612 * Given an address instruction, returns the offset of the first cycle to issue.
2613 */
2614int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2615 unsigned int instr_idx)
2616{
2617 if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2618 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
2619 return -EINVAL;
2620
2621 return nand_subop_get_start_off(subop, instr_idx);
2622}
2623EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2624
2625/**
2626 * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
2627 * @subop: The entire sub-operation
2628 * @instr_idx: Index of the instruction inside the sub-operation
2629 *
2630 * During driver development, one could be tempted to directly use the
2631 * ->addr->naddrs field of a data instruction. This is wrong as instructions
2632 * might be split.
2633 *
2634 * Given an address instruction, returns the number of address cycle to issue.
2635 */
2636int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2637 unsigned int instr_idx)
2638{
2639 int start_off, end_off;
2640
2641 if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2642 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
2643 return -EINVAL;
2644
2645 start_off = nand_subop_get_addr_start_off(subop, instr_idx);
2646
2647 if (instr_idx == subop->ninstrs - 1 &&
2648 subop->last_instr_end_off)
2649 end_off = subop->last_instr_end_off;
2650 else
2651 end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
2652
2653 return end_off - start_off;
2654}
2655EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2656
2657/**
2658 * nand_subop_get_data_start_off - Get the start offset in a data array
2659 * @subop: The entire sub-operation
2660 * @instr_idx: Index of the instruction inside the sub-operation
2661 *
2662 * During driver development, one could be tempted to directly use the
2663 * ->data->buf.{in,out} field of data instructions. This is wrong as data
2664 * instructions might be split.
2665 *
2666 * Given a data instruction, returns the offset to start from.
2667 */
2668int nand_subop_get_data_start_off(const struct nand_subop *subop,
2669 unsigned int instr_idx)
2670{
2671 if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2672 !nand_instr_is_data(&subop->instrs[instr_idx]))
2673 return -EINVAL;
2674
2675 return nand_subop_get_start_off(subop, instr_idx);
2676}
2677EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2678
2679/**
2680 * nand_subop_get_data_len - Get the number of bytes to retrieve
2681 * @subop: The entire sub-operation
2682 * @instr_idx: Index of the instruction inside the sub-operation
2683 *
2684 * During driver development, one could be tempted to directly use the
2685 * ->data->len field of a data instruction. This is wrong as data instructions
2686 * might be split.
2687 *
2688 * Returns the length of the chunk of data to send/receive.
2689 */
2690int nand_subop_get_data_len(const struct nand_subop *subop,
2691 unsigned int instr_idx)
2692{
2693 int start_off = 0, end_off;
2694
2695 if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2696 !nand_instr_is_data(&subop->instrs[instr_idx]))
2697 return -EINVAL;
2698
2699 start_off = nand_subop_get_data_start_off(subop, instr_idx);
2700
2701 if (instr_idx == subop->ninstrs - 1 &&
2702 subop->last_instr_end_off)
2703 end_off = subop->last_instr_end_off;
2704 else
2705 end_off = subop->instrs[instr_idx].ctx.data.len;
2706
2707 return end_off - start_off;
2708}
2709EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
2710
1215/** 2711/**
1216 * nand_reset - Reset and initialize a NAND device 2712 * nand_reset - Reset and initialize a NAND device
1217 * @chip: The NAND chip 2713 * @chip: The NAND chip
1218 * @chipnr: Internal die id 2714 * @chipnr: Internal die id
1219 * 2715 *
1220 * Returns 0 for success or negative error code otherwise 2716 * Save the timings data structure, then apply SDR timings mode 0 (see
2717 * nand_reset_data_interface for details), do the reset operation, and
2718 * apply back the previous timings.
2719 *
2720 * Returns 0 on success, a negative error code otherwise.
1221 */ 2721 */
1222int nand_reset(struct nand_chip *chip, int chipnr) 2722int nand_reset(struct nand_chip *chip, int chipnr)
1223{ 2723{
1224 struct mtd_info *mtd = nand_to_mtd(chip); 2724 struct mtd_info *mtd = nand_to_mtd(chip);
2725 struct nand_data_interface saved_data_intf = chip->data_interface;
1225 int ret; 2726 int ret;
1226 2727
1227 ret = nand_reset_data_interface(chip, chipnr); 2728 ret = nand_reset_data_interface(chip, chipnr);
@@ -1233,10 +2734,13 @@ int nand_reset(struct nand_chip *chip, int chipnr)
1233 * interface settings, hence this weird ->select_chip() dance. 2734 * interface settings, hence this weird ->select_chip() dance.
1234 */ 2735 */
1235 chip->select_chip(mtd, chipnr); 2736 chip->select_chip(mtd, chipnr);
1236 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); 2737 ret = nand_reset_op(chip);
1237 chip->select_chip(mtd, -1); 2738 chip->select_chip(mtd, -1);
2739 if (ret)
2740 return ret;
1238 2741
1239 chip->select_chip(mtd, chipnr); 2742 chip->select_chip(mtd, chipnr);
2743 chip->data_interface = saved_data_intf;
1240 ret = nand_setup_data_interface(chip, chipnr); 2744 ret = nand_setup_data_interface(chip, chipnr);
1241 chip->select_chip(mtd, -1); 2745 chip->select_chip(mtd, -1);
1242 if (ret) 2746 if (ret)
@@ -1390,9 +2894,19 @@ EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
1390int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 2894int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1391 uint8_t *buf, int oob_required, int page) 2895 uint8_t *buf, int oob_required, int page)
1392{ 2896{
1393 chip->read_buf(mtd, buf, mtd->writesize); 2897 int ret;
1394 if (oob_required) 2898
1395 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 2899 ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
2900 if (ret)
2901 return ret;
2902
2903 if (oob_required) {
2904 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
2905 false);
2906 if (ret)
2907 return ret;
2908 }
2909
1396 return 0; 2910 return 0;
1397} 2911}
1398EXPORT_SYMBOL(nand_read_page_raw); 2912EXPORT_SYMBOL(nand_read_page_raw);
@@ -1414,29 +2928,50 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1414 int eccsize = chip->ecc.size; 2928 int eccsize = chip->ecc.size;
1415 int eccbytes = chip->ecc.bytes; 2929 int eccbytes = chip->ecc.bytes;
1416 uint8_t *oob = chip->oob_poi; 2930 uint8_t *oob = chip->oob_poi;
1417 int steps, size; 2931 int steps, size, ret;
2932
2933 ret = nand_read_page_op(chip, page, 0, NULL, 0);
2934 if (ret)
2935 return ret;
1418 2936
1419 for (steps = chip->ecc.steps; steps > 0; steps--) { 2937 for (steps = chip->ecc.steps; steps > 0; steps--) {
1420 chip->read_buf(mtd, buf, eccsize); 2938 ret = nand_read_data_op(chip, buf, eccsize, false);
2939 if (ret)
2940 return ret;
2941
1421 buf += eccsize; 2942 buf += eccsize;
1422 2943
1423 if (chip->ecc.prepad) { 2944 if (chip->ecc.prepad) {
1424 chip->read_buf(mtd, oob, chip->ecc.prepad); 2945 ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
2946 false);
2947 if (ret)
2948 return ret;
2949
1425 oob += chip->ecc.prepad; 2950 oob += chip->ecc.prepad;
1426 } 2951 }
1427 2952
1428 chip->read_buf(mtd, oob, eccbytes); 2953 ret = nand_read_data_op(chip, oob, eccbytes, false);
2954 if (ret)
2955 return ret;
2956
1429 oob += eccbytes; 2957 oob += eccbytes;
1430 2958
1431 if (chip->ecc.postpad) { 2959 if (chip->ecc.postpad) {
1432 chip->read_buf(mtd, oob, chip->ecc.postpad); 2960 ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
2961 false);
2962 if (ret)
2963 return ret;
2964
1433 oob += chip->ecc.postpad; 2965 oob += chip->ecc.postpad;
1434 } 2966 }
1435 } 2967 }
1436 2968
1437 size = mtd->oobsize - (oob - chip->oob_poi); 2969 size = mtd->oobsize - (oob - chip->oob_poi);
1438 if (size) 2970 if (size) {
1439 chip->read_buf(mtd, oob, size); 2971 ret = nand_read_data_op(chip, oob, size, false);
2972 if (ret)
2973 return ret;
2974 }
1440 2975
1441 return 0; 2976 return 0;
1442} 2977}
@@ -1456,8 +2991,8 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1456 int eccbytes = chip->ecc.bytes; 2991 int eccbytes = chip->ecc.bytes;
1457 int eccsteps = chip->ecc.steps; 2992 int eccsteps = chip->ecc.steps;
1458 uint8_t *p = buf; 2993 uint8_t *p = buf;
1459 uint8_t *ecc_calc = chip->buffers->ecccalc; 2994 uint8_t *ecc_calc = chip->ecc.calc_buf;
1460 uint8_t *ecc_code = chip->buffers->ecccode; 2995 uint8_t *ecc_code = chip->ecc.code_buf;
1461 unsigned int max_bitflips = 0; 2996 unsigned int max_bitflips = 0;
1462 2997
1463 chip->ecc.read_page_raw(mtd, chip, buf, 1, page); 2998 chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
@@ -1521,15 +3056,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1521 3056
1522 data_col_addr = start_step * chip->ecc.size; 3057 data_col_addr = start_step * chip->ecc.size;
1523 /* If we read not a page aligned data */ 3058 /* If we read not a page aligned data */
1524 if (data_col_addr != 0)
1525 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
1526
1527 p = bufpoi + data_col_addr; 3059 p = bufpoi + data_col_addr;
1528 chip->read_buf(mtd, p, datafrag_len); 3060 ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
3061 if (ret)
3062 return ret;
1529 3063
1530 /* Calculate ECC */ 3064 /* Calculate ECC */
1531 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) 3065 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
1532 chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]); 3066 chip->ecc.calculate(mtd, p, &chip->ecc.calc_buf[i]);
1533 3067
1534 /* 3068 /*
1535 * The performance is faster if we position offsets according to 3069 * The performance is faster if we position offsets according to
@@ -1543,8 +3077,11 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1543 gaps = 1; 3077 gaps = 1;
1544 3078
1545 if (gaps) { 3079 if (gaps) {
1546 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1); 3080 ret = nand_change_read_column_op(chip, mtd->writesize,
1547 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 3081 chip->oob_poi, mtd->oobsize,
3082 false);
3083 if (ret)
3084 return ret;
1548 } else { 3085 } else {
1549 /* 3086 /*
1550 * Send the command to read the particular ECC bytes take care 3087 * Send the command to read the particular ECC bytes take care
@@ -1558,12 +3095,15 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1558 (busw - 1)) 3095 (busw - 1))
1559 aligned_len++; 3096 aligned_len++;
1560 3097
1561 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 3098 ret = nand_change_read_column_op(chip,
1562 mtd->writesize + aligned_pos, -1); 3099 mtd->writesize + aligned_pos,
1563 chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len); 3100 &chip->oob_poi[aligned_pos],
3101 aligned_len, false);
3102 if (ret)
3103 return ret;
1564 } 3104 }
1565 3105
1566 ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode, 3106 ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
1567 chip->oob_poi, index, eccfrag_len); 3107 chip->oob_poi, index, eccfrag_len);
1568 if (ret) 3108 if (ret)
1569 return ret; 3109 return ret;
@@ -1572,13 +3112,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1572 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) { 3112 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
1573 int stat; 3113 int stat;
1574 3114
1575 stat = chip->ecc.correct(mtd, p, 3115 stat = chip->ecc.correct(mtd, p, &chip->ecc.code_buf[i],
1576 &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]); 3116 &chip->ecc.calc_buf[i]);
1577 if (stat == -EBADMSG && 3117 if (stat == -EBADMSG &&
1578 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { 3118 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1579 /* check for empty pages with bitflips */ 3119 /* check for empty pages with bitflips */
1580 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size, 3120 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1581 &chip->buffers->ecccode[i], 3121 &chip->ecc.code_buf[i],
1582 chip->ecc.bytes, 3122 chip->ecc.bytes,
1583 NULL, 0, 3123 NULL, 0,
1584 chip->ecc.strength); 3124 chip->ecc.strength);
@@ -1611,16 +3151,27 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1611 int eccbytes = chip->ecc.bytes; 3151 int eccbytes = chip->ecc.bytes;
1612 int eccsteps = chip->ecc.steps; 3152 int eccsteps = chip->ecc.steps;
1613 uint8_t *p = buf; 3153 uint8_t *p = buf;
1614 uint8_t *ecc_calc = chip->buffers->ecccalc; 3154 uint8_t *ecc_calc = chip->ecc.calc_buf;
1615 uint8_t *ecc_code = chip->buffers->ecccode; 3155 uint8_t *ecc_code = chip->ecc.code_buf;
1616 unsigned int max_bitflips = 0; 3156 unsigned int max_bitflips = 0;
1617 3157
3158 ret = nand_read_page_op(chip, page, 0, NULL, 0);
3159 if (ret)
3160 return ret;
3161
1618 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 3162 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1619 chip->ecc.hwctl(mtd, NAND_ECC_READ); 3163 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1620 chip->read_buf(mtd, p, eccsize); 3164
3165 ret = nand_read_data_op(chip, p, eccsize, false);
3166 if (ret)
3167 return ret;
3168
1621 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 3169 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1622 } 3170 }
1623 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 3171
3172 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
3173 if (ret)
3174 return ret;
1624 3175
1625 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, 3176 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1626 chip->ecc.total); 3177 chip->ecc.total);
@@ -1674,14 +3225,18 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1674 int eccbytes = chip->ecc.bytes; 3225 int eccbytes = chip->ecc.bytes;
1675 int eccsteps = chip->ecc.steps; 3226 int eccsteps = chip->ecc.steps;
1676 uint8_t *p = buf; 3227 uint8_t *p = buf;
1677 uint8_t *ecc_code = chip->buffers->ecccode; 3228 uint8_t *ecc_code = chip->ecc.code_buf;
1678 uint8_t *ecc_calc = chip->buffers->ecccalc; 3229 uint8_t *ecc_calc = chip->ecc.calc_buf;
1679 unsigned int max_bitflips = 0; 3230 unsigned int max_bitflips = 0;
1680 3231
1681 /* Read the OOB area first */ 3232 /* Read the OOB area first */
1682 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); 3233 ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
1683 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 3234 if (ret)
1684 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); 3235 return ret;
3236
3237 ret = nand_read_page_op(chip, page, 0, NULL, 0);
3238 if (ret)
3239 return ret;
1685 3240
1686 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, 3241 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1687 chip->ecc.total); 3242 chip->ecc.total);
@@ -1692,7 +3247,11 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1692 int stat; 3247 int stat;
1693 3248
1694 chip->ecc.hwctl(mtd, NAND_ECC_READ); 3249 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1695 chip->read_buf(mtd, p, eccsize); 3250
3251 ret = nand_read_data_op(chip, p, eccsize, false);
3252 if (ret)
3253 return ret;
3254
1696 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 3255 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1697 3256
1698 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL); 3257 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
@@ -1729,7 +3288,7 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1729static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 3288static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1730 uint8_t *buf, int oob_required, int page) 3289 uint8_t *buf, int oob_required, int page)
1731{ 3290{
1732 int i, eccsize = chip->ecc.size; 3291 int ret, i, eccsize = chip->ecc.size;
1733 int eccbytes = chip->ecc.bytes; 3292 int eccbytes = chip->ecc.bytes;
1734 int eccsteps = chip->ecc.steps; 3293 int eccsteps = chip->ecc.steps;
1735 int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad; 3294 int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
@@ -1737,25 +3296,44 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1737 uint8_t *oob = chip->oob_poi; 3296 uint8_t *oob = chip->oob_poi;
1738 unsigned int max_bitflips = 0; 3297 unsigned int max_bitflips = 0;
1739 3298
3299 ret = nand_read_page_op(chip, page, 0, NULL, 0);
3300 if (ret)
3301 return ret;
3302
1740 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 3303 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1741 int stat; 3304 int stat;
1742 3305
1743 chip->ecc.hwctl(mtd, NAND_ECC_READ); 3306 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1744 chip->read_buf(mtd, p, eccsize); 3307
3308 ret = nand_read_data_op(chip, p, eccsize, false);
3309 if (ret)
3310 return ret;
1745 3311
1746 if (chip->ecc.prepad) { 3312 if (chip->ecc.prepad) {
1747 chip->read_buf(mtd, oob, chip->ecc.prepad); 3313 ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
3314 false);
3315 if (ret)
3316 return ret;
3317
1748 oob += chip->ecc.prepad; 3318 oob += chip->ecc.prepad;
1749 } 3319 }
1750 3320
1751 chip->ecc.hwctl(mtd, NAND_ECC_READSYN); 3321 chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
1752 chip->read_buf(mtd, oob, eccbytes); 3322
3323 ret = nand_read_data_op(chip, oob, eccbytes, false);
3324 if (ret)
3325 return ret;
3326
1753 stat = chip->ecc.correct(mtd, p, oob, NULL); 3327 stat = chip->ecc.correct(mtd, p, oob, NULL);
1754 3328
1755 oob += eccbytes; 3329 oob += eccbytes;
1756 3330
1757 if (chip->ecc.postpad) { 3331 if (chip->ecc.postpad) {
1758 chip->read_buf(mtd, oob, chip->ecc.postpad); 3332 ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
3333 false);
3334 if (ret)
3335 return ret;
3336
1759 oob += chip->ecc.postpad; 3337 oob += chip->ecc.postpad;
1760 } 3338 }
1761 3339
@@ -1779,8 +3357,11 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1779 3357
1780 /* Calculate remaining oob bytes */ 3358 /* Calculate remaining oob bytes */
1781 i = mtd->oobsize - (oob - chip->oob_poi); 3359 i = mtd->oobsize - (oob - chip->oob_poi);
1782 if (i) 3360 if (i) {
1783 chip->read_buf(mtd, oob, i); 3361 ret = nand_read_data_op(chip, oob, i, false);
3362 if (ret)
3363 return ret;
3364 }
1784 3365
1785 return max_bitflips; 3366 return max_bitflips;
1786} 3367}
@@ -1894,16 +3475,13 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1894 3475
1895 /* Is the current page in the buffer? */ 3476 /* Is the current page in the buffer? */
1896 if (realpage != chip->pagebuf || oob) { 3477 if (realpage != chip->pagebuf || oob) {
1897 bufpoi = use_bufpoi ? chip->buffers->databuf : buf; 3478 bufpoi = use_bufpoi ? chip->data_buf : buf;
1898 3479
1899 if (use_bufpoi && aligned) 3480 if (use_bufpoi && aligned)
1900 pr_debug("%s: using read bounce buffer for buf@%p\n", 3481 pr_debug("%s: using read bounce buffer for buf@%p\n",
1901 __func__, buf); 3482 __func__, buf);
1902 3483
1903read_retry: 3484read_retry:
1904 if (nand_standard_page_accessors(&chip->ecc))
1905 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1906
1907 /* 3485 /*
1908 * Now read the page into the buffer. Absent an error, 3486 * Now read the page into the buffer. Absent an error,
1909 * the read methods return max bitflips per ecc step. 3487 * the read methods return max bitflips per ecc step.
@@ -1938,7 +3516,7 @@ read_retry:
1938 /* Invalidate page cache */ 3516 /* Invalidate page cache */
1939 chip->pagebuf = -1; 3517 chip->pagebuf = -1;
1940 } 3518 }
1941 memcpy(buf, chip->buffers->databuf + col, bytes); 3519 memcpy(buf, chip->data_buf + col, bytes);
1942 } 3520 }
1943 3521
1944 if (unlikely(oob)) { 3522 if (unlikely(oob)) {
@@ -1979,7 +3557,7 @@ read_retry:
1979 buf += bytes; 3557 buf += bytes;
1980 max_bitflips = max_t(unsigned int, max_bitflips, ret); 3558 max_bitflips = max_t(unsigned int, max_bitflips, ret);
1981 } else { 3559 } else {
1982 memcpy(buf, chip->buffers->databuf + col, bytes); 3560 memcpy(buf, chip->data_buf + col, bytes);
1983 buf += bytes; 3561 buf += bytes;
1984 max_bitflips = max_t(unsigned int, max_bitflips, 3562 max_bitflips = max_t(unsigned int, max_bitflips,
1985 chip->pagebuf_bitflips); 3563 chip->pagebuf_bitflips);
@@ -2034,9 +3612,7 @@ read_retry:
2034 */ 3612 */
2035int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page) 3613int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2036{ 3614{
2037 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); 3615 return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
2038 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
2039 return 0;
2040} 3616}
2041EXPORT_SYMBOL(nand_read_oob_std); 3617EXPORT_SYMBOL(nand_read_oob_std);
2042 3618
@@ -2054,25 +3630,43 @@ int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2054 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad; 3630 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2055 int eccsize = chip->ecc.size; 3631 int eccsize = chip->ecc.size;
2056 uint8_t *bufpoi = chip->oob_poi; 3632 uint8_t *bufpoi = chip->oob_poi;
2057 int i, toread, sndrnd = 0, pos; 3633 int i, toread, sndrnd = 0, pos, ret;
3634
3635 ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
3636 if (ret)
3637 return ret;
2058 3638
2059 chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
2060 for (i = 0; i < chip->ecc.steps; i++) { 3639 for (i = 0; i < chip->ecc.steps; i++) {
2061 if (sndrnd) { 3640 if (sndrnd) {
3641 int ret;
3642
2062 pos = eccsize + i * (eccsize + chunk); 3643 pos = eccsize + i * (eccsize + chunk);
2063 if (mtd->writesize > 512) 3644 if (mtd->writesize > 512)
2064 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1); 3645 ret = nand_change_read_column_op(chip, pos,
3646 NULL, 0,
3647 false);
2065 else 3648 else
2066 chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page); 3649 ret = nand_read_page_op(chip, page, pos, NULL,
3650 0);
3651
3652 if (ret)
3653 return ret;
2067 } else 3654 } else
2068 sndrnd = 1; 3655 sndrnd = 1;
2069 toread = min_t(int, length, chunk); 3656 toread = min_t(int, length, chunk);
2070 chip->read_buf(mtd, bufpoi, toread); 3657
3658 ret = nand_read_data_op(chip, bufpoi, toread, false);
3659 if (ret)
3660 return ret;
3661
2071 bufpoi += toread; 3662 bufpoi += toread;
2072 length -= toread; 3663 length -= toread;
2073 } 3664 }
2074 if (length > 0) 3665 if (length > 0) {
2075 chip->read_buf(mtd, bufpoi, length); 3666 ret = nand_read_data_op(chip, bufpoi, length, false);
3667 if (ret)
3668 return ret;
3669 }
2076 3670
2077 return 0; 3671 return 0;
2078} 3672}
@@ -2086,18 +3680,8 @@ EXPORT_SYMBOL(nand_read_oob_syndrome);
2086 */ 3680 */
2087int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page) 3681int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2088{ 3682{
2089 int status = 0; 3683 return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
2090 const uint8_t *buf = chip->oob_poi; 3684 mtd->oobsize);
2091 int length = mtd->oobsize;
2092
2093 chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
2094 chip->write_buf(mtd, buf, length);
2095 /* Send command to program the OOB data */
2096 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2097
2098 status = chip->waitfunc(mtd, chip);
2099
2100 return status & NAND_STATUS_FAIL ? -EIO : 0;
2101} 3685}
2102EXPORT_SYMBOL(nand_write_oob_std); 3686EXPORT_SYMBOL(nand_write_oob_std);
2103 3687
@@ -2113,7 +3697,7 @@ int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2113{ 3697{
2114 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad; 3698 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2115 int eccsize = chip->ecc.size, length = mtd->oobsize; 3699 int eccsize = chip->ecc.size, length = mtd->oobsize;
2116 int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps; 3700 int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
2117 const uint8_t *bufpoi = chip->oob_poi; 3701 const uint8_t *bufpoi = chip->oob_poi;
2118 3702
2119 /* 3703 /*
@@ -2127,7 +3711,10 @@ int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2127 } else 3711 } else
2128 pos = eccsize; 3712 pos = eccsize;
2129 3713
2130 chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page); 3714 ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
3715 if (ret)
3716 return ret;
3717
2131 for (i = 0; i < steps; i++) { 3718 for (i = 0; i < steps; i++) {
2132 if (sndcmd) { 3719 if (sndcmd) {
2133 if (mtd->writesize <= 512) { 3720 if (mtd->writesize <= 512) {
@@ -2136,28 +3723,40 @@ int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2136 len = eccsize; 3723 len = eccsize;
2137 while (len > 0) { 3724 while (len > 0) {
2138 int num = min_t(int, len, 4); 3725 int num = min_t(int, len, 4);
2139 chip->write_buf(mtd, (uint8_t *)&fill, 3726
2140 num); 3727 ret = nand_write_data_op(chip, &fill,
3728 num, false);
3729 if (ret)
3730 return ret;
3731
2141 len -= num; 3732 len -= num;
2142 } 3733 }
2143 } else { 3734 } else {
2144 pos = eccsize + i * (eccsize + chunk); 3735 pos = eccsize + i * (eccsize + chunk);
2145 chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1); 3736 ret = nand_change_write_column_op(chip, pos,
3737 NULL, 0,
3738 false);
3739 if (ret)
3740 return ret;
2146 } 3741 }
2147 } else 3742 } else
2148 sndcmd = 1; 3743 sndcmd = 1;
2149 len = min_t(int, length, chunk); 3744 len = min_t(int, length, chunk);
2150 chip->write_buf(mtd, bufpoi, len); 3745
3746 ret = nand_write_data_op(chip, bufpoi, len, false);
3747 if (ret)
3748 return ret;
3749
2151 bufpoi += len; 3750 bufpoi += len;
2152 length -= len; 3751 length -= len;
2153 } 3752 }
2154 if (length > 0) 3753 if (length > 0) {
2155 chip->write_buf(mtd, bufpoi, length); 3754 ret = nand_write_data_op(chip, bufpoi, length, false);
2156 3755 if (ret)
2157 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 3756 return ret;
2158 status = chip->waitfunc(mtd, chip); 3757 }
2159 3758
2160 return status & NAND_STATUS_FAIL ? -EIO : 0; 3759 return nand_prog_page_end_op(chip);
2161} 3760}
2162EXPORT_SYMBOL(nand_write_oob_syndrome); 3761EXPORT_SYMBOL(nand_write_oob_syndrome);
2163 3762
@@ -2172,6 +3771,7 @@ EXPORT_SYMBOL(nand_write_oob_syndrome);
2172static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, 3771static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
2173 struct mtd_oob_ops *ops) 3772 struct mtd_oob_ops *ops)
2174{ 3773{
3774 unsigned int max_bitflips = 0;
2175 int page, realpage, chipnr; 3775 int page, realpage, chipnr;
2176 struct nand_chip *chip = mtd_to_nand(mtd); 3776 struct nand_chip *chip = mtd_to_nand(mtd);
2177 struct mtd_ecc_stats stats; 3777 struct mtd_ecc_stats stats;
@@ -2214,6 +3814,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
2214 nand_wait_ready(mtd); 3814 nand_wait_ready(mtd);
2215 } 3815 }
2216 3816
3817 max_bitflips = max_t(unsigned int, max_bitflips, ret);
3818
2217 readlen -= len; 3819 readlen -= len;
2218 if (!readlen) 3820 if (!readlen)
2219 break; 3821 break;
@@ -2239,7 +3841,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
2239 if (mtd->ecc_stats.failed - stats.failed) 3841 if (mtd->ecc_stats.failed - stats.failed)
2240 return -EBADMSG; 3842 return -EBADMSG;
2241 3843
2242 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0; 3844 return max_bitflips;
2243} 3845}
2244 3846
2245/** 3847/**
@@ -2287,11 +3889,20 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
2287int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 3889int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
2288 const uint8_t *buf, int oob_required, int page) 3890 const uint8_t *buf, int oob_required, int page)
2289{ 3891{
2290 chip->write_buf(mtd, buf, mtd->writesize); 3892 int ret;
2291 if (oob_required)
2292 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2293 3893
2294 return 0; 3894 ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
3895 if (ret)
3896 return ret;
3897
3898 if (oob_required) {
3899 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
3900 false);
3901 if (ret)
3902 return ret;
3903 }
3904
3905 return nand_prog_page_end_op(chip);
2295} 3906}
2296EXPORT_SYMBOL(nand_write_page_raw); 3907EXPORT_SYMBOL(nand_write_page_raw);
2297 3908
@@ -2313,31 +3924,52 @@ static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
2313 int eccsize = chip->ecc.size; 3924 int eccsize = chip->ecc.size;
2314 int eccbytes = chip->ecc.bytes; 3925 int eccbytes = chip->ecc.bytes;
2315 uint8_t *oob = chip->oob_poi; 3926 uint8_t *oob = chip->oob_poi;
2316 int steps, size; 3927 int steps, size, ret;
3928
3929 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3930 if (ret)
3931 return ret;
2317 3932
2318 for (steps = chip->ecc.steps; steps > 0; steps--) { 3933 for (steps = chip->ecc.steps; steps > 0; steps--) {
2319 chip->write_buf(mtd, buf, eccsize); 3934 ret = nand_write_data_op(chip, buf, eccsize, false);
3935 if (ret)
3936 return ret;
3937
2320 buf += eccsize; 3938 buf += eccsize;
2321 3939
2322 if (chip->ecc.prepad) { 3940 if (chip->ecc.prepad) {
2323 chip->write_buf(mtd, oob, chip->ecc.prepad); 3941 ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
3942 false);
3943 if (ret)
3944 return ret;
3945
2324 oob += chip->ecc.prepad; 3946 oob += chip->ecc.prepad;
2325 } 3947 }
2326 3948
2327 chip->write_buf(mtd, oob, eccbytes); 3949 ret = nand_write_data_op(chip, oob, eccbytes, false);
3950 if (ret)
3951 return ret;
3952
2328 oob += eccbytes; 3953 oob += eccbytes;
2329 3954
2330 if (chip->ecc.postpad) { 3955 if (chip->ecc.postpad) {
2331 chip->write_buf(mtd, oob, chip->ecc.postpad); 3956 ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
3957 false);
3958 if (ret)
3959 return ret;
3960
2332 oob += chip->ecc.postpad; 3961 oob += chip->ecc.postpad;
2333 } 3962 }
2334 } 3963 }
2335 3964
2336 size = mtd->oobsize - (oob - chip->oob_poi); 3965 size = mtd->oobsize - (oob - chip->oob_poi);
2337 if (size) 3966 if (size) {
2338 chip->write_buf(mtd, oob, size); 3967 ret = nand_write_data_op(chip, oob, size, false);
3968 if (ret)
3969 return ret;
3970 }
2339 3971
2340 return 0; 3972 return nand_prog_page_end_op(chip);
2341} 3973}
2342/** 3974/**
2343 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function 3975 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
@@ -2354,7 +3986,7 @@ static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
2354 int i, eccsize = chip->ecc.size, ret; 3986 int i, eccsize = chip->ecc.size, ret;
2355 int eccbytes = chip->ecc.bytes; 3987 int eccbytes = chip->ecc.bytes;
2356 int eccsteps = chip->ecc.steps; 3988 int eccsteps = chip->ecc.steps;
2357 uint8_t *ecc_calc = chip->buffers->ecccalc; 3989 uint8_t *ecc_calc = chip->ecc.calc_buf;
2358 const uint8_t *p = buf; 3990 const uint8_t *p = buf;
2359 3991
2360 /* Software ECC calculation */ 3992 /* Software ECC calculation */
@@ -2384,12 +4016,20 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2384 int i, eccsize = chip->ecc.size, ret; 4016 int i, eccsize = chip->ecc.size, ret;
2385 int eccbytes = chip->ecc.bytes; 4017 int eccbytes = chip->ecc.bytes;
2386 int eccsteps = chip->ecc.steps; 4018 int eccsteps = chip->ecc.steps;
2387 uint8_t *ecc_calc = chip->buffers->ecccalc; 4019 uint8_t *ecc_calc = chip->ecc.calc_buf;
2388 const uint8_t *p = buf; 4020 const uint8_t *p = buf;
2389 4021
4022 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
4023 if (ret)
4024 return ret;
4025
2390 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 4026 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2391 chip->ecc.hwctl(mtd, NAND_ECC_WRITE); 4027 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2392 chip->write_buf(mtd, p, eccsize); 4028
4029 ret = nand_write_data_op(chip, p, eccsize, false);
4030 if (ret)
4031 return ret;
4032
2393 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 4033 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2394 } 4034 }
2395 4035
@@ -2398,9 +4038,11 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2398 if (ret) 4038 if (ret)
2399 return ret; 4039 return ret;
2400 4040
2401 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 4041 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
4042 if (ret)
4043 return ret;
2402 4044
2403 return 0; 4045 return nand_prog_page_end_op(chip);
2404} 4046}
2405 4047
2406 4048
@@ -2420,7 +4062,7 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
2420 int oob_required, int page) 4062 int oob_required, int page)
2421{ 4063{
2422 uint8_t *oob_buf = chip->oob_poi; 4064 uint8_t *oob_buf = chip->oob_poi;
2423 uint8_t *ecc_calc = chip->buffers->ecccalc; 4065 uint8_t *ecc_calc = chip->ecc.calc_buf;
2424 int ecc_size = chip->ecc.size; 4066 int ecc_size = chip->ecc.size;
2425 int ecc_bytes = chip->ecc.bytes; 4067 int ecc_bytes = chip->ecc.bytes;
2426 int ecc_steps = chip->ecc.steps; 4068 int ecc_steps = chip->ecc.steps;
@@ -2429,12 +4071,18 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
2429 int oob_bytes = mtd->oobsize / ecc_steps; 4071 int oob_bytes = mtd->oobsize / ecc_steps;
2430 int step, ret; 4072 int step, ret;
2431 4073
4074 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
4075 if (ret)
4076 return ret;
4077
2432 for (step = 0; step < ecc_steps; step++) { 4078 for (step = 0; step < ecc_steps; step++) {
2433 /* configure controller for WRITE access */ 4079 /* configure controller for WRITE access */
2434 chip->ecc.hwctl(mtd, NAND_ECC_WRITE); 4080 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2435 4081
2436 /* write data (untouched subpages already masked by 0xFF) */ 4082 /* write data (untouched subpages already masked by 0xFF) */
2437 chip->write_buf(mtd, buf, ecc_size); 4083 ret = nand_write_data_op(chip, buf, ecc_size, false);
4084 if (ret)
4085 return ret;
2438 4086
2439 /* mask ECC of un-touched subpages by padding 0xFF */ 4087 /* mask ECC of un-touched subpages by padding 0xFF */
2440 if ((step < start_step) || (step > end_step)) 4088 if ((step < start_step) || (step > end_step))
@@ -2454,16 +4102,18 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
2454 4102
2455 /* copy calculated ECC for whole page to chip->buffer->oob */ 4103 /* copy calculated ECC for whole page to chip->buffer->oob */
2456 /* this include masked-value(0xFF) for unwritten subpages */ 4104 /* this include masked-value(0xFF) for unwritten subpages */
2457 ecc_calc = chip->buffers->ecccalc; 4105 ecc_calc = chip->ecc.calc_buf;
2458 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, 4106 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2459 chip->ecc.total); 4107 chip->ecc.total);
2460 if (ret) 4108 if (ret)
2461 return ret; 4109 return ret;
2462 4110
2463 /* write OOB buffer to NAND device */ 4111 /* write OOB buffer to NAND device */
2464 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 4112 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
4113 if (ret)
4114 return ret;
2465 4115
2466 return 0; 4116 return nand_prog_page_end_op(chip);
2467} 4117}
2468 4118
2469 4119
@@ -2488,33 +4138,55 @@ static int nand_write_page_syndrome(struct mtd_info *mtd,
2488 int eccsteps = chip->ecc.steps; 4138 int eccsteps = chip->ecc.steps;
2489 const uint8_t *p = buf; 4139 const uint8_t *p = buf;
2490 uint8_t *oob = chip->oob_poi; 4140 uint8_t *oob = chip->oob_poi;
4141 int ret;
2491 4142
2492 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 4143 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
4144 if (ret)
4145 return ret;
2493 4146
4147 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2494 chip->ecc.hwctl(mtd, NAND_ECC_WRITE); 4148 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2495 chip->write_buf(mtd, p, eccsize); 4149
4150 ret = nand_write_data_op(chip, p, eccsize, false);
4151 if (ret)
4152 return ret;
2496 4153
2497 if (chip->ecc.prepad) { 4154 if (chip->ecc.prepad) {
2498 chip->write_buf(mtd, oob, chip->ecc.prepad); 4155 ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
4156 false);
4157 if (ret)
4158 return ret;
4159
2499 oob += chip->ecc.prepad; 4160 oob += chip->ecc.prepad;
2500 } 4161 }
2501 4162
2502 chip->ecc.calculate(mtd, p, oob); 4163 chip->ecc.calculate(mtd, p, oob);
2503 chip->write_buf(mtd, oob, eccbytes); 4164
4165 ret = nand_write_data_op(chip, oob, eccbytes, false);
4166 if (ret)
4167 return ret;
4168
2504 oob += eccbytes; 4169 oob += eccbytes;
2505 4170
2506 if (chip->ecc.postpad) { 4171 if (chip->ecc.postpad) {
2507 chip->write_buf(mtd, oob, chip->ecc.postpad); 4172 ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
4173 false);
4174 if (ret)
4175 return ret;
4176
2508 oob += chip->ecc.postpad; 4177 oob += chip->ecc.postpad;
2509 } 4178 }
2510 } 4179 }
2511 4180
2512 /* Calculate remaining oob bytes */ 4181 /* Calculate remaining oob bytes */
2513 i = mtd->oobsize - (oob - chip->oob_poi); 4182 i = mtd->oobsize - (oob - chip->oob_poi);
2514 if (i) 4183 if (i) {
2515 chip->write_buf(mtd, oob, i); 4184 ret = nand_write_data_op(chip, oob, i, false);
4185 if (ret)
4186 return ret;
4187 }
2516 4188
2517 return 0; 4189 return nand_prog_page_end_op(chip);
2518} 4190}
2519 4191
2520/** 4192/**
@@ -2540,9 +4212,6 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2540 else 4212 else
2541 subpage = 0; 4213 subpage = 0;
2542 4214
2543 if (nand_standard_page_accessors(&chip->ecc))
2544 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
2545
2546 if (unlikely(raw)) 4215 if (unlikely(raw))
2547 status = chip->ecc.write_page_raw(mtd, chip, buf, 4216 status = chip->ecc.write_page_raw(mtd, chip, buf,
2548 oob_required, page); 4217 oob_required, page);
@@ -2556,14 +4225,6 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2556 if (status < 0) 4225 if (status < 0)
2557 return status; 4226 return status;
2558 4227
2559 if (nand_standard_page_accessors(&chip->ecc)) {
2560 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2561
2562 status = chip->waitfunc(mtd, chip);
2563 if (status & NAND_STATUS_FAIL)
2564 return -EIO;
2565 }
2566
2567 return 0; 4228 return 0;
2568} 4229}
2569 4230
@@ -2688,9 +4349,9 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2688 if (part_pagewr) 4349 if (part_pagewr)
2689 bytes = min_t(int, bytes - column, writelen); 4350 bytes = min_t(int, bytes - column, writelen);
2690 chip->pagebuf = -1; 4351 chip->pagebuf = -1;
2691 memset(chip->buffers->databuf, 0xff, mtd->writesize); 4352 memset(chip->data_buf, 0xff, mtd->writesize);
2692 memcpy(&chip->buffers->databuf[column], buf, bytes); 4353 memcpy(&chip->data_buf[column], buf, bytes);
2693 wbuf = chip->buffers->databuf; 4354 wbuf = chip->data_buf;
2694 } 4355 }
2695 4356
2696 if (unlikely(oob)) { 4357 if (unlikely(oob)) {
@@ -2885,11 +4546,12 @@ out:
2885static int single_erase(struct mtd_info *mtd, int page) 4546static int single_erase(struct mtd_info *mtd, int page)
2886{ 4547{
2887 struct nand_chip *chip = mtd_to_nand(mtd); 4548 struct nand_chip *chip = mtd_to_nand(mtd);
4549 unsigned int eraseblock;
4550
2888 /* Send commands to erase a block */ 4551 /* Send commands to erase a block */
2889 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page); 4552 eraseblock = page >> (chip->phys_erase_shift - chip->page_shift);
2890 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
2891 4553
2892 return chip->waitfunc(mtd, chip); 4554 return nand_erase_op(chip, eraseblock);
2893} 4555}
2894 4556
2895/** 4557/**
@@ -2973,7 +4635,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2973 status = chip->erase(mtd, page & chip->pagemask); 4635 status = chip->erase(mtd, page & chip->pagemask);
2974 4636
2975 /* See if block erase succeeded */ 4637 /* See if block erase succeeded */
2976 if (status & NAND_STATUS_FAIL) { 4638 if (status) {
2977 pr_debug("%s: failed erase, page 0x%08x\n", 4639 pr_debug("%s: failed erase, page 0x%08x\n",
2978 __func__, page); 4640 __func__, page);
2979 instr->state = MTD_ERASE_FAILED; 4641 instr->state = MTD_ERASE_FAILED;
@@ -3116,22 +4778,12 @@ static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
3116static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip, 4778static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
3117 int addr, uint8_t *subfeature_param) 4779 int addr, uint8_t *subfeature_param)
3118{ 4780{
3119 int status;
3120 int i;
3121
3122 if (!chip->onfi_version || 4781 if (!chip->onfi_version ||
3123 !(le16_to_cpu(chip->onfi_params.opt_cmd) 4782 !(le16_to_cpu(chip->onfi_params.opt_cmd)
3124 & ONFI_OPT_CMD_SET_GET_FEATURES)) 4783 & ONFI_OPT_CMD_SET_GET_FEATURES))
3125 return -EINVAL; 4784 return -EINVAL;
3126 4785
3127 chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1); 4786 return nand_set_features_op(chip, addr, subfeature_param);
3128 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3129 chip->write_byte(mtd, subfeature_param[i]);
3130
3131 status = chip->waitfunc(mtd, chip);
3132 if (status & NAND_STATUS_FAIL)
3133 return -EIO;
3134 return 0;
3135} 4787}
3136 4788
3137/** 4789/**
@@ -3144,17 +4796,12 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
3144static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip, 4796static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
3145 int addr, uint8_t *subfeature_param) 4797 int addr, uint8_t *subfeature_param)
3146{ 4798{
3147 int i;
3148
3149 if (!chip->onfi_version || 4799 if (!chip->onfi_version ||
3150 !(le16_to_cpu(chip->onfi_params.opt_cmd) 4800 !(le16_to_cpu(chip->onfi_params.opt_cmd)
3151 & ONFI_OPT_CMD_SET_GET_FEATURES)) 4801 & ONFI_OPT_CMD_SET_GET_FEATURES))
3152 return -EINVAL; 4802 return -EINVAL;
3153 4803
3154 chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1); 4804 return nand_get_features_op(chip, addr, subfeature_param);
3155 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3156 *subfeature_param++ = chip->read_byte(mtd);
3157 return 0;
3158} 4805}
3159 4806
3160/** 4807/**
@@ -3220,7 +4867,7 @@ static void nand_set_defaults(struct nand_chip *chip)
3220 chip->chip_delay = 20; 4867 chip->chip_delay = 20;
3221 4868
3222 /* check, if a user supplied command function given */ 4869 /* check, if a user supplied command function given */
3223 if (chip->cmdfunc == NULL) 4870 if (!chip->cmdfunc && !chip->exec_op)
3224 chip->cmdfunc = nand_command; 4871 chip->cmdfunc = nand_command;
3225 4872
3226 /* check, if a user supplied wait function given */ 4873 /* check, if a user supplied wait function given */
@@ -3297,12 +4944,11 @@ static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
3297static int nand_flash_detect_ext_param_page(struct nand_chip *chip, 4944static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
3298 struct nand_onfi_params *p) 4945 struct nand_onfi_params *p)
3299{ 4946{
3300 struct mtd_info *mtd = nand_to_mtd(chip);
3301 struct onfi_ext_param_page *ep; 4947 struct onfi_ext_param_page *ep;
3302 struct onfi_ext_section *s; 4948 struct onfi_ext_section *s;
3303 struct onfi_ext_ecc_info *ecc; 4949 struct onfi_ext_ecc_info *ecc;
3304 uint8_t *cursor; 4950 uint8_t *cursor;
3305 int ret = -EINVAL; 4951 int ret;
3306 int len; 4952 int len;
3307 int i; 4953 int i;
3308 4954
@@ -3312,14 +4958,18 @@ static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
3312 return -ENOMEM; 4958 return -ENOMEM;
3313 4959
3314 /* Send our own NAND_CMD_PARAM. */ 4960 /* Send our own NAND_CMD_PARAM. */
3315 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1); 4961 ret = nand_read_param_page_op(chip, 0, NULL, 0);
4962 if (ret)
4963 goto ext_out;
3316 4964
3317 /* Use the Change Read Column command to skip the ONFI param pages. */ 4965 /* Use the Change Read Column command to skip the ONFI param pages. */
3318 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 4966 ret = nand_change_read_column_op(chip,
3319 sizeof(*p) * p->num_of_param_pages , -1); 4967 sizeof(*p) * p->num_of_param_pages,
4968 ep, len, true);
4969 if (ret)
4970 goto ext_out;
3320 4971
3321 /* Read out the Extended Parameter Page. */ 4972 ret = -EINVAL;
3322 chip->read_buf(mtd, (uint8_t *)ep, len);
3323 if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2) 4973 if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
3324 != le16_to_cpu(ep->crc))) { 4974 != le16_to_cpu(ep->crc))) {
3325 pr_debug("fail in the CRC.\n"); 4975 pr_debug("fail in the CRC.\n");
@@ -3372,19 +5022,23 @@ static int nand_flash_detect_onfi(struct nand_chip *chip)
3372{ 5022{
3373 struct mtd_info *mtd = nand_to_mtd(chip); 5023 struct mtd_info *mtd = nand_to_mtd(chip);
3374 struct nand_onfi_params *p = &chip->onfi_params; 5024 struct nand_onfi_params *p = &chip->onfi_params;
3375 int i, j; 5025 char id[4];
3376 int val; 5026 int i, ret, val;
3377 5027
3378 /* Try ONFI for unknown chip or LP */ 5028 /* Try ONFI for unknown chip or LP */
3379 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1); 5029 ret = nand_readid_op(chip, 0x20, id, sizeof(id));
3380 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' || 5030 if (ret || strncmp(id, "ONFI", 4))
3381 chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I') 5031 return 0;
5032
5033 ret = nand_read_param_page_op(chip, 0, NULL, 0);
5034 if (ret)
3382 return 0; 5035 return 0;
3383 5036
3384 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
3385 for (i = 0; i < 3; i++) { 5037 for (i = 0; i < 3; i++) {
3386 for (j = 0; j < sizeof(*p); j++) 5038 ret = nand_read_data_op(chip, p, sizeof(*p), true);
3387 ((uint8_t *)p)[j] = chip->read_byte(mtd); 5039 if (ret)
5040 return 0;
5041
3388 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) == 5042 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
3389 le16_to_cpu(p->crc)) { 5043 le16_to_cpu(p->crc)) {
3390 break; 5044 break;
@@ -3475,20 +5129,22 @@ static int nand_flash_detect_jedec(struct nand_chip *chip)
3475 struct mtd_info *mtd = nand_to_mtd(chip); 5129 struct mtd_info *mtd = nand_to_mtd(chip);
3476 struct nand_jedec_params *p = &chip->jedec_params; 5130 struct nand_jedec_params *p = &chip->jedec_params;
3477 struct jedec_ecc_info *ecc; 5131 struct jedec_ecc_info *ecc;
3478 int val; 5132 char id[5];
3479 int i, j; 5133 int i, val, ret;
3480 5134
3481 /* Try JEDEC for unknown chip or LP */ 5135 /* Try JEDEC for unknown chip or LP */
3482 chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1); 5136 ret = nand_readid_op(chip, 0x40, id, sizeof(id));
3483 if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' || 5137 if (ret || strncmp(id, "JEDEC", sizeof(id)))
3484 chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' || 5138 return 0;
3485 chip->read_byte(mtd) != 'C') 5139
5140 ret = nand_read_param_page_op(chip, 0x40, NULL, 0);
5141 if (ret)
3486 return 0; 5142 return 0;
3487 5143
3488 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
3489 for (i = 0; i < 3; i++) { 5144 for (i = 0; i < 3; i++) {
3490 for (j = 0; j < sizeof(*p); j++) 5145 ret = nand_read_data_op(chip, p, sizeof(*p), true);
3491 ((uint8_t *)p)[j] = chip->read_byte(mtd); 5146 if (ret)
5147 return 0;
3492 5148
3493 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) == 5149 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
3494 le16_to_cpu(p->crc)) 5150 le16_to_cpu(p->crc))
@@ -3767,8 +5423,7 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
3767{ 5423{
3768 const struct nand_manufacturer *manufacturer; 5424 const struct nand_manufacturer *manufacturer;
3769 struct mtd_info *mtd = nand_to_mtd(chip); 5425 struct mtd_info *mtd = nand_to_mtd(chip);
3770 int busw; 5426 int busw, ret;
3771 int i;
3772 u8 *id_data = chip->id.data; 5427 u8 *id_data = chip->id.data;
3773 u8 maf_id, dev_id; 5428 u8 maf_id, dev_id;
3774 5429
@@ -3776,17 +5431,21 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
3776 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) 5431 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
3777 * after power-up. 5432 * after power-up.
3778 */ 5433 */
3779 nand_reset(chip, 0); 5434 ret = nand_reset(chip, 0);
5435 if (ret)
5436 return ret;
3780 5437
3781 /* Select the device */ 5438 /* Select the device */
3782 chip->select_chip(mtd, 0); 5439 chip->select_chip(mtd, 0);
3783 5440
3784 /* Send the command for reading device ID */ 5441 /* Send the command for reading device ID */
3785 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 5442 ret = nand_readid_op(chip, 0, id_data, 2);
5443 if (ret)
5444 return ret;
3786 5445
3787 /* Read manufacturer and device IDs */ 5446 /* Read manufacturer and device IDs */
3788 maf_id = chip->read_byte(mtd); 5447 maf_id = id_data[0];
3789 dev_id = chip->read_byte(mtd); 5448 dev_id = id_data[1];
3790 5449
3791 /* 5450 /*
3792 * Try again to make sure, as some systems the bus-hold or other 5451 * Try again to make sure, as some systems the bus-hold or other
@@ -3795,11 +5454,10 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
3795 * not match, ignore the device completely. 5454 * not match, ignore the device completely.
3796 */ 5455 */
3797 5456
3798 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
3799
3800 /* Read entire ID string */ 5457 /* Read entire ID string */
3801 for (i = 0; i < ARRAY_SIZE(chip->id.data); i++) 5458 ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
3802 id_data[i] = chip->read_byte(mtd); 5459 if (ret)
5460 return ret;
3803 5461
3804 if (id_data[0] != maf_id || id_data[1] != dev_id) { 5462 if (id_data[0] != maf_id || id_data[1] != dev_id) {
3805 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n", 5463 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
@@ -4091,6 +5749,9 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4091 struct nand_chip *chip = mtd_to_nand(mtd); 5749 struct nand_chip *chip = mtd_to_nand(mtd);
4092 int ret; 5750 int ret;
4093 5751
5752 /* Enforce the right timings for reset/detection */
5753 onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
5754
4094 ret = nand_dt_init(chip); 5755 ret = nand_dt_init(chip);
4095 if (ret) 5756 if (ret)
4096 return ret; 5757 return ret;
@@ -4098,15 +5759,21 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4098 if (!mtd->name && mtd->dev.parent) 5759 if (!mtd->name && mtd->dev.parent)
4099 mtd->name = dev_name(mtd->dev.parent); 5760 mtd->name = dev_name(mtd->dev.parent);
4100 5761
4101 if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) { 5762 /*
5763 * ->cmdfunc() is legacy and will only be used if ->exec_op() is not
5764 * populated.
5765 */
5766 if (!chip->exec_op) {
4102 /* 5767 /*
4103 * Default functions assigned for chip_select() and 5768 * Default functions assigned for ->cmdfunc() and
4104 * cmdfunc() both expect cmd_ctrl() to be populated, 5769 * ->select_chip() both expect ->cmd_ctrl() to be populated.
4105 * so we need to check that that's the case
4106 */ 5770 */
4107 pr_err("chip.cmd_ctrl() callback is not provided"); 5771 if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
4108 return -EINVAL; 5772 pr_err("->cmd_ctrl() should be provided\n");
5773 return -EINVAL;
5774 }
4109 } 5775 }
5776
4110 /* Set the default functions */ 5777 /* Set the default functions */
4111 nand_set_defaults(chip); 5778 nand_set_defaults(chip);
4112 5779
@@ -4126,15 +5793,16 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4126 5793
4127 /* Check for a chip array */ 5794 /* Check for a chip array */
4128 for (i = 1; i < maxchips; i++) { 5795 for (i = 1; i < maxchips; i++) {
5796 u8 id[2];
5797
4129 /* See comment in nand_get_flash_type for reset */ 5798 /* See comment in nand_get_flash_type for reset */
4130 nand_reset(chip, i); 5799 nand_reset(chip, i);
4131 5800
4132 chip->select_chip(mtd, i); 5801 chip->select_chip(mtd, i);
4133 /* Send the command for reading device ID */ 5802 /* Send the command for reading device ID */
4134 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 5803 nand_readid_op(chip, 0, id, sizeof(id));
4135 /* Read manufacturer and device IDs */ 5804 /* Read manufacturer and device IDs */
4136 if (nand_maf_id != chip->read_byte(mtd) || 5805 if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
4137 nand_dev_id != chip->read_byte(mtd)) {
4138 chip->select_chip(mtd, -1); 5806 chip->select_chip(mtd, -1);
4139 break; 5807 break;
4140 } 5808 }
@@ -4501,26 +6169,6 @@ static bool nand_ecc_strength_good(struct mtd_info *mtd)
4501 return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds; 6169 return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
4502} 6170}
4503 6171
4504static bool invalid_ecc_page_accessors(struct nand_chip *chip)
4505{
4506 struct nand_ecc_ctrl *ecc = &chip->ecc;
4507
4508 if (nand_standard_page_accessors(ecc))
4509 return false;
4510
4511 /*
4512 * NAND_ECC_CUSTOM_PAGE_ACCESS flag is set, make sure the NAND
4513 * controller driver implements all the page accessors because
4514 * default helpers are not suitable when the core does not
4515 * send the READ0/PAGEPROG commands.
4516 */
4517 return (!ecc->read_page || !ecc->write_page ||
4518 !ecc->read_page_raw || !ecc->write_page_raw ||
4519 (NAND_HAS_SUBPAGE_READ(chip) && !ecc->read_subpage) ||
4520 (NAND_HAS_SUBPAGE_WRITE(chip) && !ecc->write_subpage &&
4521 ecc->hwctl && ecc->calculate));
4522}
4523
4524/** 6172/**
4525 * nand_scan_tail - [NAND Interface] Scan for the NAND device 6173 * nand_scan_tail - [NAND Interface] Scan for the NAND device
4526 * @mtd: MTD device structure 6174 * @mtd: MTD device structure
@@ -4533,7 +6181,6 @@ int nand_scan_tail(struct mtd_info *mtd)
4533{ 6181{
4534 struct nand_chip *chip = mtd_to_nand(mtd); 6182 struct nand_chip *chip = mtd_to_nand(mtd);
4535 struct nand_ecc_ctrl *ecc = &chip->ecc; 6183 struct nand_ecc_ctrl *ecc = &chip->ecc;
4536 struct nand_buffers *nbuf = NULL;
4537 int ret, i; 6184 int ret, i;
4538 6185
4539 /* New bad blocks should be marked in OOB, flash-based BBT, or both */ 6186 /* New bad blocks should be marked in OOB, flash-based BBT, or both */
@@ -4542,39 +6189,9 @@ int nand_scan_tail(struct mtd_info *mtd)
4542 return -EINVAL; 6189 return -EINVAL;
4543 } 6190 }
4544 6191
4545 if (invalid_ecc_page_accessors(chip)) { 6192 chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
4546 pr_err("Invalid ECC page accessors setup\n"); 6193 if (!chip->data_buf)
4547 return -EINVAL;
4548 }
4549
4550 if (!(chip->options & NAND_OWN_BUFFERS)) {
4551 nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
4552 if (!nbuf)
4553 return -ENOMEM;
4554
4555 nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
4556 if (!nbuf->ecccalc) {
4557 ret = -ENOMEM;
4558 goto err_free_nbuf;
4559 }
4560
4561 nbuf->ecccode = kmalloc(mtd->oobsize, GFP_KERNEL);
4562 if (!nbuf->ecccode) {
4563 ret = -ENOMEM;
4564 goto err_free_nbuf;
4565 }
4566
4567 nbuf->databuf = kmalloc(mtd->writesize + mtd->oobsize,
4568 GFP_KERNEL);
4569 if (!nbuf->databuf) {
4570 ret = -ENOMEM;
4571 goto err_free_nbuf;
4572 }
4573
4574 chip->buffers = nbuf;
4575 } else if (!chip->buffers) {
4576 return -ENOMEM; 6194 return -ENOMEM;
4577 }
4578 6195
4579 /* 6196 /*
4580 * FIXME: some NAND manufacturer drivers expect the first die to be 6197 * FIXME: some NAND manufacturer drivers expect the first die to be
@@ -4586,10 +6203,10 @@ int nand_scan_tail(struct mtd_info *mtd)
4586 ret = nand_manufacturer_init(chip); 6203 ret = nand_manufacturer_init(chip);
4587 chip->select_chip(mtd, -1); 6204 chip->select_chip(mtd, -1);
4588 if (ret) 6205 if (ret)
4589 goto err_free_nbuf; 6206 goto err_free_buf;
4590 6207
4591 /* Set the internal oob buffer location, just after the page data */ 6208 /* Set the internal oob buffer location, just after the page data */
4592 chip->oob_poi = chip->buffers->databuf + mtd->writesize; 6209 chip->oob_poi = chip->data_buf + mtd->writesize;
4593 6210
4594 /* 6211 /*
4595 * If no default placement scheme is given, select an appropriate one. 6212 * If no default placement scheme is given, select an appropriate one.
@@ -4737,6 +6354,15 @@ int nand_scan_tail(struct mtd_info *mtd)
4737 goto err_nand_manuf_cleanup; 6354 goto err_nand_manuf_cleanup;
4738 } 6355 }
4739 6356
6357 if (ecc->correct || ecc->calculate) {
6358 ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
6359 ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
6360 if (!ecc->calc_buf || !ecc->code_buf) {
6361 ret = -ENOMEM;
6362 goto err_nand_manuf_cleanup;
6363 }
6364 }
6365
4740 /* For many systems, the standard OOB write also works for raw */ 6366 /* For many systems, the standard OOB write also works for raw */
4741 if (!ecc->read_oob_raw) 6367 if (!ecc->read_oob_raw)
4742 ecc->read_oob_raw = ecc->read_oob; 6368 ecc->read_oob_raw = ecc->read_oob;
@@ -4853,7 +6479,7 @@ int nand_scan_tail(struct mtd_info *mtd)
4853 chip->select_chip(mtd, -1); 6479 chip->select_chip(mtd, -1);
4854 6480
4855 if (ret) 6481 if (ret)
4856 goto err_nand_data_iface_cleanup; 6482 goto err_nand_manuf_cleanup;
4857 } 6483 }
4858 6484
4859 /* Check, if we should skip the bad block table scan */ 6485 /* Check, if we should skip the bad block table scan */
@@ -4863,23 +6489,18 @@ int nand_scan_tail(struct mtd_info *mtd)
4863 /* Build bad block table */ 6489 /* Build bad block table */
4864 ret = chip->scan_bbt(mtd); 6490 ret = chip->scan_bbt(mtd);
4865 if (ret) 6491 if (ret)
4866 goto err_nand_data_iface_cleanup; 6492 goto err_nand_manuf_cleanup;
4867 6493
4868 return 0; 6494 return 0;
4869 6495
4870err_nand_data_iface_cleanup:
4871 nand_release_data_interface(chip);
4872 6496
4873err_nand_manuf_cleanup: 6497err_nand_manuf_cleanup:
4874 nand_manufacturer_cleanup(chip); 6498 nand_manufacturer_cleanup(chip);
4875 6499
4876err_free_nbuf: 6500err_free_buf:
4877 if (nbuf) { 6501 kfree(chip->data_buf);
4878 kfree(nbuf->databuf); 6502 kfree(ecc->code_buf);
4879 kfree(nbuf->ecccode); 6503 kfree(ecc->calc_buf);
4880 kfree(nbuf->ecccalc);
4881 kfree(nbuf);
4882 }
4883 6504
4884 return ret; 6505 return ret;
4885} 6506}
@@ -4927,16 +6548,11 @@ void nand_cleanup(struct nand_chip *chip)
4927 chip->ecc.algo == NAND_ECC_BCH) 6548 chip->ecc.algo == NAND_ECC_BCH)
4928 nand_bch_free((struct nand_bch_control *)chip->ecc.priv); 6549 nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
4929 6550
4930 nand_release_data_interface(chip);
4931
4932 /* Free bad block table memory */ 6551 /* Free bad block table memory */
4933 kfree(chip->bbt); 6552 kfree(chip->bbt);
4934 if (!(chip->options & NAND_OWN_BUFFERS) && chip->buffers) { 6553 kfree(chip->data_buf);
4935 kfree(chip->buffers->databuf); 6554 kfree(chip->ecc.code_buf);
4936 kfree(chip->buffers->ecccode); 6555 kfree(chip->ecc.calc_buf);
4937 kfree(chip->buffers->ecccalc);
4938 kfree(chip->buffers);
4939 }
4940 6556
4941 /* Free bad block descriptor memory */ 6557 /* Free bad block descriptor memory */
4942 if (chip->badblock_pattern && chip->badblock_pattern->options 6558 if (chip->badblock_pattern && chip->badblock_pattern->options
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 2915b6739bf8..36092850be2c 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -898,7 +898,7 @@ static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *b
898{ 898{
899 struct nand_chip *this = mtd_to_nand(mtd); 899 struct nand_chip *this = mtd_to_nand(mtd);
900 900
901 return create_bbt(mtd, this->buffers->databuf, bd, -1); 901 return create_bbt(mtd, this->data_buf, bd, -1);
902} 902}
903 903
904/** 904/**
diff --git a/drivers/mtd/nand/nand_hynix.c b/drivers/mtd/nand/nand_hynix.c
index 985751eda317..d542908a0ebb 100644
--- a/drivers/mtd/nand/nand_hynix.c
+++ b/drivers/mtd/nand/nand_hynix.c
@@ -67,15 +67,43 @@ struct hynix_read_retry_otp {
67 67
68static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip) 68static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
69{ 69{
70 u8 jedecid[5] = { };
71 int ret;
72
73 ret = nand_readid_op(chip, 0x40, jedecid, sizeof(jedecid));
74 if (ret)
75 return false;
76
77 return !strncmp("JEDEC", jedecid, sizeof(jedecid));
78}
79
80static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
81{
82 struct mtd_info *mtd = nand_to_mtd(chip);
83
84 if (chip->exec_op) {
85 struct nand_op_instr instrs[] = {
86 NAND_OP_CMD(cmd, 0),
87 };
88 struct nand_operation op = NAND_OPERATION(instrs);
89
90 return nand_exec_op(chip, &op);
91 }
92
93 chip->cmdfunc(mtd, cmd, -1, -1);
94
95 return 0;
96}
97
98static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
99{
70 struct mtd_info *mtd = nand_to_mtd(chip); 100 struct mtd_info *mtd = nand_to_mtd(chip);
71 u8 jedecid[6] = { }; 101 u16 column = ((u16)addr << 8) | addr;
72 int i = 0;
73 102
74 chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1); 103 chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
75 for (i = 0; i < 5; i++) 104 chip->write_byte(mtd, val);
76 jedecid[i] = chip->read_byte(mtd);
77 105
78 return !strcmp("JEDEC", jedecid); 106 return 0;
79} 107}
80 108
81static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode) 109static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
@@ -83,14 +111,15 @@ static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
83 struct nand_chip *chip = mtd_to_nand(mtd); 111 struct nand_chip *chip = mtd_to_nand(mtd);
84 struct hynix_nand *hynix = nand_get_manufacturer_data(chip); 112 struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
85 const u8 *values; 113 const u8 *values;
86 int status; 114 int i, ret;
87 int i;
88 115
89 values = hynix->read_retry->values + 116 values = hynix->read_retry->values +
90 (retry_mode * hynix->read_retry->nregs); 117 (retry_mode * hynix->read_retry->nregs);
91 118
92 /* Enter 'Set Hynix Parameters' mode */ 119 /* Enter 'Set Hynix Parameters' mode */
93 chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, -1, -1); 120 ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
121 if (ret)
122 return ret;
94 123
95 /* 124 /*
96 * Configure the NAND in the requested read-retry mode. 125 * Configure the NAND in the requested read-retry mode.
@@ -102,21 +131,14 @@ static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
102 * probably tweaked at production in this case). 131 * probably tweaked at production in this case).
103 */ 132 */
104 for (i = 0; i < hynix->read_retry->nregs; i++) { 133 for (i = 0; i < hynix->read_retry->nregs; i++) {
105 int column = hynix->read_retry->regs[i]; 134 ret = hynix_nand_reg_write_op(chip, hynix->read_retry->regs[i],
106 135 values[i]);
107 column |= column << 8; 136 if (ret)
108 chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1); 137 return ret;
109 chip->write_byte(mtd, values[i]);
110 } 138 }
111 139
112 /* Apply the new settings. */ 140 /* Apply the new settings. */
113 chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1); 141 return hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
114
115 status = chip->waitfunc(mtd, chip);
116 if (status & NAND_STATUS_FAIL)
117 return -EIO;
118
119 return 0;
120} 142}
121 143
122/** 144/**
@@ -172,40 +194,63 @@ static int hynix_read_rr_otp(struct nand_chip *chip,
172 const struct hynix_read_retry_otp *info, 194 const struct hynix_read_retry_otp *info,
173 void *buf) 195 void *buf)
174{ 196{
175 struct mtd_info *mtd = nand_to_mtd(chip); 197 int i, ret;
176 int i;
177 198
178 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); 199 ret = nand_reset_op(chip);
200 if (ret)
201 return ret;
179 202
180 chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, -1, -1); 203 ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
204 if (ret)
205 return ret;
181 206
182 for (i = 0; i < info->nregs; i++) { 207 for (i = 0; i < info->nregs; i++) {
183 int column = info->regs[i]; 208 ret = hynix_nand_reg_write_op(chip, info->regs[i],
184 209 info->values[i]);
185 column |= column << 8; 210 if (ret)
186 chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1); 211 return ret;
187 chip->write_byte(mtd, info->values[i]);
188 } 212 }
189 213
190 chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1); 214 ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
215 if (ret)
216 return ret;
191 217
192 /* Sequence to enter OTP mode? */ 218 /* Sequence to enter OTP mode? */
193 chip->cmdfunc(mtd, 0x17, -1, -1); 219 ret = hynix_nand_cmd_op(chip, 0x17);
194 chip->cmdfunc(mtd, 0x04, -1, -1); 220 if (ret)
195 chip->cmdfunc(mtd, 0x19, -1, -1); 221 return ret;
222
223 ret = hynix_nand_cmd_op(chip, 0x4);
224 if (ret)
225 return ret;
226
227 ret = hynix_nand_cmd_op(chip, 0x19);
228 if (ret)
229 return ret;
196 230
197 /* Now read the page */ 231 /* Now read the page */
198 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, info->page); 232 ret = nand_read_page_op(chip, info->page, 0, buf, info->size);
199 chip->read_buf(mtd, buf, info->size); 233 if (ret)
234 return ret;
200 235
201 /* Put everything back to normal */ 236 /* Put everything back to normal */
202 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); 237 ret = nand_reset_op(chip);
203 chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, 0x38, -1); 238 if (ret)
204 chip->write_byte(mtd, 0x0); 239 return ret;
205 chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);
206 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, -1);
207 240
208 return 0; 241 ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
242 if (ret)
243 return ret;
244
245 ret = hynix_nand_reg_write_op(chip, 0x38, 0);
246 if (ret)
247 return ret;
248
249 ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
250 if (ret)
251 return ret;
252
253 return nand_read_page_op(chip, 0, 0, NULL, 0);
209} 254}
210 255
211#define NAND_HYNIX_1XNM_RR_COUNT_OFFS 0 256#define NAND_HYNIX_1XNM_RR_COUNT_OFFS 0
diff --git a/drivers/mtd/nand/nand_micron.c b/drivers/mtd/nand/nand_micron.c
index abf6a3c376e8..02e109ae73f1 100644
--- a/drivers/mtd/nand/nand_micron.c
+++ b/drivers/mtd/nand/nand_micron.c
@@ -117,16 +117,28 @@ micron_nand_read_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
117 uint8_t *buf, int oob_required, 117 uint8_t *buf, int oob_required,
118 int page) 118 int page)
119{ 119{
120 int status; 120 u8 status;
121 int max_bitflips = 0; 121 int ret, max_bitflips = 0;
122 122
123 micron_nand_on_die_ecc_setup(chip, true); 123 ret = micron_nand_on_die_ecc_setup(chip, true);
124 if (ret)
125 return ret;
126
127 ret = nand_read_page_op(chip, page, 0, NULL, 0);
128 if (ret)
129 goto out;
130
131 ret = nand_status_op(chip, &status);
132 if (ret)
133 goto out;
134
135 ret = nand_exit_status_op(chip);
136 if (ret)
137 goto out;
124 138
125 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
126 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
127 status = chip->read_byte(mtd);
128 if (status & NAND_STATUS_FAIL) 139 if (status & NAND_STATUS_FAIL)
129 mtd->ecc_stats.failed++; 140 mtd->ecc_stats.failed++;
141
130 /* 142 /*
131 * The internal ECC doesn't tell us the number of bitflips 143 * The internal ECC doesn't tell us the number of bitflips
132 * that have been corrected, but tells us if it recommends to 144 * that have been corrected, but tells us if it recommends to
@@ -137,13 +149,15 @@ micron_nand_read_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
137 else if (status & NAND_STATUS_WRITE_RECOMMENDED) 149 else if (status & NAND_STATUS_WRITE_RECOMMENDED)
138 max_bitflips = chip->ecc.strength; 150 max_bitflips = chip->ecc.strength;
139 151
140 chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1); 152 ret = nand_read_data_op(chip, buf, mtd->writesize, false);
141 153 if (!ret && oob_required)
142 nand_read_page_raw(mtd, chip, buf, oob_required, page); 154 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
155 false);
143 156
157out:
144 micron_nand_on_die_ecc_setup(chip, false); 158 micron_nand_on_die_ecc_setup(chip, false);
145 159
146 return max_bitflips; 160 return ret ? ret : max_bitflips;
147} 161}
148 162
149static int 163static int
@@ -151,46 +165,16 @@ micron_nand_write_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
151 const uint8_t *buf, int oob_required, 165 const uint8_t *buf, int oob_required,
152 int page) 166 int page)
153{ 167{
154 int status; 168 int ret;
155
156 micron_nand_on_die_ecc_setup(chip, true);
157 169
158 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); 170 ret = micron_nand_on_die_ecc_setup(chip, true);
159 nand_write_page_raw(mtd, chip, buf, oob_required, page); 171 if (ret)
160 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 172 return ret;
161 status = chip->waitfunc(mtd, chip);
162 173
174 ret = nand_write_page_raw(mtd, chip, buf, oob_required, page);
163 micron_nand_on_die_ecc_setup(chip, false); 175 micron_nand_on_die_ecc_setup(chip, false);
164 176
165 return status & NAND_STATUS_FAIL ? -EIO : 0; 177 return ret;
166}
167
168static int
169micron_nand_read_page_raw_on_die_ecc(struct mtd_info *mtd,
170 struct nand_chip *chip,
171 uint8_t *buf, int oob_required,
172 int page)
173{
174 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
175 nand_read_page_raw(mtd, chip, buf, oob_required, page);
176
177 return 0;
178}
179
180static int
181micron_nand_write_page_raw_on_die_ecc(struct mtd_info *mtd,
182 struct nand_chip *chip,
183 const uint8_t *buf, int oob_required,
184 int page)
185{
186 int status;
187
188 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
189 nand_write_page_raw(mtd, chip, buf, oob_required, page);
190 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
191 status = chip->waitfunc(mtd, chip);
192
193 return status & NAND_STATUS_FAIL ? -EIO : 0;
194} 178}
195 179
196enum { 180enum {
@@ -285,17 +269,14 @@ static int micron_nand_init(struct nand_chip *chip)
285 return -EINVAL; 269 return -EINVAL;
286 } 270 }
287 271
288 chip->ecc.options = NAND_ECC_CUSTOM_PAGE_ACCESS;
289 chip->ecc.bytes = 8; 272 chip->ecc.bytes = 8;
290 chip->ecc.size = 512; 273 chip->ecc.size = 512;
291 chip->ecc.strength = 4; 274 chip->ecc.strength = 4;
292 chip->ecc.algo = NAND_ECC_BCH; 275 chip->ecc.algo = NAND_ECC_BCH;
293 chip->ecc.read_page = micron_nand_read_page_on_die_ecc; 276 chip->ecc.read_page = micron_nand_read_page_on_die_ecc;
294 chip->ecc.write_page = micron_nand_write_page_on_die_ecc; 277 chip->ecc.write_page = micron_nand_write_page_on_die_ecc;
295 chip->ecc.read_page_raw = 278 chip->ecc.read_page_raw = nand_read_page_raw;
296 micron_nand_read_page_raw_on_die_ecc; 279 chip->ecc.write_page_raw = nand_write_page_raw;
297 chip->ecc.write_page_raw =
298 micron_nand_write_page_raw_on_die_ecc;
299 280
300 mtd_set_ooblayout(mtd, &micron_nand_on_die_ooblayout_ops); 281 mtd_set_ooblayout(mtd, &micron_nand_on_die_ooblayout_ops);
301 } 282 }
diff --git a/drivers/mtd/nand/nand_samsung.c b/drivers/mtd/nand/nand_samsung.c
index d348f0129ae7..ef022f62f74c 100644
--- a/drivers/mtd/nand/nand_samsung.c
+++ b/drivers/mtd/nand/nand_samsung.c
@@ -91,6 +91,25 @@ static void samsung_nand_decode_id(struct nand_chip *chip)
91 } 91 }
92 } else { 92 } else {
93 nand_decode_ext_id(chip); 93 nand_decode_ext_id(chip);
94
95 if (nand_is_slc(chip)) {
96 switch (chip->id.data[1]) {
97 /* K9F4G08U0D-S[I|C]B0(T00) */
98 case 0xDC:
99 chip->ecc_step_ds = 512;
100 chip->ecc_strength_ds = 1;
101 break;
102
103 /* K9F1G08U0E 21nm chips do not support subpage write */
104 case 0xF1:
105 if (chip->id.len > 4 &&
106 (chip->id.data[4] & GENMASK(1, 0)) == 0x1)
107 chip->options |= NAND_NO_SUBPAGE_WRITE;
108 break;
109 default:
110 break;
111 }
112 }
94 } 113 }
95} 114}
96 115
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c
index 5d1533bcc5bd..9400d039ddbd 100644
--- a/drivers/mtd/nand/nand_timings.c
+++ b/drivers/mtd/nand/nand_timings.c
@@ -283,16 +283,16 @@ const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode)
283EXPORT_SYMBOL(onfi_async_timing_mode_to_sdr_timings); 283EXPORT_SYMBOL(onfi_async_timing_mode_to_sdr_timings);
284 284
285/** 285/**
286 * onfi_init_data_interface - [NAND Interface] Initialize a data interface from 286 * onfi_fill_data_interface - [NAND Interface] Initialize a data interface from
287 * given ONFI mode 287 * given ONFI mode
288 * @iface: The data interface to be initialized
289 * @mode: The ONFI timing mode 288 * @mode: The ONFI timing mode
290 */ 289 */
291int onfi_init_data_interface(struct nand_chip *chip, 290int onfi_fill_data_interface(struct nand_chip *chip,
292 struct nand_data_interface *iface,
293 enum nand_data_interface_type type, 291 enum nand_data_interface_type type,
294 int timing_mode) 292 int timing_mode)
295{ 293{
294 struct nand_data_interface *iface = &chip->data_interface;
295
296 if (type != NAND_SDR_IFACE) 296 if (type != NAND_SDR_IFACE)
297 return -EINVAL; 297 return -EINVAL;
298 298
@@ -321,15 +321,4 @@ int onfi_init_data_interface(struct nand_chip *chip,
321 321
322 return 0; 322 return 0;
323} 323}
324EXPORT_SYMBOL(onfi_init_data_interface); 324EXPORT_SYMBOL(onfi_fill_data_interface);
325
326/**
327 * nand_get_default_data_interface - [NAND Interface] Retrieve NAND
328 * data interface for mode 0. This is used as default timing after
329 * reset.
330 */
331const struct nand_data_interface *nand_get_default_data_interface(void)
332{
333 return &onfi_sdr_timings[0];
334}
335EXPORT_SYMBOL(nand_get_default_data_interface);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index dad438c4906a..8cdf7d3d8fa7 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1530,7 +1530,9 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
1530 const uint8_t *buf, int oob_required, int page) 1530 const uint8_t *buf, int oob_required, int page)
1531{ 1531{
1532 int ret; 1532 int ret;
1533 uint8_t *ecc_calc = chip->buffers->ecccalc; 1533 uint8_t *ecc_calc = chip->ecc.calc_buf;
1534
1535 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1534 1536
1535 /* Enable GPMC ecc engine */ 1537 /* Enable GPMC ecc engine */
1536 chip->ecc.hwctl(mtd, NAND_ECC_WRITE); 1538 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
@@ -1548,7 +1550,8 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
1548 1550
1549 /* Write ecc vector to OOB area */ 1551 /* Write ecc vector to OOB area */
1550 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 1552 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1551 return 0; 1553
1554 return nand_prog_page_end_op(chip);
1552} 1555}
1553 1556
1554/** 1557/**
@@ -1568,7 +1571,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
1568 u32 data_len, const u8 *buf, 1571 u32 data_len, const u8 *buf,
1569 int oob_required, int page) 1572 int oob_required, int page)
1570{ 1573{
1571 u8 *ecc_calc = chip->buffers->ecccalc; 1574 u8 *ecc_calc = chip->ecc.calc_buf;
1572 int ecc_size = chip->ecc.size; 1575 int ecc_size = chip->ecc.size;
1573 int ecc_bytes = chip->ecc.bytes; 1576 int ecc_bytes = chip->ecc.bytes;
1574 int ecc_steps = chip->ecc.steps; 1577 int ecc_steps = chip->ecc.steps;
@@ -1582,6 +1585,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
1582 * ECC is calculated for all subpages but we choose 1585 * ECC is calculated for all subpages but we choose
1583 * only what we want. 1586 * only what we want.
1584 */ 1587 */
1588 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1585 1589
1586 /* Enable GPMC ECC engine */ 1590 /* Enable GPMC ECC engine */
1587 chip->ecc.hwctl(mtd, NAND_ECC_WRITE); 1591 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
@@ -1605,7 +1609,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
1605 1609
1606 /* copy calculated ECC for whole page to chip->buffer->oob */ 1610 /* copy calculated ECC for whole page to chip->buffer->oob */
1607 /* this include masked-value(0xFF) for unwritten subpages */ 1611 /* this include masked-value(0xFF) for unwritten subpages */
1608 ecc_calc = chip->buffers->ecccalc; 1612 ecc_calc = chip->ecc.calc_buf;
1609 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, 1613 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
1610 chip->ecc.total); 1614 chip->ecc.total);
1611 if (ret) 1615 if (ret)
@@ -1614,7 +1618,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
1614 /* write OOB buffer to NAND device */ 1618 /* write OOB buffer to NAND device */
1615 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 1619 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1616 1620
1617 return 0; 1621 return nand_prog_page_end_op(chip);
1618} 1622}
1619 1623
1620/** 1624/**
@@ -1635,11 +1639,13 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
1635static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip, 1639static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
1636 uint8_t *buf, int oob_required, int page) 1640 uint8_t *buf, int oob_required, int page)
1637{ 1641{
1638 uint8_t *ecc_calc = chip->buffers->ecccalc; 1642 uint8_t *ecc_calc = chip->ecc.calc_buf;
1639 uint8_t *ecc_code = chip->buffers->ecccode; 1643 uint8_t *ecc_code = chip->ecc.code_buf;
1640 int stat, ret; 1644 int stat, ret;
1641 unsigned int max_bitflips = 0; 1645 unsigned int max_bitflips = 0;
1642 1646
1647 nand_read_page_op(chip, page, 0, NULL, 0);
1648
1643 /* Enable GPMC ecc engine */ 1649 /* Enable GPMC ecc engine */
1644 chip->ecc.hwctl(mtd, NAND_ECC_READ); 1650 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1645 1651
@@ -1647,10 +1653,10 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
1647 chip->read_buf(mtd, buf, mtd->writesize); 1653 chip->read_buf(mtd, buf, mtd->writesize);
1648 1654
1649 /* Read oob bytes */ 1655 /* Read oob bytes */
1650 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 1656 nand_change_read_column_op(chip,
1651 mtd->writesize + BADBLOCK_MARKER_LENGTH, -1); 1657 mtd->writesize + BADBLOCK_MARKER_LENGTH,
1652 chip->read_buf(mtd, chip->oob_poi + BADBLOCK_MARKER_LENGTH, 1658 chip->oob_poi + BADBLOCK_MARKER_LENGTH,
1653 chip->ecc.total); 1659 chip->ecc.total, false);
1654 1660
1655 /* Calculate ecc bytes */ 1661 /* Calculate ecc bytes */
1656 omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc); 1662 omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 90b9a9ccbe60..021374fe59dc 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -520,15 +520,13 @@ static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
520 struct nand_chip *chip = &host->chip; 520 struct nand_chip *chip = &host->chip;
521 struct pxa3xx_nand_info *info = host->info_data; 521 struct pxa3xx_nand_info *info = host->info_data;
522 const struct pxa3xx_nand_flash *f = NULL; 522 const struct pxa3xx_nand_flash *f = NULL;
523 struct mtd_info *mtd = nand_to_mtd(&host->chip);
524 int i, id, ntypes; 523 int i, id, ntypes;
524 u8 idbuf[2];
525 525
526 ntypes = ARRAY_SIZE(builtin_flash_types); 526 ntypes = ARRAY_SIZE(builtin_flash_types);
527 527
528 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 528 nand_readid_op(chip, 0, idbuf, sizeof(idbuf));
529 529 id = idbuf[0] | (idbuf[1] << 8);
530 id = chip->read_byte(mtd);
531 id |= chip->read_byte(mtd) << 0x8;
532 530
533 for (i = 0; i < ntypes; i++) { 531 for (i = 0; i < ntypes; i++) {
534 f = &builtin_flash_types[i]; 532 f = &builtin_flash_types[i];
@@ -1350,10 +1348,10 @@ static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1350 struct nand_chip *chip, const uint8_t *buf, int oob_required, 1348 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1351 int page) 1349 int page)
1352{ 1350{
1353 chip->write_buf(mtd, buf, mtd->writesize); 1351 nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
1354 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 1352 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1355 1353
1356 return 0; 1354 return nand_prog_page_end_op(chip);
1357} 1355}
1358 1356
1359static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, 1357static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
@@ -1363,7 +1361,7 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1363 struct pxa3xx_nand_host *host = nand_get_controller_data(chip); 1361 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1364 struct pxa3xx_nand_info *info = host->info_data; 1362 struct pxa3xx_nand_info *info = host->info_data;
1365 1363
1366 chip->read_buf(mtd, buf, mtd->writesize); 1364 nand_read_page_op(chip, page, 0, buf, mtd->writesize);
1367 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 1365 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1368 1366
1369 if (info->retcode == ERR_CORERR && info->use_ecc) { 1367 if (info->retcode == ERR_CORERR && info->use_ecc) {
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
index 2656c1ac5646..6be555806eca 100644
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -1725,6 +1725,7 @@ static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1725 u8 *data_buf, *oob_buf = NULL; 1725 u8 *data_buf, *oob_buf = NULL;
1726 int ret; 1726 int ret;
1727 1727
1728 nand_read_page_op(chip, page, 0, NULL, 0);
1728 data_buf = buf; 1729 data_buf = buf;
1729 oob_buf = oob_required ? chip->oob_poi : NULL; 1730 oob_buf = oob_required ? chip->oob_poi : NULL;
1730 1731
@@ -1750,6 +1751,7 @@ static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
1750 int i, ret; 1751 int i, ret;
1751 int read_loc; 1752 int read_loc;
1752 1753
1754 nand_read_page_op(chip, page, 0, NULL, 0);
1753 data_buf = buf; 1755 data_buf = buf;
1754 oob_buf = chip->oob_poi; 1756 oob_buf = chip->oob_poi;
1755 1757
@@ -1850,6 +1852,8 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1850 u8 *data_buf, *oob_buf; 1852 u8 *data_buf, *oob_buf;
1851 int i, ret; 1853 int i, ret;
1852 1854
1855 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1856
1853 clear_read_regs(nandc); 1857 clear_read_regs(nandc);
1854 clear_bam_transaction(nandc); 1858 clear_bam_transaction(nandc);
1855 1859
@@ -1902,6 +1906,9 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1902 1906
1903 free_descs(nandc); 1907 free_descs(nandc);
1904 1908
1909 if (!ret)
1910 ret = nand_prog_page_end_op(chip);
1911
1905 return ret; 1912 return ret;
1906} 1913}
1907 1914
@@ -1916,6 +1923,7 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
1916 u8 *data_buf, *oob_buf; 1923 u8 *data_buf, *oob_buf;
1917 int i, ret; 1924 int i, ret;
1918 1925
1926 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1919 clear_read_regs(nandc); 1927 clear_read_regs(nandc);
1920 clear_bam_transaction(nandc); 1928 clear_bam_transaction(nandc);
1921 1929
@@ -1970,6 +1978,9 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
1970 1978
1971 free_descs(nandc); 1979 free_descs(nandc);
1972 1980
1981 if (!ret)
1982 ret = nand_prog_page_end_op(chip);
1983
1973 return ret; 1984 return ret;
1974} 1985}
1975 1986
@@ -1990,7 +2001,7 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
1990 struct nand_ecc_ctrl *ecc = &chip->ecc; 2001 struct nand_ecc_ctrl *ecc = &chip->ecc;
1991 u8 *oob = chip->oob_poi; 2002 u8 *oob = chip->oob_poi;
1992 int data_size, oob_size; 2003 int data_size, oob_size;
1993 int ret, status = 0; 2004 int ret;
1994 2005
1995 host->use_ecc = true; 2006 host->use_ecc = true;
1996 2007
@@ -2027,11 +2038,7 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
2027 return -EIO; 2038 return -EIO;
2028 } 2039 }
2029 2040
2030 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 2041 return nand_prog_page_end_op(chip);
2031
2032 status = chip->waitfunc(mtd, chip);
2033
2034 return status & NAND_STATUS_FAIL ? -EIO : 0;
2035} 2042}
2036 2043
2037static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs) 2044static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
@@ -2081,7 +2088,7 @@ static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
2081 struct qcom_nand_host *host = to_qcom_nand_host(chip); 2088 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2082 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 2089 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2083 struct nand_ecc_ctrl *ecc = &chip->ecc; 2090 struct nand_ecc_ctrl *ecc = &chip->ecc;
2084 int page, ret, status = 0; 2091 int page, ret;
2085 2092
2086 clear_read_regs(nandc); 2093 clear_read_regs(nandc);
2087 clear_bam_transaction(nandc); 2094 clear_bam_transaction(nandc);
@@ -2114,11 +2121,7 @@ static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
2114 return -EIO; 2121 return -EIO;
2115 } 2122 }
2116 2123
2117 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 2124 return nand_prog_page_end_op(chip);
2118
2119 status = chip->waitfunc(mtd, chip);
2120
2121 return status & NAND_STATUS_FAIL ? -EIO : 0;
2122} 2125}
2123 2126
2124/* 2127/*
@@ -2636,6 +2639,9 @@ static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
2636 2639
2637 nand_set_flash_node(chip, dn); 2640 nand_set_flash_node(chip, dn);
2638 mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs); 2641 mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
2642 if (!mtd->name)
2643 return -ENOMEM;
2644
2639 mtd->owner = THIS_MODULE; 2645 mtd->owner = THIS_MODULE;
2640 mtd->dev.parent = dev; 2646 mtd->dev.parent = dev;
2641 2647
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index fc9287af4614..595635b9e9de 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -364,7 +364,7 @@ static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
364 struct r852_device *dev = nand_get_controller_data(chip); 364 struct r852_device *dev = nand_get_controller_data(chip);
365 365
366 unsigned long timeout; 366 unsigned long timeout;
367 int status; 367 u8 status;
368 368
369 timeout = jiffies + (chip->state == FL_ERASING ? 369 timeout = jiffies + (chip->state == FL_ERASING ?
370 msecs_to_jiffies(400) : msecs_to_jiffies(20)); 370 msecs_to_jiffies(400) : msecs_to_jiffies(20));
@@ -373,8 +373,7 @@ static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
373 if (chip->dev_ready(mtd)) 373 if (chip->dev_ready(mtd))
374 break; 374 break;
375 375
376 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); 376 nand_status_op(chip, &status);
377 status = (int)chip->read_byte(mtd);
378 377
379 /* Unfortunelly, no way to send detailed error status... */ 378 /* Unfortunelly, no way to send detailed error status... */
380 if (dev->dma_error) { 379 if (dev->dma_error) {
@@ -522,9 +521,7 @@ exit:
522static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 521static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
523 int page) 522 int page)
524{ 523{
525 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); 524 return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
526 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
527 return 0;
528} 525}
529 526
530/* 527/*
@@ -1046,7 +1043,7 @@ static int r852_resume(struct device *device)
1046 if (dev->card_registred) { 1043 if (dev->card_registred) {
1047 r852_engine_enable(dev); 1044 r852_engine_enable(dev);
1048 dev->chip->select_chip(mtd, 0); 1045 dev->chip->select_chip(mtd, 0);
1049 dev->chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); 1046 nand_reset_op(dev->chip);
1050 dev->chip->select_chip(mtd, -1); 1047 dev->chip->select_chip(mtd, -1);
1051 } 1048 }
1052 1049
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 3c5008a4f5f3..c4e7755448e6 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -614,7 +614,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
614static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 614static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
615 uint8_t *buf, int oob_required, int page) 615 uint8_t *buf, int oob_required, int page)
616{ 616{
617 chip->read_buf(mtd, buf, mtd->writesize); 617 nand_read_page_op(chip, page, 0, buf, mtd->writesize);
618 if (oob_required) 618 if (oob_required)
619 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 619 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
620 return 0; 620 return 0;
@@ -624,9 +624,9 @@ static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
624 const uint8_t *buf, int oob_required, 624 const uint8_t *buf, int oob_required,
625 int page) 625 int page)
626{ 626{
627 chip->write_buf(mtd, buf, mtd->writesize); 627 nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
628 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 628 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
629 return 0; 629 return nand_prog_page_end_op(chip);
630} 630}
631 631
632static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr) 632static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
diff --git a/drivers/mtd/nand/sm_common.h b/drivers/mtd/nand/sm_common.h
index d3e028e58b0f..1581671b05ae 100644
--- a/drivers/mtd/nand/sm_common.h
+++ b/drivers/mtd/nand/sm_common.h
@@ -36,7 +36,7 @@ struct sm_oob {
36#define SM_SMALL_OOB_SIZE 8 36#define SM_SMALL_OOB_SIZE 8
37 37
38 38
39extern int sm_register_device(struct mtd_info *mtd, int smartmedia); 39int sm_register_device(struct mtd_info *mtd, int smartmedia);
40 40
41 41
42static inline int sm_sector_valid(struct sm_oob *oob) 42static inline int sm_sector_valid(struct sm_oob *oob)
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 82244be3e766..f5a55c63935c 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -958,12 +958,12 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
958 int ret; 958 int ret;
959 959
960 if (*cur_off != data_off) 960 if (*cur_off != data_off)
961 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1); 961 nand_change_read_column_op(nand, data_off, NULL, 0, false);
962 962
963 sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page); 963 sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page);
964 964
965 if (data_off + ecc->size != oob_off) 965 if (data_off + ecc->size != oob_off)
966 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1); 966 nand_change_read_column_op(nand, oob_off, NULL, 0, false);
967 967
968 ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); 968 ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
969 if (ret) 969 if (ret)
@@ -991,16 +991,15 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
991 * Re-read the data with the randomizer disabled to identify 991 * Re-read the data with the randomizer disabled to identify
992 * bitflips in erased pages. 992 * bitflips in erased pages.
993 */ 993 */
994 if (nand->options & NAND_NEED_SCRAMBLING) { 994 if (nand->options & NAND_NEED_SCRAMBLING)
995 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1); 995 nand_change_read_column_op(nand, data_off, data,
996 nand->read_buf(mtd, data, ecc->size); 996 ecc->size, false);
997 } else { 997 else
998 memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, 998 memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE,
999 ecc->size); 999 ecc->size);
1000 }
1001 1000
1002 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1); 1001 nand_change_read_column_op(nand, oob_off, oob, ecc->bytes + 4,
1003 nand->read_buf(mtd, oob, ecc->bytes + 4); 1002 false);
1004 1003
1005 ret = nand_check_erased_ecc_chunk(data, ecc->size, 1004 ret = nand_check_erased_ecc_chunk(data, ecc->size,
1006 oob, ecc->bytes + 4, 1005 oob, ecc->bytes + 4,
@@ -1011,7 +1010,8 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
1011 memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size); 1010 memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
1012 1011
1013 if (oob_required) { 1012 if (oob_required) {
1014 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1); 1013 nand_change_read_column_op(nand, oob_off, NULL, 0,
1014 false);
1015 sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4, 1015 sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4,
1016 true, page); 1016 true, page);
1017 1017
@@ -1038,8 +1038,8 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
1038 return; 1038 return;
1039 1039
1040 if (!cur_off || *cur_off != offset) 1040 if (!cur_off || *cur_off != offset)
1041 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, 1041 nand_change_read_column_op(nand, mtd->writesize, NULL, 0,
1042 offset + mtd->writesize, -1); 1042 false);
1043 1043
1044 if (!randomize) 1044 if (!randomize)
1045 sunxi_nfc_read_buf(mtd, oob + offset, len); 1045 sunxi_nfc_read_buf(mtd, oob + offset, len);
@@ -1116,9 +1116,9 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
1116 1116
1117 if (oob_required && !erased) { 1117 if (oob_required && !erased) {
1118 /* TODO: use DMA to retrieve OOB */ 1118 /* TODO: use DMA to retrieve OOB */
1119 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, 1119 nand_change_read_column_op(nand,
1120 mtd->writesize + oob_off, -1); 1120 mtd->writesize + oob_off,
1121 nand->read_buf(mtd, oob, ecc->bytes + 4); 1121 oob, ecc->bytes + 4, false);
1122 1122
1123 sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i, 1123 sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i,
1124 !i, page); 1124 !i, page);
@@ -1143,18 +1143,17 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
1143 /* 1143 /*
1144 * Re-read the data with the randomizer disabled to 1144 * Re-read the data with the randomizer disabled to
1145 * identify bitflips in erased pages. 1145 * identify bitflips in erased pages.
1146 * TODO: use DMA to read page in raw mode
1146 */ 1147 */
1147 if (randomized) { 1148 if (randomized)
1148 /* TODO: use DMA to read page in raw mode */ 1149 nand_change_read_column_op(nand, data_off,
1149 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, 1150 data, ecc->size,
1150 data_off, -1); 1151 false);
1151 nand->read_buf(mtd, data, ecc->size);
1152 }
1153 1152
1154 /* TODO: use DMA to retrieve OOB */ 1153 /* TODO: use DMA to retrieve OOB */
1155 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, 1154 nand_change_read_column_op(nand,
1156 mtd->writesize + oob_off, -1); 1155 mtd->writesize + oob_off,
1157 nand->read_buf(mtd, oob, ecc->bytes + 4); 1156 oob, ecc->bytes + 4, false);
1158 1157
1159 ret = nand_check_erased_ecc_chunk(data, ecc->size, 1158 ret = nand_check_erased_ecc_chunk(data, ecc->size,
1160 oob, ecc->bytes + 4, 1159 oob, ecc->bytes + 4,
@@ -1187,12 +1186,12 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
1187 int ret; 1186 int ret;
1188 1187
1189 if (data_off != *cur_off) 1188 if (data_off != *cur_off)
1190 nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1); 1189 nand_change_write_column_op(nand, data_off, NULL, 0, false);
1191 1190
1192 sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page); 1191 sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);
1193 1192
1194 if (data_off + ecc->size != oob_off) 1193 if (data_off + ecc->size != oob_off)
1195 nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1); 1194 nand_change_write_column_op(nand, oob_off, NULL, 0, false);
1196 1195
1197 ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); 1196 ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
1198 if (ret) 1197 if (ret)
@@ -1228,8 +1227,8 @@ static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
1228 return; 1227 return;
1229 1228
1230 if (!cur_off || *cur_off != offset) 1229 if (!cur_off || *cur_off != offset)
1231 nand->cmdfunc(mtd, NAND_CMD_RNDIN, 1230 nand_change_write_column_op(nand, offset + mtd->writesize,
1232 offset + mtd->writesize, -1); 1231 NULL, 0, false);
1233 1232
1234 sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page); 1233 sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);
1235 1234
@@ -1246,6 +1245,8 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
1246 int ret, i, cur_off = 0; 1245 int ret, i, cur_off = 0;
1247 bool raw_mode = false; 1246 bool raw_mode = false;
1248 1247
1248 nand_read_page_op(chip, page, 0, NULL, 0);
1249
1249 sunxi_nfc_hw_ecc_enable(mtd); 1250 sunxi_nfc_hw_ecc_enable(mtd);
1250 1251
1251 for (i = 0; i < ecc->steps; i++) { 1252 for (i = 0; i < ecc->steps; i++) {
@@ -1279,14 +1280,14 @@ static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd,
1279{ 1280{
1280 int ret; 1281 int ret;
1281 1282
1283 nand_read_page_op(chip, page, 0, NULL, 0);
1284
1282 ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page, 1285 ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page,
1283 chip->ecc.steps); 1286 chip->ecc.steps);
1284 if (ret >= 0) 1287 if (ret >= 0)
1285 return ret; 1288 return ret;
1286 1289
1287 /* Fallback to PIO mode */ 1290 /* Fallback to PIO mode */
1288 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
1289
1290 return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page); 1291 return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page);
1291} 1292}
1292 1293
@@ -1299,6 +1300,8 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
1299 int ret, i, cur_off = 0; 1300 int ret, i, cur_off = 0;
1300 unsigned int max_bitflips = 0; 1301 unsigned int max_bitflips = 0;
1301 1302
1303 nand_read_page_op(chip, page, 0, NULL, 0);
1304
1302 sunxi_nfc_hw_ecc_enable(mtd); 1305 sunxi_nfc_hw_ecc_enable(mtd);
1303 1306
1304 for (i = data_offs / ecc->size; 1307 for (i = data_offs / ecc->size;
@@ -1330,13 +1333,13 @@ static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd,
1330 int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size); 1333 int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
1331 int ret; 1334 int ret;
1332 1335
1336 nand_read_page_op(chip, page, 0, NULL, 0);
1337
1333 ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks); 1338 ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks);
1334 if (ret >= 0) 1339 if (ret >= 0)
1335 return ret; 1340 return ret;
1336 1341
1337 /* Fallback to PIO mode */ 1342 /* Fallback to PIO mode */
1338 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
1339
1340 return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen, 1343 return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen,
1341 buf, page); 1344 buf, page);
1342} 1345}
@@ -1349,6 +1352,8 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
1349 struct nand_ecc_ctrl *ecc = &chip->ecc; 1352 struct nand_ecc_ctrl *ecc = &chip->ecc;
1350 int ret, i, cur_off = 0; 1353 int ret, i, cur_off = 0;
1351 1354
1355 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1356
1352 sunxi_nfc_hw_ecc_enable(mtd); 1357 sunxi_nfc_hw_ecc_enable(mtd);
1353 1358
1354 for (i = 0; i < ecc->steps; i++) { 1359 for (i = 0; i < ecc->steps; i++) {
@@ -1370,7 +1375,7 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
1370 1375
1371 sunxi_nfc_hw_ecc_disable(mtd); 1376 sunxi_nfc_hw_ecc_disable(mtd);
1372 1377
1373 return 0; 1378 return nand_prog_page_end_op(chip);
1374} 1379}
1375 1380
1376static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd, 1381static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
@@ -1382,6 +1387,8 @@ static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
1382 struct nand_ecc_ctrl *ecc = &chip->ecc; 1387 struct nand_ecc_ctrl *ecc = &chip->ecc;
1383 int ret, i, cur_off = 0; 1388 int ret, i, cur_off = 0;
1384 1389
1390 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1391
1385 sunxi_nfc_hw_ecc_enable(mtd); 1392 sunxi_nfc_hw_ecc_enable(mtd);
1386 1393
1387 for (i = data_offs / ecc->size; 1394 for (i = data_offs / ecc->size;
@@ -1400,7 +1407,7 @@ static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
1400 1407
1401 sunxi_nfc_hw_ecc_disable(mtd); 1408 sunxi_nfc_hw_ecc_disable(mtd);
1402 1409
1403 return 0; 1410 return nand_prog_page_end_op(chip);
1404} 1411}
1405 1412
1406static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd, 1413static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
@@ -1430,6 +1437,8 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
1430 sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page); 1437 sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page);
1431 } 1438 }
1432 1439
1440 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1441
1433 sunxi_nfc_hw_ecc_enable(mtd); 1442 sunxi_nfc_hw_ecc_enable(mtd);
1434 sunxi_nfc_randomizer_config(mtd, page, false); 1443 sunxi_nfc_randomizer_config(mtd, page, false);
1435 sunxi_nfc_randomizer_enable(mtd); 1444 sunxi_nfc_randomizer_enable(mtd);
@@ -1460,7 +1469,7 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
1460 sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, 1469 sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
1461 NULL, page); 1470 NULL, page);
1462 1471
1463 return 0; 1472 return nand_prog_page_end_op(chip);
1464 1473
1465pio_fallback: 1474pio_fallback:
1466 return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page); 1475 return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page);
@@ -1476,6 +1485,8 @@ static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
1476 int ret, i, cur_off = 0; 1485 int ret, i, cur_off = 0;
1477 bool raw_mode = false; 1486 bool raw_mode = false;
1478 1487
1488 nand_read_page_op(chip, page, 0, NULL, 0);
1489
1479 sunxi_nfc_hw_ecc_enable(mtd); 1490 sunxi_nfc_hw_ecc_enable(mtd);
1480 1491
1481 for (i = 0; i < ecc->steps; i++) { 1492 for (i = 0; i < ecc->steps; i++) {
@@ -1512,6 +1523,8 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
1512 struct nand_ecc_ctrl *ecc = &chip->ecc; 1523 struct nand_ecc_ctrl *ecc = &chip->ecc;
1513 int ret, i, cur_off = 0; 1524 int ret, i, cur_off = 0;
1514 1525
1526 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1527
1515 sunxi_nfc_hw_ecc_enable(mtd); 1528 sunxi_nfc_hw_ecc_enable(mtd);
1516 1529
1517 for (i = 0; i < ecc->steps; i++) { 1530 for (i = 0; i < ecc->steps; i++) {
@@ -1533,41 +1546,33 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
1533 1546
1534 sunxi_nfc_hw_ecc_disable(mtd); 1547 sunxi_nfc_hw_ecc_disable(mtd);
1535 1548
1536 return 0; 1549 return nand_prog_page_end_op(chip);
1537} 1550}
1538 1551
1539static int sunxi_nfc_hw_common_ecc_read_oob(struct mtd_info *mtd, 1552static int sunxi_nfc_hw_common_ecc_read_oob(struct mtd_info *mtd,
1540 struct nand_chip *chip, 1553 struct nand_chip *chip,
1541 int page) 1554 int page)
1542{ 1555{
1543 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1544
1545 chip->pagebuf = -1; 1556 chip->pagebuf = -1;
1546 1557
1547 return chip->ecc.read_page(mtd, chip, chip->buffers->databuf, 1, page); 1558 return chip->ecc.read_page(mtd, chip, chip->data_buf, 1, page);
1548} 1559}
1549 1560
1550static int sunxi_nfc_hw_common_ecc_write_oob(struct mtd_info *mtd, 1561static int sunxi_nfc_hw_common_ecc_write_oob(struct mtd_info *mtd,
1551 struct nand_chip *chip, 1562 struct nand_chip *chip,
1552 int page) 1563 int page)
1553{ 1564{
1554 int ret, status; 1565 int ret;
1555
1556 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
1557 1566
1558 chip->pagebuf = -1; 1567 chip->pagebuf = -1;
1559 1568
1560 memset(chip->buffers->databuf, 0xff, mtd->writesize); 1569 memset(chip->data_buf, 0xff, mtd->writesize);
1561 ret = chip->ecc.write_page(mtd, chip, chip->buffers->databuf, 1, page); 1570 ret = chip->ecc.write_page(mtd, chip, chip->data_buf, 1, page);
1562 if (ret) 1571 if (ret)
1563 return ret; 1572 return ret;
1564 1573
1565 /* Send command to program the OOB data */ 1574 /* Send command to program the OOB data */
1566 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 1575 return nand_prog_page_end_op(chip);
1567
1568 status = chip->waitfunc(mtd, chip);
1569
1570 return status & NAND_STATUS_FAIL ? -EIO : 0;
1571} 1576}
1572 1577
1573static const s32 tWB_lut[] = {6, 12, 16, 20}; 1578static const s32 tWB_lut[] = {6, 12, 16, 20};
@@ -1853,8 +1858,14 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
1853 1858
1854 /* Add ECC info retrieval from DT */ 1859 /* Add ECC info retrieval from DT */
1855 for (i = 0; i < ARRAY_SIZE(strengths); i++) { 1860 for (i = 0; i < ARRAY_SIZE(strengths); i++) {
1856 if (ecc->strength <= strengths[i]) 1861 if (ecc->strength <= strengths[i]) {
1862 /*
1863 * Update ecc->strength value with the actual strength
1864 * that will be used by the ECC engine.
1865 */
1866 ecc->strength = strengths[i];
1857 break; 1867 break;
1868 }
1858 } 1869 }
1859 1870
1860 if (i >= ARRAY_SIZE(strengths)) { 1871 if (i >= ARRAY_SIZE(strengths)) {
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
index 766906f03943..c5bee00b7f5e 100644
--- a/drivers/mtd/nand/tango_nand.c
+++ b/drivers/mtd/nand/tango_nand.c
@@ -329,7 +329,7 @@ static void aux_read(struct nand_chip *chip, u8 **buf, int len, int *pos)
329 329
330 if (!*buf) { 330 if (!*buf) {
331 /* skip over "len" bytes */ 331 /* skip over "len" bytes */
332 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, *pos, -1); 332 nand_change_read_column_op(chip, *pos, NULL, 0, false);
333 } else { 333 } else {
334 tango_read_buf(mtd, *buf, len); 334 tango_read_buf(mtd, *buf, len);
335 *buf += len; 335 *buf += len;
@@ -344,7 +344,7 @@ static void aux_write(struct nand_chip *chip, const u8 **buf, int len, int *pos)
344 344
345 if (!*buf) { 345 if (!*buf) {
346 /* skip over "len" bytes */ 346 /* skip over "len" bytes */
347 chip->cmdfunc(mtd, NAND_CMD_RNDIN, *pos, -1); 347 nand_change_write_column_op(chip, *pos, NULL, 0, false);
348 } else { 348 } else {
349 tango_write_buf(mtd, *buf, len); 349 tango_write_buf(mtd, *buf, len);
350 *buf += len; 350 *buf += len;
@@ -427,7 +427,7 @@ static void raw_write(struct nand_chip *chip, const u8 *buf, const u8 *oob)
427static int tango_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 427static int tango_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
428 u8 *buf, int oob_required, int page) 428 u8 *buf, int oob_required, int page)
429{ 429{
430 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); 430 nand_read_page_op(chip, page, 0, NULL, 0);
431 raw_read(chip, buf, chip->oob_poi); 431 raw_read(chip, buf, chip->oob_poi);
432 return 0; 432 return 0;
433} 433}
@@ -435,23 +435,15 @@ static int tango_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
435static int tango_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 435static int tango_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
436 const u8 *buf, int oob_required, int page) 436 const u8 *buf, int oob_required, int page)
437{ 437{
438 int status; 438 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
439
440 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
441 raw_write(chip, buf, chip->oob_poi); 439 raw_write(chip, buf, chip->oob_poi);
442 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 440 return nand_prog_page_end_op(chip);
443
444 status = chip->waitfunc(mtd, chip);
445 if (status & NAND_STATUS_FAIL)
446 return -EIO;
447
448 return 0;
449} 441}
450 442
451static int tango_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 443static int tango_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
452 int page) 444 int page)
453{ 445{
454 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); 446 nand_read_page_op(chip, page, 0, NULL, 0);
455 raw_read(chip, NULL, chip->oob_poi); 447 raw_read(chip, NULL, chip->oob_poi);
456 return 0; 448 return 0;
457} 449}
@@ -459,11 +451,9 @@ static int tango_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
459static int tango_write_oob(struct mtd_info *mtd, struct nand_chip *chip, 451static int tango_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
460 int page) 452 int page)
461{ 453{
462 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page); 454 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
463 raw_write(chip, NULL, chip->oob_poi); 455 raw_write(chip, NULL, chip->oob_poi);
464 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 456 return nand_prog_page_end_op(chip);
465 chip->waitfunc(mtd, chip);
466 return 0;
467} 457}
468 458
469static int oob_ecc(struct mtd_info *mtd, int idx, struct mtd_oob_region *res) 459static int oob_ecc(struct mtd_info *mtd, int idx, struct mtd_oob_region *res)
@@ -590,7 +580,6 @@ static int chip_init(struct device *dev, struct device_node *np)
590 ecc->write_page = tango_write_page; 580 ecc->write_page = tango_write_page;
591 ecc->read_oob = tango_read_oob; 581 ecc->read_oob = tango_read_oob;
592 ecc->write_oob = tango_write_oob; 582 ecc->write_oob = tango_write_oob;
593 ecc->options = NAND_ECC_CUSTOM_PAGE_ACCESS;
594 583
595 err = nand_scan_tail(mtd); 584 err = nand_scan_tail(mtd);
596 if (err) 585 if (err)
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 84dbf32332e1..dcaa924502de 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -192,6 +192,7 @@ tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
192{ 192{
193 struct tmio_nand *tmio = mtd_to_tmio(mtd); 193 struct tmio_nand *tmio = mtd_to_tmio(mtd);
194 long timeout; 194 long timeout;
195 u8 status;
195 196
196 /* enable RDYREQ interrupt */ 197 /* enable RDYREQ interrupt */
197 tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR); 198 tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
@@ -212,8 +213,8 @@ tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
212 dev_warn(&tmio->dev->dev, "timeout waiting for interrupt\n"); 213 dev_warn(&tmio->dev->dev, "timeout waiting for interrupt\n");
213 } 214 }
214 215
215 nand_chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); 216 nand_status_op(nand_chip, &status);
216 return nand_chip->read_byte(mtd); 217 return status;
217} 218}
218 219
219/* 220/*
diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
index 8037d4b48a05..80d31a58e558 100644
--- a/drivers/mtd/nand/vf610_nfc.c
+++ b/drivers/mtd/nand/vf610_nfc.c
@@ -560,7 +560,7 @@ static int vf610_nfc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
560 int eccsize = chip->ecc.size; 560 int eccsize = chip->ecc.size;
561 int stat; 561 int stat;
562 562
563 vf610_nfc_read_buf(mtd, buf, eccsize); 563 nand_read_page_op(chip, page, 0, buf, eccsize);
564 if (oob_required) 564 if (oob_required)
565 vf610_nfc_read_buf(mtd, chip->oob_poi, mtd->oobsize); 565 vf610_nfc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
566 566
@@ -580,7 +580,7 @@ static int vf610_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
580{ 580{
581 struct vf610_nfc *nfc = mtd_to_nfc(mtd); 581 struct vf610_nfc *nfc = mtd_to_nfc(mtd);
582 582
583 vf610_nfc_write_buf(mtd, buf, mtd->writesize); 583 nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
584 if (oob_required) 584 if (oob_required)
585 vf610_nfc_write_buf(mtd, chip->oob_poi, mtd->oobsize); 585 vf610_nfc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
586 586
@@ -588,7 +588,7 @@ static int vf610_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
588 nfc->use_hw_ecc = true; 588 nfc->use_hw_ecc = true;
589 nfc->write_sz = mtd->writesize + mtd->oobsize; 589 nfc->write_sz = mtd->writesize + mtd->oobsize;
590 590
591 return 0; 591 return nand_prog_page_end_op(chip);
592} 592}
593 593
594static const struct of_device_id vf610_nfc_dt_ids[] = { 594static const struct of_device_id vf610_nfc_dt_ids[] = {
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index dcae2f6a2b11..9dc15748947b 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -4,8 +4,7 @@ menuconfig MTD_ONENAND
4 depends on HAS_IOMEM 4 depends on HAS_IOMEM
5 help 5 help
6 This enables support for accessing all type of OneNAND flash 6 This enables support for accessing all type of OneNAND flash
7 devices. For further information see 7 devices.
8 <http://www.samsung.com/Products/Semiconductor/OneNAND/index.htm>
9 8
10if MTD_ONENAND 9if MTD_ONENAND
11 10
@@ -26,9 +25,11 @@ config MTD_ONENAND_GENERIC
26config MTD_ONENAND_OMAP2 25config MTD_ONENAND_OMAP2
27 tristate "OneNAND on OMAP2/OMAP3 support" 26 tristate "OneNAND on OMAP2/OMAP3 support"
28 depends on ARCH_OMAP2 || ARCH_OMAP3 27 depends on ARCH_OMAP2 || ARCH_OMAP3
28 depends on OF || COMPILE_TEST
29 help 29 help
30 Support for a OneNAND flash device connected to an OMAP2/OMAP3 CPU 30 Support for a OneNAND flash device connected to an OMAP2/OMAP3 SoC
31 via the GPMC memory controller. 31 via the GPMC memory controller.
32 Enable dmaengine and gpiolib for better performance.
32 33
33config MTD_ONENAND_SAMSUNG 34config MTD_ONENAND_SAMSUNG
34 tristate "OneNAND on Samsung SOC controller support" 35 tristate "OneNAND on Samsung SOC controller support"
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 24a1388d3031..87c34f607a75 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -28,19 +28,18 @@
28#include <linux/mtd/mtd.h> 28#include <linux/mtd/mtd.h>
29#include <linux/mtd/onenand.h> 29#include <linux/mtd/onenand.h>
30#include <linux/mtd/partitions.h> 30#include <linux/mtd/partitions.h>
31#include <linux/of_device.h>
32#include <linux/omap-gpmc.h>
31#include <linux/platform_device.h> 33#include <linux/platform_device.h>
32#include <linux/interrupt.h> 34#include <linux/interrupt.h>
33#include <linux/delay.h> 35#include <linux/delay.h>
34#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
37#include <linux/dmaengine.h>
35#include <linux/io.h> 38#include <linux/io.h>
36#include <linux/slab.h> 39#include <linux/slab.h>
37#include <linux/regulator/consumer.h> 40#include <linux/gpio/consumer.h>
38#include <linux/gpio.h>
39 41
40#include <asm/mach/flash.h> 42#include <asm/mach/flash.h>
41#include <linux/platform_data/mtd-onenand-omap2.h>
42
43#include <linux/omap-dma.h>
44 43
45#define DRIVER_NAME "omap2-onenand" 44#define DRIVER_NAME "omap2-onenand"
46 45
@@ -50,24 +49,17 @@ struct omap2_onenand {
50 struct platform_device *pdev; 49 struct platform_device *pdev;
51 int gpmc_cs; 50 int gpmc_cs;
52 unsigned long phys_base; 51 unsigned long phys_base;
53 unsigned int mem_size; 52 struct gpio_desc *int_gpiod;
54 int gpio_irq;
55 struct mtd_info mtd; 53 struct mtd_info mtd;
56 struct onenand_chip onenand; 54 struct onenand_chip onenand;
57 struct completion irq_done; 55 struct completion irq_done;
58 struct completion dma_done; 56 struct completion dma_done;
59 int dma_channel; 57 struct dma_chan *dma_chan;
60 int freq;
61 int (*setup)(void __iomem *base, int *freq_ptr);
62 struct regulator *regulator;
63 u8 flags;
64}; 58};
65 59
66static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data) 60static void omap2_onenand_dma_complete_func(void *completion)
67{ 61{
68 struct omap2_onenand *c = data; 62 complete(completion);
69
70 complete(&c->dma_done);
71} 63}
72 64
73static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id) 65static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
@@ -90,6 +82,65 @@ static inline void write_reg(struct omap2_onenand *c, unsigned short value,
90 writew(value, c->onenand.base + reg); 82 writew(value, c->onenand.base + reg);
91} 83}
92 84
85static int omap2_onenand_set_cfg(struct omap2_onenand *c,
86 bool sr, bool sw,
87 int latency, int burst_len)
88{
89 unsigned short reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
90
91 reg |= latency << ONENAND_SYS_CFG1_BRL_SHIFT;
92
93 switch (burst_len) {
94 case 0: /* continuous */
95 break;
96 case 4:
97 reg |= ONENAND_SYS_CFG1_BL_4;
98 break;
99 case 8:
100 reg |= ONENAND_SYS_CFG1_BL_8;
101 break;
102 case 16:
103 reg |= ONENAND_SYS_CFG1_BL_16;
104 break;
105 case 32:
106 reg |= ONENAND_SYS_CFG1_BL_32;
107 break;
108 default:
109 return -EINVAL;
110 }
111
112 if (latency > 5)
113 reg |= ONENAND_SYS_CFG1_HF;
114 if (latency > 7)
115 reg |= ONENAND_SYS_CFG1_VHF;
116 if (sr)
117 reg |= ONENAND_SYS_CFG1_SYNC_READ;
118 if (sw)
119 reg |= ONENAND_SYS_CFG1_SYNC_WRITE;
120
121 write_reg(c, reg, ONENAND_REG_SYS_CFG1);
122
123 return 0;
124}
125
126static int omap2_onenand_get_freq(int ver)
127{
128 switch ((ver >> 4) & 0xf) {
129 case 0:
130 return 40;
131 case 1:
132 return 54;
133 case 2:
134 return 66;
135 case 3:
136 return 83;
137 case 4:
138 return 104;
139 }
140
141 return -EINVAL;
142}
143
93static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr) 144static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
94{ 145{
95 printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n", 146 printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
@@ -153,28 +204,22 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
153 if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) { 204 if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
154 syscfg |= ONENAND_SYS_CFG1_IOBE; 205 syscfg |= ONENAND_SYS_CFG1_IOBE;
155 write_reg(c, syscfg, ONENAND_REG_SYS_CFG1); 206 write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
156 if (c->flags & ONENAND_IN_OMAP34XX) 207 /* Add a delay to let GPIO settle */
157 /* Add a delay to let GPIO settle */ 208 syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
158 syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
159 } 209 }
160 210
161 reinit_completion(&c->irq_done); 211 reinit_completion(&c->irq_done);
162 if (c->gpio_irq) { 212 result = gpiod_get_value(c->int_gpiod);
163 result = gpio_get_value(c->gpio_irq); 213 if (result < 0) {
164 if (result == -1) { 214 ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
165 ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS); 215 intr = read_reg(c, ONENAND_REG_INTERRUPT);
166 intr = read_reg(c, ONENAND_REG_INTERRUPT); 216 wait_err("gpio error", state, ctrl, intr);
167 wait_err("gpio error", state, ctrl, intr); 217 return result;
168 return -EIO; 218 } else if (result == 0) {
169 }
170 } else
171 result = 0;
172 if (result == 0) {
173 int retry_cnt = 0; 219 int retry_cnt = 0;
174retry: 220retry:
175 result = wait_for_completion_timeout(&c->irq_done, 221 if (!wait_for_completion_io_timeout(&c->irq_done,
176 msecs_to_jiffies(20)); 222 msecs_to_jiffies(20))) {
177 if (result == 0) {
178 /* Timeout after 20ms */ 223 /* Timeout after 20ms */
179 ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS); 224 ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
180 if (ctrl & ONENAND_CTRL_ONGO && 225 if (ctrl & ONENAND_CTRL_ONGO &&
@@ -291,9 +336,42 @@ static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
291 return 0; 336 return 0;
292} 337}
293 338
294#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2) 339static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
340 dma_addr_t src, dma_addr_t dst,
341 size_t count)
342{
343 struct dma_async_tx_descriptor *tx;
344 dma_cookie_t cookie;
345
346 tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, 0);
347 if (!tx) {
348 dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
349 return -EIO;
350 }
351
352 reinit_completion(&c->dma_done);
353
354 tx->callback = omap2_onenand_dma_complete_func;
355 tx->callback_param = &c->dma_done;
356
357 cookie = tx->tx_submit(tx);
358 if (dma_submit_error(cookie)) {
359 dev_err(&c->pdev->dev, "Failed to do DMA tx_submit\n");
360 return -EIO;
361 }
362
363 dma_async_issue_pending(c->dma_chan);
364
365 if (!wait_for_completion_io_timeout(&c->dma_done,
366 msecs_to_jiffies(20))) {
367 dmaengine_terminate_sync(c->dma_chan);
368 return -ETIMEDOUT;
369 }
370
371 return 0;
372}
295 373
296static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area, 374static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
297 unsigned char *buffer, int offset, 375 unsigned char *buffer, int offset,
298 size_t count) 376 size_t count)
299{ 377{
@@ -301,10 +379,9 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
301 struct onenand_chip *this = mtd->priv; 379 struct onenand_chip *this = mtd->priv;
302 dma_addr_t dma_src, dma_dst; 380 dma_addr_t dma_src, dma_dst;
303 int bram_offset; 381 int bram_offset;
304 unsigned long timeout;
305 void *buf = (void *)buffer; 382 void *buf = (void *)buffer;
306 size_t xtra; 383 size_t xtra;
307 volatile unsigned *done; 384 int ret;
308 385
309 bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset; 386 bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
310 if (bram_offset & 3 || (size_t)buf & 3 || count < 384) 387 if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
@@ -341,25 +418,10 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
341 goto out_copy; 418 goto out_copy;
342 } 419 }
343 420
344 omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32, 421 ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
345 count >> 2, 1, 0, 0, 0);
346 omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
347 dma_src, 0, 0);
348 omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
349 dma_dst, 0, 0);
350
351 reinit_completion(&c->dma_done);
352 omap_start_dma(c->dma_channel);
353
354 timeout = jiffies + msecs_to_jiffies(20);
355 done = &c->dma_done.done;
356 while (time_before(jiffies, timeout))
357 if (*done)
358 break;
359
360 dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE); 422 dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
361 423
362 if (!*done) { 424 if (ret) {
363 dev_err(&c->pdev->dev, "timeout waiting for DMA\n"); 425 dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
364 goto out_copy; 426 goto out_copy;
365 } 427 }
@@ -371,7 +433,7 @@ out_copy:
371 return 0; 433 return 0;
372} 434}
373 435
374static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area, 436static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
375 const unsigned char *buffer, 437 const unsigned char *buffer,
376 int offset, size_t count) 438 int offset, size_t count)
377{ 439{
@@ -379,9 +441,8 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
379 struct onenand_chip *this = mtd->priv; 441 struct onenand_chip *this = mtd->priv;
380 dma_addr_t dma_src, dma_dst; 442 dma_addr_t dma_src, dma_dst;
381 int bram_offset; 443 int bram_offset;
382 unsigned long timeout;
383 void *buf = (void *)buffer; 444 void *buf = (void *)buffer;
384 volatile unsigned *done; 445 int ret;
385 446
386 bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset; 447 bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
387 if (bram_offset & 3 || (size_t)buf & 3 || count < 384) 448 if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
@@ -412,25 +473,10 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
412 return -1; 473 return -1;
413 } 474 }
414 475
415 omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32, 476 ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
416 count >> 2, 1, 0, 0, 0);
417 omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
418 dma_src, 0, 0);
419 omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
420 dma_dst, 0, 0);
421
422 reinit_completion(&c->dma_done);
423 omap_start_dma(c->dma_channel);
424
425 timeout = jiffies + msecs_to_jiffies(20);
426 done = &c->dma_done.done;
427 while (time_before(jiffies, timeout))
428 if (*done)
429 break;
430
431 dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE); 477 dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
432 478
433 if (!*done) { 479 if (ret) {
434 dev_err(&c->pdev->dev, "timeout waiting for DMA\n"); 480 dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
435 goto out_copy; 481 goto out_copy;
436 } 482 }
@@ -442,136 +488,6 @@ out_copy:
442 return 0; 488 return 0;
443} 489}
444 490
445#else
446
447static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
448 unsigned char *buffer, int offset,
449 size_t count)
450{
451 return -ENOSYS;
452}
453
454static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
455 const unsigned char *buffer,
456 int offset, size_t count)
457{
458 return -ENOSYS;
459}
460
461#endif
462
463#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)
464
465static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
466 unsigned char *buffer, int offset,
467 size_t count)
468{
469 struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
470 struct onenand_chip *this = mtd->priv;
471 dma_addr_t dma_src, dma_dst;
472 int bram_offset;
473
474 bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
475 /* DMA is not used. Revisit PM requirements before enabling it. */
476 if (1 || (c->dma_channel < 0) ||
477 ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
478 (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
479 memcpy(buffer, (__force void *)(this->base + bram_offset),
480 count);
481 return 0;
482 }
483
484 dma_src = c->phys_base + bram_offset;
485 dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
486 DMA_FROM_DEVICE);
487 if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
488 dev_err(&c->pdev->dev,
489 "Couldn't DMA map a %d byte buffer\n",
490 count);
491 return -1;
492 }
493
494 omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
495 count / 4, 1, 0, 0, 0);
496 omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
497 dma_src, 0, 0);
498 omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
499 dma_dst, 0, 0);
500
501 reinit_completion(&c->dma_done);
502 omap_start_dma(c->dma_channel);
503 wait_for_completion(&c->dma_done);
504
505 dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
506
507 return 0;
508}
509
510static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
511 const unsigned char *buffer,
512 int offset, size_t count)
513{
514 struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
515 struct onenand_chip *this = mtd->priv;
516 dma_addr_t dma_src, dma_dst;
517 int bram_offset;
518
519 bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
520 /* DMA is not used. Revisit PM requirements before enabling it. */
521 if (1 || (c->dma_channel < 0) ||
522 ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
523 (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
524 memcpy((__force void *)(this->base + bram_offset), buffer,
525 count);
526 return 0;
527 }
528
529 dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
530 DMA_TO_DEVICE);
531 dma_dst = c->phys_base + bram_offset;
532 if (dma_mapping_error(&c->pdev->dev, dma_src)) {
533 dev_err(&c->pdev->dev,
534 "Couldn't DMA map a %d byte buffer\n",
535 count);
536 return -1;
537 }
538
539 omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
540 count / 2, 1, 0, 0, 0);
541 omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
542 dma_src, 0, 0);
543 omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
544 dma_dst, 0, 0);
545
546 reinit_completion(&c->dma_done);
547 omap_start_dma(c->dma_channel);
548 wait_for_completion(&c->dma_done);
549
550 dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
551
552 return 0;
553}
554
555#else
556
557static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
558 unsigned char *buffer, int offset,
559 size_t count)
560{
561 return -ENOSYS;
562}
563
564static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
565 const unsigned char *buffer,
566 int offset, size_t count)
567{
568 return -ENOSYS;
569}
570
571#endif
572
573static struct platform_driver omap2_onenand_driver;
574
575static void omap2_onenand_shutdown(struct platform_device *pdev) 491static void omap2_onenand_shutdown(struct platform_device *pdev)
576{ 492{
577 struct omap2_onenand *c = dev_get_drvdata(&pdev->dev); 493 struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
@@ -583,168 +499,117 @@ static void omap2_onenand_shutdown(struct platform_device *pdev)
583 memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE); 499 memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
584} 500}
585 501
586static int omap2_onenand_enable(struct mtd_info *mtd)
587{
588 int ret;
589 struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
590
591 ret = regulator_enable(c->regulator);
592 if (ret != 0)
593 dev_err(&c->pdev->dev, "can't enable regulator\n");
594
595 return ret;
596}
597
598static int omap2_onenand_disable(struct mtd_info *mtd)
599{
600 int ret;
601 struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
602
603 ret = regulator_disable(c->regulator);
604 if (ret != 0)
605 dev_err(&c->pdev->dev, "can't disable regulator\n");
606
607 return ret;
608}
609
610static int omap2_onenand_probe(struct platform_device *pdev) 502static int omap2_onenand_probe(struct platform_device *pdev)
611{ 503{
612 struct omap_onenand_platform_data *pdata; 504 u32 val;
613 struct omap2_onenand *c; 505 dma_cap_mask_t mask;
614 struct onenand_chip *this; 506 int freq, latency, r;
615 int r;
616 struct resource *res; 507 struct resource *res;
508 struct omap2_onenand *c;
509 struct gpmc_onenand_info info;
510 struct device *dev = &pdev->dev;
511 struct device_node *np = dev->of_node;
512
513 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
514 if (!res) {
515 dev_err(dev, "error getting memory resource\n");
516 return -EINVAL;
517 }
617 518
618 pdata = dev_get_platdata(&pdev->dev); 519 r = of_property_read_u32(np, "reg", &val);
619 if (pdata == NULL) { 520 if (r) {
620 dev_err(&pdev->dev, "platform data missing\n"); 521 dev_err(dev, "reg not found in DT\n");
621 return -ENODEV; 522 return r;
622 } 523 }
623 524
624 c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL); 525 c = devm_kzalloc(dev, sizeof(struct omap2_onenand), GFP_KERNEL);
625 if (!c) 526 if (!c)
626 return -ENOMEM; 527 return -ENOMEM;
627 528
628 init_completion(&c->irq_done); 529 init_completion(&c->irq_done);
629 init_completion(&c->dma_done); 530 init_completion(&c->dma_done);
630 c->flags = pdata->flags; 531 c->gpmc_cs = val;
631 c->gpmc_cs = pdata->cs;
632 c->gpio_irq = pdata->gpio_irq;
633 c->dma_channel = pdata->dma_channel;
634 if (c->dma_channel < 0) {
635 /* if -1, don't use DMA */
636 c->gpio_irq = 0;
637 }
638
639 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
640 if (res == NULL) {
641 r = -EINVAL;
642 dev_err(&pdev->dev, "error getting memory resource\n");
643 goto err_kfree;
644 }
645
646 c->phys_base = res->start; 532 c->phys_base = res->start;
647 c->mem_size = resource_size(res);
648
649 if (request_mem_region(c->phys_base, c->mem_size,
650 pdev->dev.driver->name) == NULL) {
651 dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, size: 0x%x\n",
652 c->phys_base, c->mem_size);
653 r = -EBUSY;
654 goto err_kfree;
655 }
656 c->onenand.base = ioremap(c->phys_base, c->mem_size);
657 if (c->onenand.base == NULL) {
658 r = -ENOMEM;
659 goto err_release_mem_region;
660 }
661 533
662 if (pdata->onenand_setup != NULL) { 534 c->onenand.base = devm_ioremap_resource(dev, res);
663 r = pdata->onenand_setup(c->onenand.base, &c->freq); 535 if (IS_ERR(c->onenand.base))
664 if (r < 0) { 536 return PTR_ERR(c->onenand.base);
665 dev_err(&pdev->dev, "Onenand platform setup failed: " 537
666 "%d\n", r); 538 c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
667 goto err_iounmap; 539 if (IS_ERR(c->int_gpiod)) {
668 } 540 r = PTR_ERR(c->int_gpiod);
669 c->setup = pdata->onenand_setup; 541 /* Just try again if this happens */
542 if (r != -EPROBE_DEFER)
543 dev_err(dev, "error getting gpio: %d\n", r);
544 return r;
670 } 545 }
671 546
672 if (c->gpio_irq) { 547 if (c->int_gpiod) {
673 if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) { 548 r = devm_request_irq(dev, gpiod_to_irq(c->int_gpiod),
674 dev_err(&pdev->dev, "Failed to request GPIO%d for " 549 omap2_onenand_interrupt,
675 "OneNAND\n", c->gpio_irq); 550 IRQF_TRIGGER_RISING, "onenand", c);
676 goto err_iounmap; 551 if (r)
677 } 552 return r;
678 gpio_direction_input(c->gpio_irq);
679 553
680 if ((r = request_irq(gpio_to_irq(c->gpio_irq), 554 c->onenand.wait = omap2_onenand_wait;
681 omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
682 pdev->dev.driver->name, c)) < 0)
683 goto err_release_gpio;
684 } 555 }
685 556
686 if (c->dma_channel >= 0) { 557 dma_cap_zero(mask);
687 r = omap_request_dma(0, pdev->dev.driver->name, 558 dma_cap_set(DMA_MEMCPY, mask);
688 omap2_onenand_dma_cb, (void *) c,
689 &c->dma_channel);
690 if (r == 0) {
691 omap_set_dma_write_mode(c->dma_channel,
692 OMAP_DMA_WRITE_NON_POSTED);
693 omap_set_dma_src_data_pack(c->dma_channel, 1);
694 omap_set_dma_src_burst_mode(c->dma_channel,
695 OMAP_DMA_DATA_BURST_8);
696 omap_set_dma_dest_data_pack(c->dma_channel, 1);
697 omap_set_dma_dest_burst_mode(c->dma_channel,
698 OMAP_DMA_DATA_BURST_8);
699 } else {
700 dev_info(&pdev->dev,
701 "failed to allocate DMA for OneNAND, "
702 "using PIO instead\n");
703 c->dma_channel = -1;
704 }
705 }
706 559
707 dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual " 560 c->dma_chan = dma_request_channel(mask, NULL, NULL);
708 "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base, 561 if (c->dma_chan) {
709 c->onenand.base, c->freq); 562 c->onenand.read_bufferram = omap2_onenand_read_bufferram;
563 c->onenand.write_bufferram = omap2_onenand_write_bufferram;
564 }
710 565
711 c->pdev = pdev; 566 c->pdev = pdev;
712 c->mtd.priv = &c->onenand; 567 c->mtd.priv = &c->onenand;
568 c->mtd.dev.parent = dev;
569 mtd_set_of_node(&c->mtd, dev->of_node);
713 570
714 c->mtd.dev.parent = &pdev->dev; 571 dev_info(dev, "initializing on CS%d (0x%08lx), va %p, %s mode\n",
715 mtd_set_of_node(&c->mtd, pdata->of_node); 572 c->gpmc_cs, c->phys_base, c->onenand.base,
716 573 c->dma_chan ? "DMA" : "PIO");
717 this = &c->onenand;
718 if (c->dma_channel >= 0) {
719 this->wait = omap2_onenand_wait;
720 if (c->flags & ONENAND_IN_OMAP34XX) {
721 this->read_bufferram = omap3_onenand_read_bufferram;
722 this->write_bufferram = omap3_onenand_write_bufferram;
723 } else {
724 this->read_bufferram = omap2_onenand_read_bufferram;
725 this->write_bufferram = omap2_onenand_write_bufferram;
726 }
727 }
728 574
729 if (pdata->regulator_can_sleep) { 575 if ((r = onenand_scan(&c->mtd, 1)) < 0)
730 c->regulator = regulator_get(&pdev->dev, "vonenand"); 576 goto err_release_dma;
731 if (IS_ERR(c->regulator)) { 577
732 dev_err(&pdev->dev, "Failed to get regulator\n"); 578 freq = omap2_onenand_get_freq(c->onenand.version_id);
733 r = PTR_ERR(c->regulator); 579 if (freq > 0) {
734 goto err_release_dma; 580 switch (freq) {
581 case 104:
582 latency = 7;
583 break;
584 case 83:
585 latency = 6;
586 break;
587 case 66:
588 latency = 5;
589 break;
590 case 56:
591 latency = 4;
592 break;
593 default: /* 40 MHz or lower */
594 latency = 3;
595 break;
735 } 596 }
736 c->onenand.enable = omap2_onenand_enable;
737 c->onenand.disable = omap2_onenand_disable;
738 }
739 597
740 if (pdata->skip_initial_unlocking) 598 r = gpmc_omap_onenand_set_timings(dev, c->gpmc_cs,
741 this->options |= ONENAND_SKIP_INITIAL_UNLOCKING; 599 freq, latency, &info);
600 if (r)
601 goto err_release_onenand;
742 602
743 if ((r = onenand_scan(&c->mtd, 1)) < 0) 603 r = omap2_onenand_set_cfg(c, info.sync_read, info.sync_write,
744 goto err_release_regulator; 604 latency, info.burst_len);
605 if (r)
606 goto err_release_onenand;
745 607
746 r = mtd_device_register(&c->mtd, pdata ? pdata->parts : NULL, 608 if (info.sync_read || info.sync_write)
747 pdata ? pdata->nr_parts : 0); 609 dev_info(dev, "optimized timings for %d MHz\n", freq);
610 }
611
612 r = mtd_device_register(&c->mtd, NULL, 0);
748 if (r) 613 if (r)
749 goto err_release_onenand; 614 goto err_release_onenand;
750 615
@@ -754,22 +619,9 @@ static int omap2_onenand_probe(struct platform_device *pdev)
754 619
755err_release_onenand: 620err_release_onenand:
756 onenand_release(&c->mtd); 621 onenand_release(&c->mtd);
757err_release_regulator:
758 regulator_put(c->regulator);
759err_release_dma: 622err_release_dma:
760 if (c->dma_channel != -1) 623 if (c->dma_chan)
761 omap_free_dma(c->dma_channel); 624 dma_release_channel(c->dma_chan);
762 if (c->gpio_irq)
763 free_irq(gpio_to_irq(c->gpio_irq), c);
764err_release_gpio:
765 if (c->gpio_irq)
766 gpio_free(c->gpio_irq);
767err_iounmap:
768 iounmap(c->onenand.base);
769err_release_mem_region:
770 release_mem_region(c->phys_base, c->mem_size);
771err_kfree:
772 kfree(c);
773 625
774 return r; 626 return r;
775} 627}
@@ -779,27 +631,26 @@ static int omap2_onenand_remove(struct platform_device *pdev)
779 struct omap2_onenand *c = dev_get_drvdata(&pdev->dev); 631 struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
780 632
781 onenand_release(&c->mtd); 633 onenand_release(&c->mtd);
782 regulator_put(c->regulator); 634 if (c->dma_chan)
783 if (c->dma_channel != -1) 635 dma_release_channel(c->dma_chan);
784 omap_free_dma(c->dma_channel);
785 omap2_onenand_shutdown(pdev); 636 omap2_onenand_shutdown(pdev);
786 if (c->gpio_irq) {
787 free_irq(gpio_to_irq(c->gpio_irq), c);
788 gpio_free(c->gpio_irq);
789 }
790 iounmap(c->onenand.base);
791 release_mem_region(c->phys_base, c->mem_size);
792 kfree(c);
793 637
794 return 0; 638 return 0;
795} 639}
796 640
641static const struct of_device_id omap2_onenand_id_table[] = {
642 { .compatible = "ti,omap2-onenand", },
643 {},
644};
645MODULE_DEVICE_TABLE(of, omap2_onenand_id_table);
646
797static struct platform_driver omap2_onenand_driver = { 647static struct platform_driver omap2_onenand_driver = {
798 .probe = omap2_onenand_probe, 648 .probe = omap2_onenand_probe,
799 .remove = omap2_onenand_remove, 649 .remove = omap2_onenand_remove,
800 .shutdown = omap2_onenand_shutdown, 650 .shutdown = omap2_onenand_shutdown,
801 .driver = { 651 .driver = {
802 .name = DRIVER_NAME, 652 .name = DRIVER_NAME,
653 .of_match_table = omap2_onenand_id_table,
803 }, 654 },
804}; 655};
805 656
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index af0ac1a7bf8f..2e9d076e445a 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -25,8 +25,6 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/io.h> 26#include <linux/io.h>
27 27
28#include <asm/mach/flash.h>
29
30#include "samsung.h" 28#include "samsung.h"
31 29
32enum soc_type { 30enum soc_type {
@@ -129,16 +127,13 @@ struct s3c_onenand {
129 struct platform_device *pdev; 127 struct platform_device *pdev;
130 enum soc_type type; 128 enum soc_type type;
131 void __iomem *base; 129 void __iomem *base;
132 struct resource *base_res;
133 void __iomem *ahb_addr; 130 void __iomem *ahb_addr;
134 struct resource *ahb_res;
135 int bootram_command; 131 int bootram_command;
136 void __iomem *page_buf; 132 void *page_buf;
137 void __iomem *oob_buf; 133 void *oob_buf;
138 unsigned int (*mem_addr)(int fba, int fpa, int fsa); 134 unsigned int (*mem_addr)(int fba, int fpa, int fsa);
139 unsigned int (*cmd_map)(unsigned int type, unsigned int val); 135 unsigned int (*cmd_map)(unsigned int type, unsigned int val);
140 void __iomem *dma_addr; 136 void __iomem *dma_addr;
141 struct resource *dma_res;
142 unsigned long phys_base; 137 unsigned long phys_base;
143 struct completion complete; 138 struct completion complete;
144}; 139};
@@ -413,8 +408,8 @@ static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr,
413 /* 408 /*
414 * Emulate Two BufferRAMs and access with 4 bytes pointer 409 * Emulate Two BufferRAMs and access with 4 bytes pointer
415 */ 410 */
416 m = (unsigned int *) onenand->page_buf; 411 m = onenand->page_buf;
417 s = (unsigned int *) onenand->oob_buf; 412 s = onenand->oob_buf;
418 413
419 if (index) { 414 if (index) {
420 m += (this->writesize >> 2); 415 m += (this->writesize >> 2);
@@ -486,11 +481,11 @@ static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area)
486 unsigned char *p; 481 unsigned char *p;
487 482
488 if (area == ONENAND_DATARAM) { 483 if (area == ONENAND_DATARAM) {
489 p = (unsigned char *) onenand->page_buf; 484 p = onenand->page_buf;
490 if (index == 1) 485 if (index == 1)
491 p += this->writesize; 486 p += this->writesize;
492 } else { 487 } else {
493 p = (unsigned char *) onenand->oob_buf; 488 p = onenand->oob_buf;
494 if (index == 1) 489 if (index == 1)
495 p += mtd->oobsize; 490 p += mtd->oobsize;
496 } 491 }
@@ -851,15 +846,14 @@ static int s3c_onenand_probe(struct platform_device *pdev)
851 /* No need to check pdata. the platform data is optional */ 846 /* No need to check pdata. the platform data is optional */
852 847
853 size = sizeof(struct mtd_info) + sizeof(struct onenand_chip); 848 size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
854 mtd = kzalloc(size, GFP_KERNEL); 849 mtd = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
855 if (!mtd) 850 if (!mtd)
856 return -ENOMEM; 851 return -ENOMEM;
857 852
858 onenand = kzalloc(sizeof(struct s3c_onenand), GFP_KERNEL); 853 onenand = devm_kzalloc(&pdev->dev, sizeof(struct s3c_onenand),
859 if (!onenand) { 854 GFP_KERNEL);
860 err = -ENOMEM; 855 if (!onenand)
861 goto onenand_fail; 856 return -ENOMEM;
862 }
863 857
864 this = (struct onenand_chip *) &mtd[1]; 858 this = (struct onenand_chip *) &mtd[1];
865 mtd->priv = this; 859 mtd->priv = this;
@@ -870,26 +864,12 @@ static int s3c_onenand_probe(struct platform_device *pdev)
870 s3c_onenand_setup(mtd); 864 s3c_onenand_setup(mtd);
871 865
872 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 866 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
873 if (!r) { 867 onenand->base = devm_ioremap_resource(&pdev->dev, r);
874 dev_err(&pdev->dev, "no memory resource defined\n"); 868 if (IS_ERR(onenand->base))
875 return -ENOENT; 869 return PTR_ERR(onenand->base);
876 goto ahb_resource_failed;
877 }
878 870
879 onenand->base_res = request_mem_region(r->start, resource_size(r), 871 onenand->phys_base = r->start;
880 pdev->name);
881 if (!onenand->base_res) {
882 dev_err(&pdev->dev, "failed to request memory resource\n");
883 err = -EBUSY;
884 goto resource_failed;
885 }
886 872
887 onenand->base = ioremap(r->start, resource_size(r));
888 if (!onenand->base) {
889 dev_err(&pdev->dev, "failed to map memory resource\n");
890 err = -EFAULT;
891 goto ioremap_failed;
892 }
893 /* Set onenand_chip also */ 873 /* Set onenand_chip also */
894 this->base = onenand->base; 874 this->base = onenand->base;
895 875
@@ -898,40 +878,20 @@ static int s3c_onenand_probe(struct platform_device *pdev)
898 878
899 if (onenand->type != TYPE_S5PC110) { 879 if (onenand->type != TYPE_S5PC110) {
900 r = platform_get_resource(pdev, IORESOURCE_MEM, 1); 880 r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
901 if (!r) { 881 onenand->ahb_addr = devm_ioremap_resource(&pdev->dev, r);
902 dev_err(&pdev->dev, "no buffer memory resource defined\n"); 882 if (IS_ERR(onenand->ahb_addr))
903 err = -ENOENT; 883 return PTR_ERR(onenand->ahb_addr);
904 goto ahb_resource_failed;
905 }
906
907 onenand->ahb_res = request_mem_region(r->start, resource_size(r),
908 pdev->name);
909 if (!onenand->ahb_res) {
910 dev_err(&pdev->dev, "failed to request buffer memory resource\n");
911 err = -EBUSY;
912 goto ahb_resource_failed;
913 }
914
915 onenand->ahb_addr = ioremap(r->start, resource_size(r));
916 if (!onenand->ahb_addr) {
917 dev_err(&pdev->dev, "failed to map buffer memory resource\n");
918 err = -EINVAL;
919 goto ahb_ioremap_failed;
920 }
921 884
922 /* Allocate 4KiB BufferRAM */ 885 /* Allocate 4KiB BufferRAM */
923 onenand->page_buf = kzalloc(SZ_4K, GFP_KERNEL); 886 onenand->page_buf = devm_kzalloc(&pdev->dev, SZ_4K,
924 if (!onenand->page_buf) { 887 GFP_KERNEL);
925 err = -ENOMEM; 888 if (!onenand->page_buf)
926 goto page_buf_fail; 889 return -ENOMEM;
927 }
928 890
929 /* Allocate 128 SpareRAM */ 891 /* Allocate 128 SpareRAM */
930 onenand->oob_buf = kzalloc(128, GFP_KERNEL); 892 onenand->oob_buf = devm_kzalloc(&pdev->dev, 128, GFP_KERNEL);
931 if (!onenand->oob_buf) { 893 if (!onenand->oob_buf)
932 err = -ENOMEM; 894 return -ENOMEM;
933 goto oob_buf_fail;
934 }
935 895
936 /* S3C doesn't handle subpage write */ 896 /* S3C doesn't handle subpage write */
937 mtd->subpage_sft = 0; 897 mtd->subpage_sft = 0;
@@ -939,28 +899,9 @@ static int s3c_onenand_probe(struct platform_device *pdev)
939 899
940 } else { /* S5PC110 */ 900 } else { /* S5PC110 */
941 r = platform_get_resource(pdev, IORESOURCE_MEM, 1); 901 r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
942 if (!r) { 902 onenand->dma_addr = devm_ioremap_resource(&pdev->dev, r);
943 dev_err(&pdev->dev, "no dma memory resource defined\n"); 903 if (IS_ERR(onenand->dma_addr))
944 err = -ENOENT; 904 return PTR_ERR(onenand->dma_addr);
945 goto dma_resource_failed;
946 }
947
948 onenand->dma_res = request_mem_region(r->start, resource_size(r),
949 pdev->name);
950 if (!onenand->dma_res) {
951 dev_err(&pdev->dev, "failed to request dma memory resource\n");
952 err = -EBUSY;
953 goto dma_resource_failed;
954 }
955
956 onenand->dma_addr = ioremap(r->start, resource_size(r));
957 if (!onenand->dma_addr) {
958 dev_err(&pdev->dev, "failed to map dma memory resource\n");
959 err = -EINVAL;
960 goto dma_ioremap_failed;
961 }
962
963 onenand->phys_base = onenand->base_res->start;
964 905
965 s5pc110_dma_ops = s5pc110_dma_poll; 906 s5pc110_dma_ops = s5pc110_dma_poll;
966 /* Interrupt support */ 907 /* Interrupt support */
@@ -968,19 +909,20 @@ static int s3c_onenand_probe(struct platform_device *pdev)
968 if (r) { 909 if (r) {
969 init_completion(&onenand->complete); 910 init_completion(&onenand->complete);
970 s5pc110_dma_ops = s5pc110_dma_irq; 911 s5pc110_dma_ops = s5pc110_dma_irq;
971 err = request_irq(r->start, s5pc110_onenand_irq, 912 err = devm_request_irq(&pdev->dev, r->start,
972 IRQF_SHARED, "onenand", &onenand); 913 s5pc110_onenand_irq,
914 IRQF_SHARED, "onenand",
915 &onenand);
973 if (err) { 916 if (err) {
974 dev_err(&pdev->dev, "failed to get irq\n"); 917 dev_err(&pdev->dev, "failed to get irq\n");
975 goto scan_failed; 918 return err;
976 } 919 }
977 } 920 }
978 } 921 }
979 922
980 if (onenand_scan(mtd, 1)) { 923 err = onenand_scan(mtd, 1);
981 err = -EFAULT; 924 if (err)
982 goto scan_failed; 925 return err;
983 }
984 926
985 if (onenand->type != TYPE_S5PC110) { 927 if (onenand->type != TYPE_S5PC110) {
986 /* S3C doesn't handle subpage write */ 928 /* S3C doesn't handle subpage write */
@@ -994,40 +936,15 @@ static int s3c_onenand_probe(struct platform_device *pdev)
994 err = mtd_device_parse_register(mtd, NULL, NULL, 936 err = mtd_device_parse_register(mtd, NULL, NULL,
995 pdata ? pdata->parts : NULL, 937 pdata ? pdata->parts : NULL,
996 pdata ? pdata->nr_parts : 0); 938 pdata ? pdata->nr_parts : 0);
939 if (err) {
940 dev_err(&pdev->dev, "failed to parse partitions and register the MTD device\n");
941 onenand_release(mtd);
942 return err;
943 }
997 944
998 platform_set_drvdata(pdev, mtd); 945 platform_set_drvdata(pdev, mtd);
999 946
1000 return 0; 947 return 0;
1001
1002scan_failed:
1003 if (onenand->dma_addr)
1004 iounmap(onenand->dma_addr);
1005dma_ioremap_failed:
1006 if (onenand->dma_res)
1007 release_mem_region(onenand->dma_res->start,
1008 resource_size(onenand->dma_res));
1009 kfree(onenand->oob_buf);
1010oob_buf_fail:
1011 kfree(onenand->page_buf);
1012page_buf_fail:
1013 if (onenand->ahb_addr)
1014 iounmap(onenand->ahb_addr);
1015ahb_ioremap_failed:
1016 if (onenand->ahb_res)
1017 release_mem_region(onenand->ahb_res->start,
1018 resource_size(onenand->ahb_res));
1019dma_resource_failed:
1020ahb_resource_failed:
1021 iounmap(onenand->base);
1022ioremap_failed:
1023 if (onenand->base_res)
1024 release_mem_region(onenand->base_res->start,
1025 resource_size(onenand->base_res));
1026resource_failed:
1027 kfree(onenand);
1028onenand_fail:
1029 kfree(mtd);
1030 return err;
1031} 948}
1032 949
1033static int s3c_onenand_remove(struct platform_device *pdev) 950static int s3c_onenand_remove(struct platform_device *pdev)
@@ -1035,25 +952,7 @@ static int s3c_onenand_remove(struct platform_device *pdev)
1035 struct mtd_info *mtd = platform_get_drvdata(pdev); 952 struct mtd_info *mtd = platform_get_drvdata(pdev);
1036 953
1037 onenand_release(mtd); 954 onenand_release(mtd);
1038 if (onenand->ahb_addr) 955
1039 iounmap(onenand->ahb_addr);
1040 if (onenand->ahb_res)
1041 release_mem_region(onenand->ahb_res->start,
1042 resource_size(onenand->ahb_res));
1043 if (onenand->dma_addr)
1044 iounmap(onenand->dma_addr);
1045 if (onenand->dma_res)
1046 release_mem_region(onenand->dma_res->start,
1047 resource_size(onenand->dma_res));
1048
1049 iounmap(onenand->base);
1050 release_mem_region(onenand->base_res->start,
1051 resource_size(onenand->base_res));
1052
1053 kfree(onenand->oob_buf);
1054 kfree(onenand->page_buf);
1055 kfree(onenand);
1056 kfree(mtd);
1057 return 0; 956 return 0;
1058} 957}
1059 958
diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c
index 5f03b8c885a9..cde19c99e77b 100644
--- a/drivers/mtd/tests/nandbiterrs.c
+++ b/drivers/mtd/tests/nandbiterrs.c
@@ -151,7 +151,7 @@ static int read_page(int log)
151 memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats)); 151 memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats));
152 152
153 err = mtd_read(mtd, offset, mtd->writesize, &read, rbuffer); 153 err = mtd_read(mtd, offset, mtd->writesize, &read, rbuffer);
154 if (err == -EUCLEAN) 154 if (!err || err == -EUCLEAN)
155 err = mtd->ecc_stats.corrected - oldstats.corrected; 155 err = mtd->ecc_stats.corrected - oldstats.corrected;
156 156
157 if (err < 0 || read != mtd->writesize) { 157 if (err < 0 || read != mtd->writesize) {
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index 1cb3f7758fb6..766b2c385682 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -193,6 +193,9 @@ static int verify_eraseblock(int ebnum)
193 ops.datbuf = NULL; 193 ops.datbuf = NULL;
194 ops.oobbuf = readbuf; 194 ops.oobbuf = readbuf;
195 err = mtd_read_oob(mtd, addr, &ops); 195 err = mtd_read_oob(mtd, addr, &ops);
196 if (mtd_is_bitflip(err))
197 err = 0;
198
196 if (err || ops.oobretlen != use_len) { 199 if (err || ops.oobretlen != use_len) {
197 pr_err("error: readoob failed at %#llx\n", 200 pr_err("error: readoob failed at %#llx\n",
198 (long long)addr); 201 (long long)addr);
@@ -227,6 +230,9 @@ static int verify_eraseblock(int ebnum)
227 ops.datbuf = NULL; 230 ops.datbuf = NULL;
228 ops.oobbuf = readbuf; 231 ops.oobbuf = readbuf;
229 err = mtd_read_oob(mtd, addr, &ops); 232 err = mtd_read_oob(mtd, addr, &ops);
233 if (mtd_is_bitflip(err))
234 err = 0;
235
230 if (err || ops.oobretlen != mtd->oobavail) { 236 if (err || ops.oobretlen != mtd->oobavail) {
231 pr_err("error: readoob failed at %#llx\n", 237 pr_err("error: readoob failed at %#llx\n",
232 (long long)addr); 238 (long long)addr);
@@ -286,6 +292,9 @@ static int verify_eraseblock_in_one_go(int ebnum)
286 292
287 /* read entire block's OOB at one go */ 293 /* read entire block's OOB at one go */
288 err = mtd_read_oob(mtd, addr, &ops); 294 err = mtd_read_oob(mtd, addr, &ops);
295 if (mtd_is_bitflip(err))
296 err = 0;
297
289 if (err || ops.oobretlen != len) { 298 if (err || ops.oobretlen != len) {
290 pr_err("error: readoob failed at %#llx\n", 299 pr_err("error: readoob failed at %#llx\n",
291 (long long)addr); 300 (long long)addr);
@@ -527,6 +536,9 @@ static int __init mtd_oobtest_init(void)
527 pr_info("attempting to start read past end of OOB\n"); 536 pr_info("attempting to start read past end of OOB\n");
528 pr_info("an error is expected...\n"); 537 pr_info("an error is expected...\n");
529 err = mtd_read_oob(mtd, addr0, &ops); 538 err = mtd_read_oob(mtd, addr0, &ops);
539 if (mtd_is_bitflip(err))
540 err = 0;
541
530 if (err) { 542 if (err) {
531 pr_info("error occurred as expected\n"); 543 pr_info("error occurred as expected\n");
532 err = 0; 544 err = 0;
@@ -571,6 +583,9 @@ static int __init mtd_oobtest_init(void)
571 pr_info("attempting to read past end of device\n"); 583 pr_info("attempting to read past end of device\n");
572 pr_info("an error is expected...\n"); 584 pr_info("an error is expected...\n");
573 err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops); 585 err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
586 if (mtd_is_bitflip(err))
587 err = 0;
588
574 if (err) { 589 if (err) {
575 pr_info("error occurred as expected\n"); 590 pr_info("error occurred as expected\n");
576 err = 0; 591 err = 0;
@@ -615,6 +630,9 @@ static int __init mtd_oobtest_init(void)
615 pr_info("attempting to read past end of device\n"); 630 pr_info("attempting to read past end of device\n");
616 pr_info("an error is expected...\n"); 631 pr_info("an error is expected...\n");
617 err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops); 632 err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
633 if (mtd_is_bitflip(err))
634 err = 0;
635
618 if (err) { 636 if (err) {
619 pr_info("error occurred as expected\n"); 637 pr_info("error occurred as expected\n");
620 err = 0; 638 err = 0;
@@ -684,6 +702,9 @@ static int __init mtd_oobtest_init(void)
684 ops.datbuf = NULL; 702 ops.datbuf = NULL;
685 ops.oobbuf = readbuf; 703 ops.oobbuf = readbuf;
686 err = mtd_read_oob(mtd, addr, &ops); 704 err = mtd_read_oob(mtd, addr, &ops);
705 if (mtd_is_bitflip(err))
706 err = 0;
707
687 if (err) 708 if (err)
688 goto out; 709 goto out;
689 if (memcmpshow(addr, readbuf, writebuf, 710 if (memcmpshow(addr, readbuf, writebuf,
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 87595c594b12..264ad362d858 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -637,8 +637,7 @@ static int spinand_write_page_hwecc(struct mtd_info *mtd,
637 int eccsteps = chip->ecc.steps; 637 int eccsteps = chip->ecc.steps;
638 638
639 enable_hw_ecc = 1; 639 enable_hw_ecc = 1;
640 chip->write_buf(mtd, p, eccsize * eccsteps); 640 return nand_prog_page_op(chip, page, 0, p, eccsize * eccsteps);
641 return 0;
642} 641}
643 642
644static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 643static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
@@ -653,7 +652,7 @@ static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
653 652
654 enable_read_hw_ecc = 1; 653 enable_read_hw_ecc = 1;
655 654
656 chip->read_buf(mtd, p, eccsize * eccsteps); 655 nand_read_page_op(chip, page, 0, p, eccsize * eccsteps);
657 if (oob_required) 656 if (oob_required)
658 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 657 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
659 658
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 749bb08c4772..56c5570aadbe 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -133,12 +133,6 @@ enum nand_ecc_algo {
133 */ 133 */
134#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0) 134#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
135#define NAND_ECC_MAXIMIZE BIT(1) 135#define NAND_ECC_MAXIMIZE BIT(1)
136/*
137 * If your controller already sends the required NAND commands when
138 * reading or writing a page, then the framework is not supposed to
139 * send READ0 and SEQIN/PAGEPROG respectively.
140 */
141#define NAND_ECC_CUSTOM_PAGE_ACCESS BIT(2)
142 136
143/* Bit mask for flags passed to do_nand_read_ecc */ 137/* Bit mask for flags passed to do_nand_read_ecc */
144#define NAND_GET_DEVICE 0x80 138#define NAND_GET_DEVICE 0x80
@@ -191,11 +185,6 @@ enum nand_ecc_algo {
191/* Non chip related options */ 185/* Non chip related options */
192/* This option skips the bbt scan during initialization. */ 186/* This option skips the bbt scan during initialization. */
193#define NAND_SKIP_BBTSCAN 0x00010000 187#define NAND_SKIP_BBTSCAN 0x00010000
194/*
195 * This option is defined if the board driver allocates its own buffers
196 * (e.g. because it needs them DMA-coherent).
197 */
198#define NAND_OWN_BUFFERS 0x00020000
199/* Chip may not exist, so silence any errors in scan */ 188/* Chip may not exist, so silence any errors in scan */
200#define NAND_SCAN_SILENT_NODEV 0x00040000 189#define NAND_SCAN_SILENT_NODEV 0x00040000
201/* 190/*
@@ -525,6 +514,8 @@ static const struct nand_ecc_caps __name = { \
525 * @postpad: padding information for syndrome based ECC generators 514 * @postpad: padding information for syndrome based ECC generators
526 * @options: ECC specific options (see NAND_ECC_XXX flags defined above) 515 * @options: ECC specific options (see NAND_ECC_XXX flags defined above)
527 * @priv: pointer to private ECC control data 516 * @priv: pointer to private ECC control data
517 * @calc_buf: buffer for calculated ECC, size is oobsize.
518 * @code_buf: buffer for ECC read from flash, size is oobsize.
528 * @hwctl: function to control hardware ECC generator. Must only 519 * @hwctl: function to control hardware ECC generator. Must only
529 * be provided if an hardware ECC is available 520 * be provided if an hardware ECC is available
530 * @calculate: function for ECC calculation or readback from ECC hardware 521 * @calculate: function for ECC calculation or readback from ECC hardware
@@ -575,6 +566,8 @@ struct nand_ecc_ctrl {
575 int postpad; 566 int postpad;
576 unsigned int options; 567 unsigned int options;
577 void *priv; 568 void *priv;
569 u8 *calc_buf;
570 u8 *code_buf;
578 void (*hwctl)(struct mtd_info *mtd, int mode); 571 void (*hwctl)(struct mtd_info *mtd, int mode);
579 int (*calculate)(struct mtd_info *mtd, const uint8_t *dat, 572 int (*calculate)(struct mtd_info *mtd, const uint8_t *dat,
580 uint8_t *ecc_code); 573 uint8_t *ecc_code);
@@ -602,26 +595,6 @@ struct nand_ecc_ctrl {
602 int page); 595 int page);
603}; 596};
604 597
605static inline int nand_standard_page_accessors(struct nand_ecc_ctrl *ecc)
606{
607 return !(ecc->options & NAND_ECC_CUSTOM_PAGE_ACCESS);
608}
609
610/**
611 * struct nand_buffers - buffer structure for read/write
612 * @ecccalc: buffer pointer for calculated ECC, size is oobsize.
613 * @ecccode: buffer pointer for ECC read from flash, size is oobsize.
614 * @databuf: buffer pointer for data, size is (page size + oobsize).
615 *
616 * Do not change the order of buffers. databuf and oobrbuf must be in
617 * consecutive order.
618 */
619struct nand_buffers {
620 uint8_t *ecccalc;
621 uint8_t *ecccode;
622 uint8_t *databuf;
623};
624
625/** 598/**
626 * struct nand_sdr_timings - SDR NAND chip timings 599 * struct nand_sdr_timings - SDR NAND chip timings
627 * 600 *
@@ -762,6 +735,350 @@ struct nand_manufacturer_ops {
762}; 735};
763 736
764/** 737/**
738 * struct nand_op_cmd_instr - Definition of a command instruction
739 * @opcode: the command to issue in one cycle
740 */
741struct nand_op_cmd_instr {
742 u8 opcode;
743};
744
745/**
746 * struct nand_op_addr_instr - Definition of an address instruction
747 * @naddrs: length of the @addrs array
748 * @addrs: array containing the address cycles to issue
749 */
750struct nand_op_addr_instr {
751 unsigned int naddrs;
752 const u8 *addrs;
753};
754
755/**
756 * struct nand_op_data_instr - Definition of a data instruction
757 * @len: number of data bytes to move
758 * @in: buffer to fill when reading from the NAND chip
759 * @out: buffer to read from when writing to the NAND chip
760 * @force_8bit: force 8-bit access
761 *
762 * Please note that "in" and "out" are inverted from the ONFI specification
763 * and are from the controller perspective, so a "in" is a read from the NAND
764 * chip while a "out" is a write to the NAND chip.
765 */
766struct nand_op_data_instr {
767 unsigned int len;
768 union {
769 void *in;
770 const void *out;
771 } buf;
772 bool force_8bit;
773};
774
775/**
776 * struct nand_op_waitrdy_instr - Definition of a wait ready instruction
777 * @timeout_ms: maximum delay while waiting for the ready/busy pin in ms
778 */
779struct nand_op_waitrdy_instr {
780 unsigned int timeout_ms;
781};
782
783/**
784 * enum nand_op_instr_type - Definition of all instruction types
785 * @NAND_OP_CMD_INSTR: command instruction
786 * @NAND_OP_ADDR_INSTR: address instruction
787 * @NAND_OP_DATA_IN_INSTR: data in instruction
788 * @NAND_OP_DATA_OUT_INSTR: data out instruction
789 * @NAND_OP_WAITRDY_INSTR: wait ready instruction
790 */
791enum nand_op_instr_type {
792 NAND_OP_CMD_INSTR,
793 NAND_OP_ADDR_INSTR,
794 NAND_OP_DATA_IN_INSTR,
795 NAND_OP_DATA_OUT_INSTR,
796 NAND_OP_WAITRDY_INSTR,
797};
798
799/**
800 * struct nand_op_instr - Instruction object
801 * @type: the instruction type
802 * @cmd/@addr/@data/@waitrdy: extra data associated to the instruction.
803 * You'll have to use the appropriate element
804 * depending on @type
805 * @delay_ns: delay the controller should apply after the instruction has been
806 * issued on the bus. Most modern controllers have internal timings
807 * control logic, and in this case, the controller driver can ignore
808 * this field.
809 */
810struct nand_op_instr {
811 enum nand_op_instr_type type;
812 union {
813 struct nand_op_cmd_instr cmd;
814 struct nand_op_addr_instr addr;
815 struct nand_op_data_instr data;
816 struct nand_op_waitrdy_instr waitrdy;
817 } ctx;
818 unsigned int delay_ns;
819};
820
821/*
822 * Special handling must be done for the WAITRDY timeout parameter as it usually
823 * is either tPROG (after a prog), tR (before a read), tRST (during a reset) or
824 * tBERS (during an erase) which all of them are u64 values that cannot be
825 * divided by usual kernel macros and must be handled with the special
826 * DIV_ROUND_UP_ULL() macro.
827 */
828#define __DIVIDE(dividend, divisor) ({ \
829 sizeof(dividend) == sizeof(u32) ? \
830 DIV_ROUND_UP(dividend, divisor) : \
831 DIV_ROUND_UP_ULL(dividend, divisor); \
832 })
833#define PSEC_TO_NSEC(x) __DIVIDE(x, 1000)
834#define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000)
835
836#define NAND_OP_CMD(id, ns) \
837 { \
838 .type = NAND_OP_CMD_INSTR, \
839 .ctx.cmd.opcode = id, \
840 .delay_ns = ns, \
841 }
842
843#define NAND_OP_ADDR(ncycles, cycles, ns) \
844 { \
845 .type = NAND_OP_ADDR_INSTR, \
846 .ctx.addr = { \
847 .naddrs = ncycles, \
848 .addrs = cycles, \
849 }, \
850 .delay_ns = ns, \
851 }
852
853#define NAND_OP_DATA_IN(l, b, ns) \
854 { \
855 .type = NAND_OP_DATA_IN_INSTR, \
856 .ctx.data = { \
857 .len = l, \
858 .buf.in = b, \
859 .force_8bit = false, \
860 }, \
861 .delay_ns = ns, \
862 }
863
864#define NAND_OP_DATA_OUT(l, b, ns) \
865 { \
866 .type = NAND_OP_DATA_OUT_INSTR, \
867 .ctx.data = { \
868 .len = l, \
869 .buf.out = b, \
870 .force_8bit = false, \
871 }, \
872 .delay_ns = ns, \
873 }
874
875#define NAND_OP_8BIT_DATA_IN(l, b, ns) \
876 { \
877 .type = NAND_OP_DATA_IN_INSTR, \
878 .ctx.data = { \
879 .len = l, \
880 .buf.in = b, \
881 .force_8bit = true, \
882 }, \
883 .delay_ns = ns, \
884 }
885
886#define NAND_OP_8BIT_DATA_OUT(l, b, ns) \
887 { \
888 .type = NAND_OP_DATA_OUT_INSTR, \
889 .ctx.data = { \
890 .len = l, \
891 .buf.out = b, \
892 .force_8bit = true, \
893 }, \
894 .delay_ns = ns, \
895 }
896
897#define NAND_OP_WAIT_RDY(tout_ms, ns) \
898 { \
899 .type = NAND_OP_WAITRDY_INSTR, \
900 .ctx.waitrdy.timeout_ms = tout_ms, \
901 .delay_ns = ns, \
902 }
903
904/**
905 * struct nand_subop - a sub operation
906 * @instrs: array of instructions
907 * @ninstrs: length of the @instrs array
908 * @first_instr_start_off: offset to start from for the first instruction
909 * of the sub-operation
910 * @last_instr_end_off: offset to end at (excluded) for the last instruction
911 * of the sub-operation
912 *
913 * Both @first_instr_start_off and @last_instr_end_off only apply to data or
914 * address instructions.
915 *
916 * When an operation cannot be handled as is by the NAND controller, it will
917 * be split by the parser into sub-operations which will be passed to the
918 * controller driver.
919 */
920struct nand_subop {
921 const struct nand_op_instr *instrs;
922 unsigned int ninstrs;
923 unsigned int first_instr_start_off;
924 unsigned int last_instr_end_off;
925};
926
927int nand_subop_get_addr_start_off(const struct nand_subop *subop,
928 unsigned int op_id);
929int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
930 unsigned int op_id);
931int nand_subop_get_data_start_off(const struct nand_subop *subop,
932 unsigned int op_id);
933int nand_subop_get_data_len(const struct nand_subop *subop,
934 unsigned int op_id);
935
936/**
937 * struct nand_op_parser_addr_constraints - Constraints for address instructions
938 * @maxcycles: maximum number of address cycles the controller can issue in a
939 * single step
940 */
941struct nand_op_parser_addr_constraints {
942 unsigned int maxcycles;
943};
944
945/**
946 * struct nand_op_parser_data_constraints - Constraints for data instructions
947 * @maxlen: maximum data length that the controller can handle in a single step
948 */
949struct nand_op_parser_data_constraints {
950 unsigned int maxlen;
951};
952
953/**
954 * struct nand_op_parser_pattern_elem - One element of a pattern
955 * @type: the instructuction type
956 * @optional: whether this element of the pattern is optional or mandatory
957 * @addr/@data: address or data constraint (number of cycles or data length)
958 */
959struct nand_op_parser_pattern_elem {
960 enum nand_op_instr_type type;
961 bool optional;
962 union {
963 struct nand_op_parser_addr_constraints addr;
964 struct nand_op_parser_data_constraints data;
965 } ctx;
966};
967
968#define NAND_OP_PARSER_PAT_CMD_ELEM(_opt) \
969 { \
970 .type = NAND_OP_CMD_INSTR, \
971 .optional = _opt, \
972 }
973
974#define NAND_OP_PARSER_PAT_ADDR_ELEM(_opt, _maxcycles) \
975 { \
976 .type = NAND_OP_ADDR_INSTR, \
977 .optional = _opt, \
978 .ctx.addr.maxcycles = _maxcycles, \
979 }
980
981#define NAND_OP_PARSER_PAT_DATA_IN_ELEM(_opt, _maxlen) \
982 { \
983 .type = NAND_OP_DATA_IN_INSTR, \
984 .optional = _opt, \
985 .ctx.data.maxlen = _maxlen, \
986 }
987
988#define NAND_OP_PARSER_PAT_DATA_OUT_ELEM(_opt, _maxlen) \
989 { \
990 .type = NAND_OP_DATA_OUT_INSTR, \
991 .optional = _opt, \
992 .ctx.data.maxlen = _maxlen, \
993 }
994
995#define NAND_OP_PARSER_PAT_WAITRDY_ELEM(_opt) \
996 { \
997 .type = NAND_OP_WAITRDY_INSTR, \
998 .optional = _opt, \
999 }
1000
1001/**
1002 * struct nand_op_parser_pattern - NAND sub-operation pattern descriptor
1003 * @elems: array of pattern elements
1004 * @nelems: number of pattern elements in @elems array
1005 * @exec: the function that will issue a sub-operation
1006 *
1007 * A pattern is a list of elements, each element reprensenting one instruction
1008 * with its constraints. The pattern itself is used by the core to match NAND
1009 * chip operation with NAND controller operations.
1010 * Once a match between a NAND controller operation pattern and a NAND chip
1011 * operation (or a sub-set of a NAND operation) is found, the pattern ->exec()
1012 * hook is called so that the controller driver can issue the operation on the
1013 * bus.
1014 *
1015 * Controller drivers should declare as many patterns as they support and pass
1016 * this list of patterns (created with the help of the following macro) to
1017 * the nand_op_parser_exec_op() helper.
1018 */
1019struct nand_op_parser_pattern {
1020 const struct nand_op_parser_pattern_elem *elems;
1021 unsigned int nelems;
1022 int (*exec)(struct nand_chip *chip, const struct nand_subop *subop);
1023};
1024
1025#define NAND_OP_PARSER_PATTERN(_exec, ...) \
1026 { \
1027 .exec = _exec, \
1028 .elems = (struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }, \
1029 .nelems = sizeof((struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }) / \
1030 sizeof(struct nand_op_parser_pattern_elem), \
1031 }
1032
1033/**
1034 * struct nand_op_parser - NAND controller operation parser descriptor
1035 * @patterns: array of supported patterns
1036 * @npatterns: length of the @patterns array
1037 *
1038 * The parser descriptor is just an array of supported patterns which will be
1039 * iterated by nand_op_parser_exec_op() everytime it tries to execute an
1040 * NAND operation (or tries to determine if a specific operation is supported).
1041 *
1042 * It is worth mentioning that patterns will be tested in their declaration
1043 * order, and the first match will be taken, so it's important to order patterns
1044 * appropriately so that simple/inefficient patterns are placed at the end of
1045 * the list. Usually, this is where you put single instruction patterns.
1046 */
1047struct nand_op_parser {
1048 const struct nand_op_parser_pattern *patterns;
1049 unsigned int npatterns;
1050};
1051
1052#define NAND_OP_PARSER(...) \
1053 { \
1054 .patterns = (struct nand_op_parser_pattern[]) { __VA_ARGS__ }, \
1055 .npatterns = sizeof((struct nand_op_parser_pattern[]) { __VA_ARGS__ }) / \
1056 sizeof(struct nand_op_parser_pattern), \
1057 }
1058
1059/**
1060 * struct nand_operation - NAND operation descriptor
1061 * @instrs: array of instructions to execute
1062 * @ninstrs: length of the @instrs array
1063 *
1064 * The actual operation structure that will be passed to chip->exec_op().
1065 */
1066struct nand_operation {
1067 const struct nand_op_instr *instrs;
1068 unsigned int ninstrs;
1069};
1070
1071#define NAND_OPERATION(_instrs) \
1072 { \
1073 .instrs = _instrs, \
1074 .ninstrs = ARRAY_SIZE(_instrs), \
1075 }
1076
1077int nand_op_parser_exec_op(struct nand_chip *chip,
1078 const struct nand_op_parser *parser,
1079 const struct nand_operation *op, bool check_only);
1080
1081/**
765 * struct nand_chip - NAND Private Flash Chip Data 1082 * struct nand_chip - NAND Private Flash Chip Data
766 * @mtd: MTD device registered to the MTD framework 1083 * @mtd: MTD device registered to the MTD framework
767 * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the 1084 * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the
@@ -787,10 +1104,13 @@ struct nand_manufacturer_ops {
787 * commands to the chip. 1104 * commands to the chip.
788 * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on 1105 * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on
789 * ready. 1106 * ready.
1107 * @exec_op: controller specific method to execute NAND operations.
1108 * This method replaces ->cmdfunc(),
1109 * ->{read,write}_{buf,byte,word}(), ->dev_ready() and
1110 * ->waifunc().
790 * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for 1111 * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for
791 * setting the read-retry mode. Mostly needed for MLC NAND. 1112 * setting the read-retry mode. Mostly needed for MLC NAND.
792 * @ecc: [BOARDSPECIFIC] ECC control structure 1113 * @ecc: [BOARDSPECIFIC] ECC control structure
793 * @buffers: buffer structure for read/write
794 * @buf_align: minimum buffer alignment required by a platform 1114 * @buf_align: minimum buffer alignment required by a platform
795 * @hwcontrol: platform-specific hardware control structure 1115 * @hwcontrol: platform-specific hardware control structure
796 * @erase: [REPLACEABLE] erase function 1116 * @erase: [REPLACEABLE] erase function
@@ -830,6 +1150,7 @@ struct nand_manufacturer_ops {
830 * @numchips: [INTERN] number of physical chips 1150 * @numchips: [INTERN] number of physical chips
831 * @chipsize: [INTERN] the size of one chip for multichip arrays 1151 * @chipsize: [INTERN] the size of one chip for multichip arrays
832 * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1 1152 * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
1153 * @data_buf: [INTERN] buffer for data, size is (page size + oobsize).
833 * @pagebuf: [INTERN] holds the pagenumber which is currently in 1154 * @pagebuf: [INTERN] holds the pagenumber which is currently in
834 * data_buf. 1155 * data_buf.
835 * @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is 1156 * @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is
@@ -886,6 +1207,9 @@ struct nand_chip {
886 void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column, 1207 void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column,
887 int page_addr); 1208 int page_addr);
888 int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this); 1209 int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this);
1210 int (*exec_op)(struct nand_chip *chip,
1211 const struct nand_operation *op,
1212 bool check_only);
889 int (*erase)(struct mtd_info *mtd, int page); 1213 int (*erase)(struct mtd_info *mtd, int page);
890 int (*scan_bbt)(struct mtd_info *mtd); 1214 int (*scan_bbt)(struct mtd_info *mtd);
891 int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip, 1215 int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip,
@@ -896,7 +1220,6 @@ struct nand_chip {
896 int (*setup_data_interface)(struct mtd_info *mtd, int chipnr, 1220 int (*setup_data_interface)(struct mtd_info *mtd, int chipnr,
897 const struct nand_data_interface *conf); 1221 const struct nand_data_interface *conf);
898 1222
899
900 int chip_delay; 1223 int chip_delay;
901 unsigned int options; 1224 unsigned int options;
902 unsigned int bbt_options; 1225 unsigned int bbt_options;
@@ -908,6 +1231,7 @@ struct nand_chip {
908 int numchips; 1231 int numchips;
909 uint64_t chipsize; 1232 uint64_t chipsize;
910 int pagemask; 1233 int pagemask;
1234 u8 *data_buf;
911 int pagebuf; 1235 int pagebuf;
912 unsigned int pagebuf_bitflips; 1236 unsigned int pagebuf_bitflips;
913 int subpagesize; 1237 int subpagesize;
@@ -928,7 +1252,7 @@ struct nand_chip {
928 u16 max_bb_per_die; 1252 u16 max_bb_per_die;
929 u32 blocks_per_die; 1253 u32 blocks_per_die;
930 1254
931 struct nand_data_interface *data_interface; 1255 struct nand_data_interface data_interface;
932 1256
933 int read_retries; 1257 int read_retries;
934 1258
@@ -938,7 +1262,6 @@ struct nand_chip {
938 struct nand_hw_control *controller; 1262 struct nand_hw_control *controller;
939 1263
940 struct nand_ecc_ctrl ecc; 1264 struct nand_ecc_ctrl ecc;
941 struct nand_buffers *buffers;
942 unsigned long buf_align; 1265 unsigned long buf_align;
943 struct nand_hw_control hwcontrol; 1266 struct nand_hw_control hwcontrol;
944 1267
@@ -956,6 +1279,15 @@ struct nand_chip {
956 } manufacturer; 1279 } manufacturer;
957}; 1280};
958 1281
1282static inline int nand_exec_op(struct nand_chip *chip,
1283 const struct nand_operation *op)
1284{
1285 if (!chip->exec_op)
1286 return -ENOTSUPP;
1287
1288 return chip->exec_op(chip, op, false);
1289}
1290
959extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; 1291extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
960extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops; 1292extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
961 1293
@@ -1225,8 +1557,7 @@ static inline int onfi_get_sync_timing_mode(struct nand_chip *chip)
1225 return le16_to_cpu(chip->onfi_params.src_sync_timing_mode); 1557 return le16_to_cpu(chip->onfi_params.src_sync_timing_mode);
1226} 1558}
1227 1559
1228int onfi_init_data_interface(struct nand_chip *chip, 1560int onfi_fill_data_interface(struct nand_chip *chip,
1229 struct nand_data_interface *iface,
1230 enum nand_data_interface_type type, 1561 enum nand_data_interface_type type,
1231 int timing_mode); 1562 int timing_mode);
1232 1563
@@ -1269,8 +1600,6 @@ static inline int jedec_feature(struct nand_chip *chip)
1269 1600
1270/* get timing characteristics from ONFI timing mode. */ 1601/* get timing characteristics from ONFI timing mode. */
1271const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode); 1602const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode);
1272/* get data interface from ONFI timing mode 0, used after reset. */
1273const struct nand_data_interface *nand_get_default_data_interface(void);
1274 1603
1275int nand_check_erased_ecc_chunk(void *data, int datalen, 1604int nand_check_erased_ecc_chunk(void *data, int datalen,
1276 void *ecc, int ecclen, 1605 void *ecc, int ecclen,
@@ -1316,9 +1645,45 @@ int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1316/* Reset and initialize a NAND device */ 1645/* Reset and initialize a NAND device */
1317int nand_reset(struct nand_chip *chip, int chipnr); 1646int nand_reset(struct nand_chip *chip, int chipnr);
1318 1647
1648/* NAND operation helpers */
1649int nand_reset_op(struct nand_chip *chip);
1650int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
1651 unsigned int len);
1652int nand_status_op(struct nand_chip *chip, u8 *status);
1653int nand_exit_status_op(struct nand_chip *chip);
1654int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
1655int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1656 unsigned int offset_in_page, void *buf, unsigned int len);
1657int nand_change_read_column_op(struct nand_chip *chip,
1658 unsigned int offset_in_page, void *buf,
1659 unsigned int len, bool force_8bit);
1660int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1661 unsigned int offset_in_page, void *buf, unsigned int len);
1662int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1663 unsigned int offset_in_page, const void *buf,
1664 unsigned int len);
1665int nand_prog_page_end_op(struct nand_chip *chip);
1666int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1667 unsigned int offset_in_page, const void *buf,
1668 unsigned int len);
1669int nand_change_write_column_op(struct nand_chip *chip,
1670 unsigned int offset_in_page, const void *buf,
1671 unsigned int len, bool force_8bit);
1672int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
1673 bool force_8bit);
1674int nand_write_data_op(struct nand_chip *chip, const void *buf,
1675 unsigned int len, bool force_8bit);
1676
1319/* Free resources held by the NAND device */ 1677/* Free resources held by the NAND device */
1320void nand_cleanup(struct nand_chip *chip); 1678void nand_cleanup(struct nand_chip *chip);
1321 1679
1322/* Default extended ID decoding function */ 1680/* Default extended ID decoding function */
1323void nand_decode_ext_id(struct nand_chip *chip); 1681void nand_decode_ext_id(struct nand_chip *chip);
1682
1683/*
1684 * External helper for controller drivers that have to implement the WAITRDY
1685 * instruction and have no physical pin to check it.
1686 */
1687int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
1688
1324#endif /* __LINUX_MTD_RAWNAND_H */ 1689#endif /* __LINUX_MTD_RAWNAND_H */
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index edfa280c3d56..053feb41510a 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -25,15 +25,43 @@ struct gpmc_nand_ops {
25 25
26struct gpmc_nand_regs; 26struct gpmc_nand_regs;
27 27
28struct gpmc_onenand_info {
29 bool sync_read;
30 bool sync_write;
31 int burst_len;
32};
33
28#if IS_ENABLED(CONFIG_OMAP_GPMC) 34#if IS_ENABLED(CONFIG_OMAP_GPMC)
29struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, 35struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
30 int cs); 36 int cs);
37/**
38 * gpmc_omap_onenand_set_timings - set optimized sync timings.
39 * @cs: Chip Select Region
40 * @freq: Chip frequency
41 * @latency: Burst latency cycle count
42 * @info: Structure describing parameters used
43 *
44 * Sets optimized timings for the @cs region based on @freq and @latency.
45 * Updates the @info structure based on the GPMC settings.
46 */
47int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
48 int latency,
49 struct gpmc_onenand_info *info);
50
31#else 51#else
32static inline struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, 52static inline struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
33 int cs) 53 int cs)
34{ 54{
35 return NULL; 55 return NULL;
36} 56}
57
58static inline
59int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
60 int latency,
61 struct gpmc_onenand_info *info)
62{
63 return -EINVAL;
64}
37#endif /* CONFIG_OMAP_GPMC */ 65#endif /* CONFIG_OMAP_GPMC */
38 66
39extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t, 67extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
diff --git a/include/linux/platform_data/mtd-onenand-omap2.h b/include/linux/platform_data/mtd-onenand-omap2.h
deleted file mode 100644
index 56ff0e6f5ad1..000000000000
--- a/include/linux/platform_data/mtd-onenand-omap2.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Copyright (C) 2006 Nokia Corporation
3 * Author: Juha Yrjola
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef __MTD_ONENAND_OMAP2_H
11#define __MTD_ONENAND_OMAP2_H
12
13#include <linux/mtd/mtd.h>
14#include <linux/mtd/partitions.h>
15
16#define ONENAND_SYNC_READ (1 << 0)
17#define ONENAND_SYNC_READWRITE (1 << 1)
18#define ONENAND_IN_OMAP34XX (1 << 2)
19
20struct omap_onenand_platform_data {
21 int cs;
22 int gpio_irq;
23 struct mtd_partition *parts;
24 int nr_parts;
25 int (*onenand_setup)(void __iomem *, int *freq_ptr);
26 int dma_channel;
27 u8 flags;
28 u8 regulator_can_sleep;
29 u8 skip_initial_unlocking;
30
31 /* for passing the partitions */
32 struct device_node *of_node;
33};
34#endif