aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-05-11 13:44:22 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-05-11 13:44:22 -0400
commit9786e34e0a6055dbd1b46e16dfa791ac2b3da289 (patch)
tree0151608bdb134bec3a59748f40c67324798a224e /drivers
parent791a9a666d1afe2603bcb2c6a4852d684e879252 (diff)
parenta9402889f41cc2db7a9b162990bef271be098ff0 (diff)
Merge tag 'for-linus-20170510' of git://git.infradead.org/linux-mtd
Pull MTD updates from Brian Norris: "NAND, from Boris: - some minor fixes/improvements on existing drivers (fsmc, gpio, ifc, davinci, brcmnand, omap) - a huge cleanup/rework of the denali driver accompanied with core fixes/improvements to simplify the driver code - a complete rewrite of the atmel driver to support new DT bindings and make future evolution easier - the addition of per-vendor detection/initialization steps to avoid extending the nand_ids table with more extended-id entries SPI NOR, from Cyrille: - fixes in the hisi, intel and Mediatek SPI controller drivers - fixes to some SPI flash memories not supporting the Chip Erase command. - add support to some new memory parts (Winbond, Macronix, Micron, ESMT). - add new driver for the STM32 QSPI controller And a few fixes for Gemini and Versatile platforms on physmap-of" * tag 'for-linus-20170510' of git://git.infradead.org/linux-mtd: (100 commits) MAINTAINERS: Update NAND subsystem git repositories mtd: nand: gpio: update binding mtd: nand: add ooblayout for old hamming layout mtd: oxnas_nand: Allocating more than necessary in probe() dt-bindings: mtd: Document the STM32 QSPI bindings mtd: mtk-nor: set controller's address width according to nor flash mtd: spi-nor: add driver for STM32 quad spi flash controller mtd: nand: brcmnand: Check flash #WP pin status before nand erase/program mtd: nand: davinci: add comment on NAND subpage write status on keystone mtd: nand: omap2: Fix partition creation via cmdline mtdparts mtd: nand: NULL terminate a of_device_id table mtd: nand: Fix a couple error codes mtd: nand: allow drivers to request minimum alignment for passed buffer mtd: nand: allocate aligned buffers if NAND_OWN_BUFFERS is unset mtd: nand: denali: allow to override revision number mtd: nand: denali_dt: use pdev instead of ofdev for platform_device mtd: nand: denali_dt: remove dma-mask DT property mtd: nand: denali: support 64bit capable DMA engine mtd: nand: denali_dt: enable HW_ECC_FIXUP for Altera SOCFPGA 
variant mtd: nand: denali: support HW_ECC_FIXUP capability ...
Diffstat (limited to 'drivers')
-rw-r--r--drivers/memory/Kconfig2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c12
-rw-r--r--drivers/mtd/maps/Makefile10
-rw-r--r--drivers/mtd/maps/physmap_of_core.c (renamed from drivers/mtd/maps/physmap_of.c)30
-rw-r--r--drivers/mtd/mtdswap.c6
-rw-r--r--drivers/mtd/nand/Kconfig23
-rw-r--r--drivers/mtd/nand/Makefile11
-rw-r--r--drivers/mtd/nand/atmel/Makefile4
-rw-r--r--drivers/mtd/nand/atmel/nand-controller.c2197
-rw-r--r--drivers/mtd/nand/atmel/pmecc.c1020
-rw-r--r--drivers/mtd/nand/atmel/pmecc.h73
-rw-r--r--drivers/mtd/nand/atmel_nand.c2479
-rw-r--r--drivers/mtd/nand/atmel_nand_ecc.h163
-rw-r--r--drivers/mtd/nand/atmel_nand_nfc.h103
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c61
-rw-r--r--drivers/mtd/nand/cmx270_nand.c4
-rw-r--r--drivers/mtd/nand/davinci_nand.c11
-rw-r--r--drivers/mtd/nand/denali.c567
-rw-r--r--drivers/mtd/nand/denali.h192
-rw-r--r--drivers/mtd/nand/denali_dt.c74
-rw-r--r--drivers/mtd/nand/fsmc_nand.c236
-rw-r--r--drivers/mtd/nand/gpio.c18
-rw-r--r--drivers/mtd/nand/nand_amd.c51
-rw-r--r--drivers/mtd/nand/nand_base.c588
-rw-r--r--drivers/mtd/nand/nand_hynix.c631
-rw-r--r--drivers/mtd/nand/nand_ids.c39
-rw-r--r--drivers/mtd/nand/nand_macronix.c30
-rw-r--r--drivers/mtd/nand/nand_micron.c86
-rw-r--r--drivers/mtd/nand/nand_samsung.c112
-rw-r--r--drivers/mtd/nand/nand_toshiba.c51
-rw-r--r--drivers/mtd/nand/nandsim.c2
-rw-r--r--drivers/mtd/nand/omap2.c9
-rw-r--r--drivers/mtd/nand/orion_nand.c48
-rw-r--r--drivers/mtd/nand/oxnas_nand.c2
-rw-r--r--drivers/mtd/nand/sunxi_nand.c20
-rw-r--r--drivers/mtd/nand/tango_nand.c8
-rw-r--r--drivers/mtd/ofpart.c4
-rw-r--r--drivers/mtd/spi-nor/Kconfig7
-rw-r--r--drivers/mtd/spi-nor/Makefile1
-rw-r--r--drivers/mtd/spi-nor/hisi-sfc.c5
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c4
-rw-r--r--drivers/mtd/spi-nor/mtk-quadspi.c27
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c18
-rw-r--r--drivers/mtd/spi-nor/stm32-quadspi.c693
44 files changed, 5942 insertions, 3790 deletions
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 3ecc429297a0..ffc350258041 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -116,7 +116,7 @@ config FSL_CORENET_CF
116 116
117config FSL_IFC 117config FSL_IFC
118 bool 118 bool
119 depends on FSL_SOC || ARCH_LAYERSCAPE 119 depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A
120 120
121config JZ4780_NEMC 121config JZ4780_NEMC
122 bool "Ingenic JZ4780 SoC NEMC driver" 122 bool "Ingenic JZ4780 SoC NEMC driver"
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 9dca881bb378..56aa6b75213d 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -323,7 +323,8 @@ static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
323 * it should report a size of 8KBytes (0x0020*256). 323 * it should report a size of 8KBytes (0x0020*256).
324 */ 324 */
325 cfi->cfiq->EraseRegionInfo[0] = 0x002003ff; 325 cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
326 pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name); 326 pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
327 mtd->name);
327} 328}
328 329
329static void fixup_s29gl064n_sectors(struct mtd_info *mtd) 330static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
@@ -333,7 +334,8 @@ static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
333 334
334 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) { 335 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
335 cfi->cfiq->EraseRegionInfo[0] |= 0x0040; 336 cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
336 pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name); 337 pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
338 mtd->name);
337 } 339 }
338} 340}
339 341
@@ -344,7 +346,8 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
344 346
345 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) { 347 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
346 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040; 348 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
347 pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name); 349 pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
350 mtd->name);
348 } 351 }
349} 352}
350 353
@@ -358,7 +361,8 @@ static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
358 * which is not permitted by CFI. 361 * which is not permitted by CFI.
359 */ 362 */
360 cfi->cfiq->EraseRegionInfo[0] = 0x020001ff; 363 cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
361 pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name); 364 pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
365 mtd->name);
362} 366}
363 367
364/* Used to fix CFI-Tables of chips without Extended Query Tables */ 368/* Used to fix CFI-Tables of chips without Extended Query Tables */
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index aef1846b4de2..5a09a72ab112 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -17,12 +17,10 @@ obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o
17obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o 17obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
18obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o 18obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
19obj-$(CONFIG_MTD_PHYSMAP) += physmap.o 19obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
20ifdef CONFIG_MTD_PHYSMAP_OF_VERSATILE 20physmap_of-objs-y += physmap_of_core.o
21physmap_of-objs += physmap_of_versatile.o 21physmap_of-objs-$(CONFIG_MTD_PHYSMAP_OF_VERSATILE) += physmap_of_versatile.o
22endif 22physmap_of-objs-$(CONFIG_MTD_PHYSMAP_OF_GEMINI) += physmap_of_gemini.o
23ifdef CONFIG_MTD_PHYSMAP_OF_GEMINI 23physmap_of-objs := $(physmap_of-objs-y)
24physmap_of-objs += physmap_of_gemini.o
25endif
26obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o 24obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
27obj-$(CONFIG_MTD_PISMO) += pismo.o 25obj-$(CONFIG_MTD_PISMO) += pismo.o
28obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o 26obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of_core.c
index 14e8909c9955..62fa6836f218 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of_core.c
@@ -116,32 +116,22 @@ static const char * const part_probe_types_def[] = {
116 116
117static const char * const *of_get_probes(struct device_node *dp) 117static const char * const *of_get_probes(struct device_node *dp)
118{ 118{
119 const char *cp;
120 int cplen;
121 unsigned int l;
122 unsigned int count;
123 const char **res; 119 const char **res;
120 int count;
124 121
125 cp = of_get_property(dp, "linux,part-probe", &cplen); 122 count = of_property_count_strings(dp, "linux,part-probe");
126 if (cp == NULL) 123 if (count < 0)
127 return part_probe_types_def; 124 return part_probe_types_def;
128 125
129 count = 0; 126 res = kzalloc((count + 1) * sizeof(*res), GFP_KERNEL);
130 for (l = 0; l != cplen; l++)
131 if (cp[l] == 0)
132 count++;
133
134 res = kzalloc((count + 1)*sizeof(*res), GFP_KERNEL);
135 if (!res) 127 if (!res)
136 return NULL; 128 return NULL;
137 count = 0; 129
138 while (cplen > 0) { 130 count = of_property_read_string_array(dp, "linux,part-probe", res,
139 res[count] = cp; 131 count);
140 l = strlen(cp) + 1; 132 if (count < 0)
141 cp += l; 133 return NULL;
142 cplen -= l; 134
143 count++;
144 }
145 return res; 135 return res;
146} 136}
147 137
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index c40e2c951758..f12879a3d4ff 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1235,10 +1235,8 @@ static int mtdswap_show(struct seq_file *s, void *data)
1235 1235
1236 if (root->rb_node) { 1236 if (root->rb_node) {
1237 count[i] = d->trees[i].count; 1237 count[i] = d->trees[i].count;
1238 min[i] = rb_entry(rb_first(root), struct swap_eb, 1238 min[i] = MTDSWAP_ECNT_MIN(root);
1239 rb)->erase_count; 1239 max[i] = MTDSWAP_ECNT_MAX(root);
1240 max[i] = rb_entry(rb_last(root), struct swap_eb,
1241 rb)->erase_count;
1242 } else 1240 } else
1243 count[i] = 0; 1241 count[i] = 0;
1244 } 1242 }
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 6d4d5672d1d8..c3029528063b 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -13,7 +13,6 @@ config MTD_NAND_ECC_SMC
13menuconfig MTD_NAND 13menuconfig MTD_NAND
14 tristate "NAND Device Support" 14 tristate "NAND Device Support"
15 depends on MTD 15 depends on MTD
16 select MTD_NAND_IDS
17 select MTD_NAND_ECC 16 select MTD_NAND_ECC
18 help 17 help
19 This enables support for accessing all type of NAND flash 18 This enables support for accessing all type of NAND flash
@@ -60,17 +59,6 @@ config MTD_NAND_DENALI_DT
60 Enable the driver for NAND flash on platforms using a Denali NAND 59 Enable the driver for NAND flash on platforms using a Denali NAND
61 controller as a DT device. 60 controller as a DT device.
62 61
63config MTD_NAND_DENALI_SCRATCH_REG_ADDR
64 hex "Denali NAND size scratch register address"
65 default "0xFF108018"
66 depends on MTD_NAND_DENALI_PCI
67 help
68 Some platforms place the NAND chip size in a scratch register
69 because (some versions of) the driver aren't able to automatically
70 determine the size of certain chips. Set the address of the
71 scratch register here to enable this feature. On Intel Moorestown
72 boards, the scratch register is at 0xFF108018.
73
74config MTD_NAND_GPIO 62config MTD_NAND_GPIO
75 tristate "GPIO assisted NAND Flash driver" 63 tristate "GPIO assisted NAND Flash driver"
76 depends on GPIOLIB || COMPILE_TEST 64 depends on GPIOLIB || COMPILE_TEST
@@ -109,9 +97,6 @@ config MTD_NAND_OMAP_BCH
109config MTD_NAND_OMAP_BCH_BUILD 97config MTD_NAND_OMAP_BCH_BUILD
110 def_tristate MTD_NAND_OMAP2 && MTD_NAND_OMAP_BCH 98 def_tristate MTD_NAND_OMAP2 && MTD_NAND_OMAP_BCH
111 99
112config MTD_NAND_IDS
113 tristate
114
115config MTD_NAND_RICOH 100config MTD_NAND_RICOH
116 tristate "Ricoh xD card reader" 101 tristate "Ricoh xD card reader"
117 default n 102 default n
@@ -321,11 +306,11 @@ config MTD_NAND_CS553X
321 If you say "m", the module will be called cs553x_nand. 306 If you say "m", the module will be called cs553x_nand.
322 307
323config MTD_NAND_ATMEL 308config MTD_NAND_ATMEL
324 tristate "Support for NAND Flash / SmartMedia on AT91 and AVR32" 309 tristate "Support for NAND Flash / SmartMedia on AT91"
325 depends on ARCH_AT91 || AVR32 310 depends on ARCH_AT91
326 help 311 help
327 Enables support for NAND Flash / Smart Media Card interface 312 Enables support for NAND Flash / Smart Media Card interface
328 on Atmel AT91 and AVR32 processors. 313 on Atmel AT91 processors.
329 314
330config MTD_NAND_PXA3xx 315config MTD_NAND_PXA3xx
331 tristate "NAND support on PXA3xx and Armada 370/XP" 316 tristate "NAND support on PXA3xx and Armada 370/XP"
@@ -443,7 +428,7 @@ config MTD_NAND_FSL_ELBC
443 428
444config MTD_NAND_FSL_IFC 429config MTD_NAND_FSL_IFC
445 tristate "NAND support for Freescale IFC controller" 430 tristate "NAND support for Freescale IFC controller"
446 depends on FSL_SOC || ARCH_LAYERSCAPE 431 depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A
447 select FSL_IFC 432 select FSL_IFC
448 select MEMORY 433 select MEMORY
449 help 434 help
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 19a66e404d5b..ade5fc4c3819 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -5,7 +5,6 @@
5obj-$(CONFIG_MTD_NAND) += nand.o 5obj-$(CONFIG_MTD_NAND) += nand.o
6obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o 6obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o
7obj-$(CONFIG_MTD_NAND_BCH) += nand_bch.o 7obj-$(CONFIG_MTD_NAND_BCH) += nand_bch.o
8obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
9obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o 8obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
10 9
11obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o 10obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
@@ -25,7 +24,7 @@ obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
25obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o 24obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
26obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o 25obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
27obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o 26obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
28obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o 27obj-$(CONFIG_MTD_NAND_ATMEL) += atmel/
29obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o 28obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o
30omap2_nand-objs := omap2.o 29omap2_nand-objs := omap2.o
31obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_nand.o 30obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_nand.o
@@ -61,4 +60,10 @@ obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
61obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o 60obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
62obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o 61obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o
63 62
64nand-objs := nand_base.o nand_bbt.o nand_timings.o 63nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o
64nand-objs += nand_amd.o
65nand-objs += nand_hynix.o
66nand-objs += nand_macronix.o
67nand-objs += nand_micron.o
68nand-objs += nand_samsung.o
69nand-objs += nand_toshiba.o
diff --git a/drivers/mtd/nand/atmel/Makefile b/drivers/mtd/nand/atmel/Makefile
new file mode 100644
index 000000000000..288db4f38a8f
--- /dev/null
+++ b/drivers/mtd/nand/atmel/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_MTD_NAND_ATMEL) += atmel-nand-controller.o atmel-pmecc.o
2
3atmel-nand-controller-objs := nand-controller.o
4atmel-pmecc-objs := pmecc.o
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
new file mode 100644
index 000000000000..3b2446896147
--- /dev/null
+++ b/drivers/mtd/nand/atmel/nand-controller.c
@@ -0,0 +1,2197 @@
1/*
2 * Copyright 2017 ATMEL
3 * Copyright 2017 Free Electrons
4 *
5 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
6 *
7 * Derived from the atmel_nand.c driver which contained the following
8 * copyrights:
9 *
10 * Copyright 2003 Rick Bronson
11 *
12 * Derived from drivers/mtd/nand/autcpu12.c
13 * Copyright 2001 Thomas Gleixner (gleixner@autronix.de)
14 *
15 * Derived from drivers/mtd/spia.c
16 * Copyright 2000 Steven J. Hill (sjhill@cotw.com)
17 *
18 *
19 * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
20 * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright 2007
21 *
22 * Derived from Das U-Boot source code
23 * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
24 * Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
25 *
26 * Add Programmable Multibit ECC support for various AT91 SoC
27 * Copyright 2012 ATMEL, Hong Xu
28 *
29 * Add Nand Flash Controller support for SAMA5 SoC
30 * Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
31 *
32 * This program is free software; you can redistribute it and/or modify
33 * it under the terms of the GNU General Public License version 2 as
34 * published by the Free Software Foundation.
35 *
36 * A few words about the naming convention in this file. This convention
37 * applies to structure and function names.
38 *
39 * Prefixes:
40 *
41 * - atmel_nand_: all generic structures/functions
42 * - atmel_smc_nand_: all structures/functions specific to the SMC interface
43 * (at91sam9 and avr32 SoCs)
44 * - atmel_hsmc_nand_: all structures/functions specific to the HSMC interface
45 * (sama5 SoCs and later)
46 * - atmel_nfc_: all structures/functions used to manipulate the NFC sub-block
47 * that is available in the HSMC block
48 * - <soc>_nand_: all SoC specific structures/functions
49 */
50
51#include <linux/clk.h>
52#include <linux/dma-mapping.h>
53#include <linux/dmaengine.h>
54#include <linux/genalloc.h>
55#include <linux/gpio.h>
56#include <linux/gpio/consumer.h>
57#include <linux/interrupt.h>
58#include <linux/mfd/syscon.h>
59#include <linux/mfd/syscon/atmel-matrix.h>
60#include <linux/module.h>
61#include <linux/mtd/nand.h>
62#include <linux/of_address.h>
63#include <linux/of_irq.h>
64#include <linux/of_platform.h>
65#include <linux/iopoll.h>
66#include <linux/platform_device.h>
67#include <linux/platform_data/atmel.h>
68#include <linux/regmap.h>
69
70#include "pmecc.h"
71
72#define ATMEL_HSMC_NFC_CFG 0x0
73#define ATMEL_HSMC_NFC_CFG_SPARESIZE(x) (((x) / 4) << 24)
74#define ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK GENMASK(30, 24)
75#define ATMEL_HSMC_NFC_CFG_DTO(cyc, mul) (((cyc) << 16) | ((mul) << 20))
76#define ATMEL_HSMC_NFC_CFG_DTO_MAX GENMASK(22, 16)
77#define ATMEL_HSMC_NFC_CFG_RBEDGE BIT(13)
78#define ATMEL_HSMC_NFC_CFG_FALLING_EDGE BIT(12)
79#define ATMEL_HSMC_NFC_CFG_RSPARE BIT(9)
80#define ATMEL_HSMC_NFC_CFG_WSPARE BIT(8)
81#define ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK GENMASK(2, 0)
82#define ATMEL_HSMC_NFC_CFG_PAGESIZE(x) (fls((x) / 512) - 1)
83
84#define ATMEL_HSMC_NFC_CTRL 0x4
85#define ATMEL_HSMC_NFC_CTRL_EN BIT(0)
86#define ATMEL_HSMC_NFC_CTRL_DIS BIT(1)
87
88#define ATMEL_HSMC_NFC_SR 0x8
89#define ATMEL_HSMC_NFC_IER 0xc
90#define ATMEL_HSMC_NFC_IDR 0x10
91#define ATMEL_HSMC_NFC_IMR 0x14
92#define ATMEL_HSMC_NFC_SR_ENABLED BIT(1)
93#define ATMEL_HSMC_NFC_SR_RB_RISE BIT(4)
94#define ATMEL_HSMC_NFC_SR_RB_FALL BIT(5)
95#define ATMEL_HSMC_NFC_SR_BUSY BIT(8)
96#define ATMEL_HSMC_NFC_SR_WR BIT(11)
97#define ATMEL_HSMC_NFC_SR_CSID GENMASK(14, 12)
98#define ATMEL_HSMC_NFC_SR_XFRDONE BIT(16)
99#define ATMEL_HSMC_NFC_SR_CMDDONE BIT(17)
100#define ATMEL_HSMC_NFC_SR_DTOE BIT(20)
101#define ATMEL_HSMC_NFC_SR_UNDEF BIT(21)
102#define ATMEL_HSMC_NFC_SR_AWB BIT(22)
103#define ATMEL_HSMC_NFC_SR_NFCASE BIT(23)
104#define ATMEL_HSMC_NFC_SR_ERRORS (ATMEL_HSMC_NFC_SR_DTOE | \
105 ATMEL_HSMC_NFC_SR_UNDEF | \
106 ATMEL_HSMC_NFC_SR_AWB | \
107 ATMEL_HSMC_NFC_SR_NFCASE)
108#define ATMEL_HSMC_NFC_SR_RBEDGE(x) BIT((x) + 24)
109
110#define ATMEL_HSMC_NFC_ADDR 0x18
111#define ATMEL_HSMC_NFC_BANK 0x1c
112
113#define ATMEL_NFC_MAX_RB_ID 7
114
115#define ATMEL_NFC_SRAM_SIZE 0x2400
116
117#define ATMEL_NFC_CMD(pos, cmd) ((cmd) << (((pos) * 8) + 2))
118#define ATMEL_NFC_VCMD2 BIT(18)
119#define ATMEL_NFC_ACYCLE(naddrs) ((naddrs) << 19)
120#define ATMEL_NFC_CSID(cs) ((cs) << 22)
121#define ATMEL_NFC_DATAEN BIT(25)
122#define ATMEL_NFC_NFCWR BIT(26)
123
124#define ATMEL_NFC_MAX_ADDR_CYCLES 5
125
126#define ATMEL_NAND_ALE_OFFSET BIT(21)
127#define ATMEL_NAND_CLE_OFFSET BIT(22)
128
129#define DEFAULT_TIMEOUT_MS 1000
130#define MIN_DMA_LEN 128
131
132enum atmel_nand_rb_type {
133 ATMEL_NAND_NO_RB,
134 ATMEL_NAND_NATIVE_RB,
135 ATMEL_NAND_GPIO_RB,
136};
137
138struct atmel_nand_rb {
139 enum atmel_nand_rb_type type;
140 union {
141 struct gpio_desc *gpio;
142 int id;
143 };
144};
145
146struct atmel_nand_cs {
147 int id;
148 struct atmel_nand_rb rb;
149 struct gpio_desc *csgpio;
150 struct {
151 void __iomem *virt;
152 dma_addr_t dma;
153 } io;
154};
155
156struct atmel_nand {
157 struct list_head node;
158 struct device *dev;
159 struct nand_chip base;
160 struct atmel_nand_cs *activecs;
161 struct atmel_pmecc_user *pmecc;
162 struct gpio_desc *cdgpio;
163 int numcs;
164 struct atmel_nand_cs cs[];
165};
166
167static inline struct atmel_nand *to_atmel_nand(struct nand_chip *chip)
168{
169 return container_of(chip, struct atmel_nand, base);
170}
171
172enum atmel_nfc_data_xfer {
173 ATMEL_NFC_NO_DATA,
174 ATMEL_NFC_READ_DATA,
175 ATMEL_NFC_WRITE_DATA,
176};
177
178struct atmel_nfc_op {
179 u8 cs;
180 u8 ncmds;
181 u8 cmds[2];
182 u8 naddrs;
183 u8 addrs[5];
184 enum atmel_nfc_data_xfer data;
185 u32 wait;
186 u32 errors;
187};
188
189struct atmel_nand_controller;
190struct atmel_nand_controller_caps;
191
192struct atmel_nand_controller_ops {
193 int (*probe)(struct platform_device *pdev,
194 const struct atmel_nand_controller_caps *caps);
195 int (*remove)(struct atmel_nand_controller *nc);
196 void (*nand_init)(struct atmel_nand_controller *nc,
197 struct atmel_nand *nand);
198 int (*ecc_init)(struct atmel_nand *nand);
199};
200
201struct atmel_nand_controller_caps {
202 bool has_dma;
203 bool legacy_of_bindings;
204 u32 ale_offs;
205 u32 cle_offs;
206 const struct atmel_nand_controller_ops *ops;
207};
208
209struct atmel_nand_controller {
210 struct nand_hw_control base;
211 const struct atmel_nand_controller_caps *caps;
212 struct device *dev;
213 struct regmap *smc;
214 struct dma_chan *dmac;
215 struct atmel_pmecc *pmecc;
216 struct list_head chips;
217 struct clk *mck;
218};
219
220static inline struct atmel_nand_controller *
221to_nand_controller(struct nand_hw_control *ctl)
222{
223 return container_of(ctl, struct atmel_nand_controller, base);
224}
225
226struct atmel_smc_nand_controller {
227 struct atmel_nand_controller base;
228 struct regmap *matrix;
229 unsigned int ebi_csa_offs;
230};
231
232static inline struct atmel_smc_nand_controller *
233to_smc_nand_controller(struct nand_hw_control *ctl)
234{
235 return container_of(to_nand_controller(ctl),
236 struct atmel_smc_nand_controller, base);
237}
238
239struct atmel_hsmc_nand_controller {
240 struct atmel_nand_controller base;
241 struct {
242 struct gen_pool *pool;
243 void __iomem *virt;
244 dma_addr_t dma;
245 } sram;
246 struct regmap *io;
247 struct atmel_nfc_op op;
248 struct completion complete;
249 int irq;
250
251 /* Only used when instantiating from legacy DT bindings. */
252 struct clk *clk;
253};
254
255static inline struct atmel_hsmc_nand_controller *
256to_hsmc_nand_controller(struct nand_hw_control *ctl)
257{
258 return container_of(to_nand_controller(ctl),
259 struct atmel_hsmc_nand_controller, base);
260}
261
262static bool atmel_nfc_op_done(struct atmel_nfc_op *op, u32 status)
263{
264 op->errors |= status & ATMEL_HSMC_NFC_SR_ERRORS;
265 op->wait ^= status & op->wait;
266
267 return !op->wait || op->errors;
268}
269
270static irqreturn_t atmel_nfc_interrupt(int irq, void *data)
271{
272 struct atmel_hsmc_nand_controller *nc = data;
273 u32 sr, rcvd;
274 bool done;
275
276 regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &sr);
277
278 rcvd = sr & (nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
279 done = atmel_nfc_op_done(&nc->op, sr);
280
281 if (rcvd)
282 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, rcvd);
283
284 if (done)
285 complete(&nc->complete);
286
287 return rcvd ? IRQ_HANDLED : IRQ_NONE;
288}
289
290static int atmel_nfc_wait(struct atmel_hsmc_nand_controller *nc, bool poll,
291 unsigned int timeout_ms)
292{
293 int ret;
294
295 if (!timeout_ms)
296 timeout_ms = DEFAULT_TIMEOUT_MS;
297
298 if (poll) {
299 u32 status;
300
301 ret = regmap_read_poll_timeout(nc->base.smc,
302 ATMEL_HSMC_NFC_SR, status,
303 atmel_nfc_op_done(&nc->op,
304 status),
305 0, timeout_ms * 1000);
306 } else {
307 init_completion(&nc->complete);
308 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IER,
309 nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
310 ret = wait_for_completion_timeout(&nc->complete,
311 msecs_to_jiffies(timeout_ms));
312 if (!ret)
313 ret = -ETIMEDOUT;
314 else
315 ret = 0;
316
317 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
318 }
319
320 if (nc->op.errors & ATMEL_HSMC_NFC_SR_DTOE) {
321 dev_err(nc->base.dev, "Waiting NAND R/B Timeout\n");
322 ret = -ETIMEDOUT;
323 }
324
325 if (nc->op.errors & ATMEL_HSMC_NFC_SR_UNDEF) {
326 dev_err(nc->base.dev, "Access to an undefined area\n");
327 ret = -EIO;
328 }
329
330 if (nc->op.errors & ATMEL_HSMC_NFC_SR_AWB) {
331 dev_err(nc->base.dev, "Access while busy\n");
332 ret = -EIO;
333 }
334
335 if (nc->op.errors & ATMEL_HSMC_NFC_SR_NFCASE) {
336 dev_err(nc->base.dev, "Wrong access size\n");
337 ret = -EIO;
338 }
339
340 return ret;
341}
342
/* dmaengine completion callback: wake up the on-stack waiter. */
static void atmel_nand_dma_transfer_finished(void *data)
{
	struct completion *done = data;

	complete(done);
}
349
350static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc,
351 void *buf, dma_addr_t dev_dma, size_t len,
352 enum dma_data_direction dir)
353{
354 DECLARE_COMPLETION_ONSTACK(finished);
355 dma_addr_t src_dma, dst_dma, buf_dma;
356 struct dma_async_tx_descriptor *tx;
357 dma_cookie_t cookie;
358
359 buf_dma = dma_map_single(nc->dev, buf, len, dir);
360 if (dma_mapping_error(nc->dev, dev_dma)) {
361 dev_err(nc->dev,
362 "Failed to prepare a buffer for DMA access\n");
363 goto err;
364 }
365
366 if (dir == DMA_FROM_DEVICE) {
367 src_dma = dev_dma;
368 dst_dma = buf_dma;
369 } else {
370 src_dma = buf_dma;
371 dst_dma = dev_dma;
372 }
373
374 tx = dmaengine_prep_dma_memcpy(nc->dmac, dst_dma, src_dma, len,
375 DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
376 if (!tx) {
377 dev_err(nc->dev, "Failed to prepare DMA memcpy\n");
378 goto err_unmap;
379 }
380
381 tx->callback = atmel_nand_dma_transfer_finished;
382 tx->callback_param = &finished;
383
384 cookie = dmaengine_submit(tx);
385 if (dma_submit_error(cookie)) {
386 dev_err(nc->dev, "Failed to do DMA tx_submit\n");
387 goto err_unmap;
388 }
389
390 dma_async_issue_pending(nc->dmac);
391 wait_for_completion(&finished);
392
393 return 0;
394
395err_unmap:
396 dma_unmap_single(nc->dev, buf_dma, len, dir);
397
398err:
399 dev_dbg(nc->dev, "Fall back to CPU I/O\n");
400
401 return -EIO;
402}
403
404static u8 atmel_nand_read_byte(struct mtd_info *mtd)
405{
406 struct nand_chip *chip = mtd_to_nand(mtd);
407 struct atmel_nand *nand = to_atmel_nand(chip);
408
409 return ioread8(nand->activecs->io.virt);
410}
411
412static u16 atmel_nand_read_word(struct mtd_info *mtd)
413{
414 struct nand_chip *chip = mtd_to_nand(mtd);
415 struct atmel_nand *nand = to_atmel_nand(chip);
416
417 return ioread16(nand->activecs->io.virt);
418}
419
420static void atmel_nand_write_byte(struct mtd_info *mtd, u8 byte)
421{
422 struct nand_chip *chip = mtd_to_nand(mtd);
423 struct atmel_nand *nand = to_atmel_nand(chip);
424
425 if (chip->options & NAND_BUSWIDTH_16)
426 iowrite16(byte | (byte << 8), nand->activecs->io.virt);
427 else
428 iowrite8(byte, nand->activecs->io.virt);
429}
430
431static void atmel_nand_read_buf(struct mtd_info *mtd, u8 *buf, int len)
432{
433 struct nand_chip *chip = mtd_to_nand(mtd);
434 struct atmel_nand *nand = to_atmel_nand(chip);
435 struct atmel_nand_controller *nc;
436
437 nc = to_nand_controller(chip->controller);
438
439 /*
440 * If the controller supports DMA, the buffer address is DMA-able and
441 * len is long enough to make DMA transfers profitable, let's trigger
442 * a DMA transfer. If it fails, fallback to PIO mode.
443 */
444 if (nc->dmac && virt_addr_valid(buf) &&
445 len >= MIN_DMA_LEN &&
446 !atmel_nand_dma_transfer(nc, buf, nand->activecs->io.dma, len,
447 DMA_FROM_DEVICE))
448 return;
449
450 if (chip->options & NAND_BUSWIDTH_16)
451 ioread16_rep(nand->activecs->io.virt, buf, len / 2);
452 else
453 ioread8_rep(nand->activecs->io.virt, buf, len);
454}
455
456static void atmel_nand_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
457{
458 struct nand_chip *chip = mtd_to_nand(mtd);
459 struct atmel_nand *nand = to_atmel_nand(chip);
460 struct atmel_nand_controller *nc;
461
462 nc = to_nand_controller(chip->controller);
463
464 /*
465 * If the controller supports DMA, the buffer address is DMA-able and
466 * len is long enough to make DMA transfers profitable, let's trigger
467 * a DMA transfer. If it fails, fallback to PIO mode.
468 */
469 if (nc->dmac && virt_addr_valid(buf) &&
470 len >= MIN_DMA_LEN &&
471 !atmel_nand_dma_transfer(nc, (void *)buf, nand->activecs->io.dma,
472 len, DMA_TO_DEVICE))
473 return;
474
475 if (chip->options & NAND_BUSWIDTH_16)
476 iowrite16_rep(nand->activecs->io.virt, buf, len / 2);
477 else
478 iowrite8_rep(nand->activecs->io.virt, buf, len);
479}
480
481static int atmel_nand_dev_ready(struct mtd_info *mtd)
482{
483 struct nand_chip *chip = mtd_to_nand(mtd);
484 struct atmel_nand *nand = to_atmel_nand(chip);
485
486 return gpiod_get_value(nand->activecs->rb.gpio);
487}
488
489static void atmel_nand_select_chip(struct mtd_info *mtd, int cs)
490{
491 struct nand_chip *chip = mtd_to_nand(mtd);
492 struct atmel_nand *nand = to_atmel_nand(chip);
493
494 if (cs < 0 || cs >= nand->numcs) {
495 nand->activecs = NULL;
496 chip->dev_ready = NULL;
497 return;
498 }
499
500 nand->activecs = &nand->cs[cs];
501
502 if (nand->activecs->rb.type == ATMEL_NAND_GPIO_RB)
503 chip->dev_ready = atmel_nand_dev_ready;
504}
505
506static int atmel_hsmc_nand_dev_ready(struct mtd_info *mtd)
507{
508 struct nand_chip *chip = mtd_to_nand(mtd);
509 struct atmel_nand *nand = to_atmel_nand(chip);
510 struct atmel_hsmc_nand_controller *nc;
511 u32 status;
512
513 nc = to_hsmc_nand_controller(chip->controller);
514
515 regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &status);
516
517 return status & ATMEL_HSMC_NFC_SR_RBEDGE(nand->activecs->rb.id);
518}
519
/*
 * select_chip() hook for the HSMC (NFC-capable) controller: on top of the
 * generic CS bookkeeping it disables the NFC on de-selection, or programs
 * the selected die's geometry and (re-)enables the NFC.
 */
static void atmel_hsmc_nand_select_chip(struct mtd_info *mtd, int cs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_hsmc_nand_controller *nc;

	nc = to_hsmc_nand_controller(chip->controller);

	atmel_nand_select_chip(mtd, cs);

	/* De-selection (invalid cs): turn the NFC off and bail out. */
	if (!nand->activecs) {
		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
			     ATMEL_HSMC_NFC_CTRL_DIS);
		return;
	}

	/* Native R/B line: poll readiness through the NFC status register. */
	if (nand->activecs->rb.type == ATMEL_NAND_NATIVE_RB)
		chip->dev_ready = atmel_hsmc_nand_dev_ready;

	/*
	 * Program the page/spare geometry, request automatic spare read on
	 * page reads (RSPARE), then enable the NFC.
	 */
	regmap_update_bits(nc->base.smc, ATMEL_HSMC_NFC_CFG,
			   ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK |
			   ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK |
			   ATMEL_HSMC_NFC_CFG_RSPARE |
			   ATMEL_HSMC_NFC_CFG_WSPARE,
			   ATMEL_HSMC_NFC_CFG_PAGESIZE(mtd->writesize) |
			   ATMEL_HSMC_NFC_CFG_SPARESIZE(mtd->oobsize) |
			   ATMEL_HSMC_NFC_CFG_RSPARE);
	regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
		     ATMEL_HSMC_NFC_CTRL_EN);
}
550
/*
 * Build and launch the NFC command currently described in nc->op, then wait
 * for completion (busy-polling when @poll is true, IRQ-driven otherwise).
 *
 * Returns 0 on success or a negative error code if the NFC did not signal
 * completion. nc->op is reset to zero in all cases.
 */
static int atmel_nfc_exec_op(struct atmel_hsmc_nand_controller *nc, bool poll)
{
	u8 *addrs = nc->op.addrs;
	unsigned int op = 0;
	u32 addr, val;
	int i, ret;

	nc->op.wait = ATMEL_HSMC_NFC_SR_CMDDONE;

	/* Pack the (at most two) command opcodes into the op word. */
	for (i = 0; i < nc->op.ncmds; i++)
		op |= ATMEL_NFC_CMD(i, nc->op.cmds[i]);

	/*
	 * The op word only carries up to 4 address cycles: when the maximum
	 * number of cycles is needed, the first one goes through the
	 * dedicated ADDR register.
	 */
	if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_ADDR, *addrs++);

	op |= ATMEL_NFC_CSID(nc->op.cs) |
	      ATMEL_NFC_ACYCLE(nc->op.naddrs);

	if (nc->op.ncmds > 1)
		op |= ATMEL_NFC_VCMD2;

	/*
	 * Unused address slots are zero (nc->op is memset() below after each
	 * op and filled incrementally), so unconditionally packing all four
	 * bytes is safe.
	 */
	addr = addrs[0] | (addrs[1] << 8) | (addrs[2] << 16) |
	       (addrs[3] << 24);

	if (nc->op.data != ATMEL_NFC_NO_DATA) {
		op |= ATMEL_NFC_DATAEN;
		nc->op.wait |= ATMEL_HSMC_NFC_SR_XFRDONE;

		if (nc->op.data == ATMEL_NFC_WRITE_DATA)
			op |= ATMEL_NFC_NFCWR;
	}

	/* Clear all flags (the SR is clear-on-read). */
	regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &val);

	/*
	 * Send the command. The arguments look swapped but are intentional:
	 * the NFC command interface decodes the command from the bus
	 * address, so @op is used as the regmap offset while the packed
	 * address cycles form the written value.
	 */
	regmap_write(nc->io, op, addr);

	ret = atmel_nfc_wait(nc, poll, 0);
	if (ret)
		dev_err(nc->base.dev,
			"Failed to send NAND command (err = %d)!",
			ret);

	/* Reset the op state so the next command starts from scratch. */
	memset(&nc->op, 0, sizeof(nc->op));

	return ret;
}
600
601static void atmel_hsmc_nand_cmd_ctrl(struct mtd_info *mtd, int dat,
602 unsigned int ctrl)
603{
604 struct nand_chip *chip = mtd_to_nand(mtd);
605 struct atmel_nand *nand = to_atmel_nand(chip);
606 struct atmel_hsmc_nand_controller *nc;
607
608 nc = to_hsmc_nand_controller(chip->controller);
609
610 if (ctrl & NAND_ALE) {
611 if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
612 return;
613
614 nc->op.addrs[nc->op.naddrs++] = dat;
615 } else if (ctrl & NAND_CLE) {
616 if (nc->op.ncmds > 1)
617 return;
618
619 nc->op.cmds[nc->op.ncmds++] = dat;
620 }
621
622 if (dat == NAND_CMD_NONE) {
623 nc->op.cs = nand->activecs->id;
624 atmel_nfc_exec_op(nc, true);
625 }
626}
627
628static void atmel_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
629 unsigned int ctrl)
630{
631 struct nand_chip *chip = mtd_to_nand(mtd);
632 struct atmel_nand *nand = to_atmel_nand(chip);
633 struct atmel_nand_controller *nc;
634
635 nc = to_nand_controller(chip->controller);
636
637 if ((ctrl & NAND_CTRL_CHANGE) && nand->activecs->csgpio) {
638 if (ctrl & NAND_NCE)
639 gpiod_set_value(nand->activecs->csgpio, 0);
640 else
641 gpiod_set_value(nand->activecs->csgpio, 1);
642 }
643
644 if (ctrl & NAND_ALE)
645 writeb(cmd, nand->activecs->io.virt + nc->caps->ale_offs);
646 else if (ctrl & NAND_CLE)
647 writeb(cmd, nand->activecs->io.virt + nc->caps->cle_offs);
648}
649
650static void atmel_nfc_copy_to_sram(struct nand_chip *chip, const u8 *buf,
651 bool oob_required)
652{
653 struct mtd_info *mtd = nand_to_mtd(chip);
654 struct atmel_hsmc_nand_controller *nc;
655 int ret = -EIO;
656
657 nc = to_hsmc_nand_controller(chip->controller);
658
659 if (nc->base.dmac)
660 ret = atmel_nand_dma_transfer(&nc->base, (void *)buf,
661 nc->sram.dma, mtd->writesize,
662 DMA_TO_DEVICE);
663
664 /* Falling back to CPU copy. */
665 if (ret)
666 memcpy_toio(nc->sram.virt, buf, mtd->writesize);
667
668 if (oob_required)
669 memcpy_toio(nc->sram.virt + mtd->writesize, chip->oob_poi,
670 mtd->oobsize);
671}
672
673static void atmel_nfc_copy_from_sram(struct nand_chip *chip, u8 *buf,
674 bool oob_required)
675{
676 struct mtd_info *mtd = nand_to_mtd(chip);
677 struct atmel_hsmc_nand_controller *nc;
678 int ret = -EIO;
679
680 nc = to_hsmc_nand_controller(chip->controller);
681
682 if (nc->base.dmac)
683 ret = atmel_nand_dma_transfer(&nc->base, buf, nc->sram.dma,
684 mtd->writesize, DMA_FROM_DEVICE);
685
686 /* Falling back to CPU copy. */
687 if (ret)
688 memcpy_fromio(buf, nc->sram.virt, mtd->writesize);
689
690 if (oob_required)
691 memcpy_fromio(chip->oob_poi, nc->sram.virt + mtd->writesize,
692 mtd->oobsize);
693}
694
695static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column)
696{
697 struct mtd_info *mtd = nand_to_mtd(chip);
698 struct atmel_hsmc_nand_controller *nc;
699
700 nc = to_hsmc_nand_controller(chip->controller);
701
702 if (column >= 0) {
703 nc->op.addrs[nc->op.naddrs++] = column;
704
705 /*
706 * 2 address cycles for the column offset on large page NANDs.
707 */
708 if (mtd->writesize > 512)
709 nc->op.addrs[nc->op.naddrs++] = column >> 8;
710 }
711
712 if (page >= 0) {
713 nc->op.addrs[nc->op.naddrs++] = page;
714 nc->op.addrs[nc->op.naddrs++] = page >> 8;
715
716 if ((mtd->writesize > 512 && chip->chipsize > SZ_128M) ||
717 (mtd->writesize <= 512 && chip->chipsize > SZ_32M))
718 nc->op.addrs[nc->op.naddrs++] = page >> 16;
719 }
720}
721
722static int atmel_nand_pmecc_enable(struct nand_chip *chip, int op, bool raw)
723{
724 struct atmel_nand *nand = to_atmel_nand(chip);
725 struct atmel_nand_controller *nc;
726 int ret;
727
728 nc = to_nand_controller(chip->controller);
729
730 if (raw)
731 return 0;
732
733 ret = atmel_pmecc_enable(nand->pmecc, op);
734 if (ret)
735 dev_err(nc->dev,
736 "Failed to enable ECC engine (err = %d)\n", ret);
737
738 return ret;
739}
740
741static void atmel_nand_pmecc_disable(struct nand_chip *chip, bool raw)
742{
743 struct atmel_nand *nand = to_atmel_nand(chip);
744
745 if (!raw)
746 atmel_pmecc_disable(nand->pmecc);
747}
748
/*
 * Wait for the PMECC engine to finish processing the page that was just
 * streamed through it, then copy the generated ECC bytes for each sector
 * into the ECC region of chip->oob_poi. No-op for raw accesses.
 *
 * Returns 0 on success or a negative error code if the engine timed out.
 */
static int atmel_nand_pmecc_generate_eccbytes(struct nand_chip *chip, bool raw)
{
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand_controller *nc;
	struct mtd_oob_region oobregion;
	void *eccbuf;
	int ret, i;

	nc = to_nand_controller(chip->controller);

	if (raw)
		return 0;

	/* The ECC bytes are only valid once the engine reports ready. */
	ret = atmel_pmecc_wait_rdy(nand->pmecc);
	if (ret) {
		dev_err(nc->dev,
			"Failed to transfer NAND page data (err = %d)\n",
			ret);
		return ret;
	}

	/* Locate the ECC area inside the OOB buffer. */
	mtd_ooblayout_ecc(mtd, 0, &oobregion);
	eccbuf = chip->oob_poi + oobregion.offset;

	/* One chunk of chip->ecc.bytes per ECC sector. */
	for (i = 0; i < chip->ecc.steps; i++) {
		atmel_pmecc_get_generated_eccbytes(nand->pmecc, i,
						   eccbuf);
		eccbuf += chip->ecc.bytes;
	}

	return 0;
}
782
/*
 * Run PMECC correction on the page in @buf, sector by sector, using the ECC
 * bytes already present in chip->oob_poi. No-op for raw accesses.
 *
 * Returns the maximum number of bitflips found in any sector (>= 0), or a
 * negative error code if the engine timed out. Uncorrectable sectors bump
 * mtd->ecc_stats.failed but do not abort the loop.
 */
static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf,
					 bool raw)
{
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand_controller *nc;
	struct mtd_oob_region oobregion;
	int ret, i, max_bitflips = 0;
	void *databuf, *eccbuf;

	nc = to_nand_controller(chip->controller);

	if (raw)
		return 0;

	/* Correction results are only valid once the engine is ready. */
	ret = atmel_pmecc_wait_rdy(nand->pmecc);
	if (ret) {
		dev_err(nc->dev,
			"Failed to read NAND page data (err = %d)\n",
			ret);
		return ret;
	}

	mtd_ooblayout_ecc(mtd, 0, &oobregion);
	eccbuf = chip->oob_poi + oobregion.offset;
	databuf = buf;

	for (i = 0; i < chip->ecc.steps; i++) {
		ret = atmel_pmecc_correct_sector(nand->pmecc, i, databuf,
						 eccbuf);
		/*
		 * When the hardware cannot deal with erased pages itself,
		 * re-check failed sectors in software: an all-0xff chunk
		 * (modulo allowed bitflips) is an erased sector, not an ECC
		 * failure.
		 */
		if (ret < 0 && !atmel_pmecc_correct_erased_chunks(nand->pmecc))
			ret = nand_check_erased_ecc_chunk(databuf,
							  chip->ecc.size,
							  eccbuf,
							  chip->ecc.bytes,
							  NULL, 0,
							  chip->ecc.strength);

		if (ret >= 0)
			max_bitflips = max(ret, max_bitflips);
		else
			mtd->ecc_stats.failed++;

		databuf += chip->ecc.size;
		eccbuf += chip->ecc.bytes;
	}

	return max_bitflips;
}
832
833static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
834 bool oob_required, int page, bool raw)
835{
836 struct mtd_info *mtd = nand_to_mtd(chip);
837 struct atmel_nand *nand = to_atmel_nand(chip);
838 int ret;
839
840 ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
841 if (ret)
842 return ret;
843
844 atmel_nand_write_buf(mtd, buf, mtd->writesize);
845
846 ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);
847 if (ret) {
848 atmel_pmecc_disable(nand->pmecc);
849 return ret;
850 }
851
852 atmel_nand_pmecc_disable(chip, raw);
853
854 atmel_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
855
856 return 0;
857}
858
859static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
860 struct nand_chip *chip, const u8 *buf,
861 int oob_required, int page)
862{
863 return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, false);
864}
865
866static int atmel_nand_pmecc_write_page_raw(struct mtd_info *mtd,
867 struct nand_chip *chip,
868 const u8 *buf, int oob_required,
869 int page)
870{
871 return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, true);
872}
873
874static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
875 bool oob_required, int page, bool raw)
876{
877 struct mtd_info *mtd = nand_to_mtd(chip);
878 int ret;
879
880 ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
881 if (ret)
882 return ret;
883
884 atmel_nand_read_buf(mtd, buf, mtd->writesize);
885 atmel_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
886
887 ret = atmel_nand_pmecc_correct_data(chip, buf, raw);
888
889 atmel_nand_pmecc_disable(chip, raw);
890
891 return ret;
892}
893
894static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
895 struct nand_chip *chip, u8 *buf,
896 int oob_required, int page)
897{
898 return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, false);
899}
900
901static int atmel_nand_pmecc_read_page_raw(struct mtd_info *mtd,
902 struct nand_chip *chip, u8 *buf,
903 int oob_required, int page)
904{
905 return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, true);
906}
907
/*
 * NFC-optimized PMECC page write: stage the page in the NFC SRAM, launch a
 * SEQIN+data op (the data flows through the PMECC engine), append the
 * generated ECC bytes to the OOB, then issue PAGEPROG.
 *
 * Returns 0 on success or a negative error code.
 */
static int atmel_hsmc_nand_pmecc_write_pg(struct nand_chip *chip,
					  const u8 *buf, bool oob_required,
					  int page, bool raw)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_hsmc_nand_controller *nc;
	int ret;

	nc = to_hsmc_nand_controller(chip->controller);

	/* Only the page data goes through SRAM; OOB is written manually. */
	atmel_nfc_copy_to_sram(chip, buf, false);

	nc->op.cmds[0] = NAND_CMD_SEQIN;
	nc->op.ncmds = 1;
	atmel_nfc_set_op_addr(chip, page, 0x0);
	nc->op.cs = nand->activecs->id;
	nc->op.data = ATMEL_NFC_WRITE_DATA;

	/* Enable ECC before the data transfer so it sees the page bytes. */
	ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
	if (ret)
		return ret;

	ret = atmel_nfc_exec_op(nc, false);
	if (ret) {
		atmel_nand_pmecc_disable(chip, raw);
		dev_err(nc->base.dev,
			"Failed to transfer NAND page data (err = %d)\n",
			ret);
		return ret;
	}

	/* Collect the ECC bytes into oob_poi (no-op in raw mode). */
	ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);

	atmel_nand_pmecc_disable(chip, raw);

	if (ret)
		return ret;

	atmel_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);

	/* Commit the page to the array. */
	nc->op.cmds[0] = NAND_CMD_PAGEPROG;
	nc->op.ncmds = 1;
	nc->op.cs = nand->activecs->id;
	ret = atmel_nfc_exec_op(nc, false);
	if (ret)
		dev_err(nc->base.dev, "Failed to program NAND page (err = %d)\n",
			ret);

	return ret;
}
959
960static int atmel_hsmc_nand_pmecc_write_page(struct mtd_info *mtd,
961 struct nand_chip *chip,
962 const u8 *buf, int oob_required,
963 int page)
964{
965 return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
966 false);
967}
968
969static int atmel_hsmc_nand_pmecc_write_page_raw(struct mtd_info *mtd,
970 struct nand_chip *chip,
971 const u8 *buf,
972 int oob_required, int page)
973{
974 return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
975 true);
976}
977
/*
 * NFC-optimized PMECC page read: launch a READ0(/READSTART)+data op that
 * loads page and OOB into the NFC SRAM through the PMECC engine, copy the
 * data out of SRAM, then run correction.
 *
 * Returns max bitflips (>= 0) or a negative error code.
 */
static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
					 bool oob_required, int page,
					 bool raw)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_hsmc_nand_controller *nc;
	int ret;

	nc = to_hsmc_nand_controller(chip->controller);

	/*
	 * Optimized read page accessors only work when the NAND R/B pin is
	 * connected to a native SoC R/B pin. If that's not the case, fallback
	 * to the non-optimized one.
	 */
	if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB) {
		chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);

		return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page,
						raw);
	}

	nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READ0;

	/* Large-page devices need the READSTART confirm command. */
	if (mtd->writesize > 512)
		nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READSTART;

	atmel_nfc_set_op_addr(chip, page, 0x0);
	nc->op.cs = nand->activecs->id;
	nc->op.data = ATMEL_NFC_READ_DATA;

	/* Enable ECC before the transfer so it sees the incoming bytes. */
	ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
	if (ret)
		return ret;

	ret = atmel_nfc_exec_op(nc, false);
	if (ret) {
		atmel_nand_pmecc_disable(chip, raw);
		dev_err(nc->base.dev,
			"Failed to load NAND page data (err = %d)\n",
			ret);
		return ret;
	}

	/* Always pull the OOB too: correction needs the ECC bytes. */
	atmel_nfc_copy_from_sram(chip, buf, true);

	ret = atmel_nand_pmecc_correct_data(chip, buf, raw);

	atmel_nand_pmecc_disable(chip, raw);

	return ret;
}
1031
1032static int atmel_hsmc_nand_pmecc_read_page(struct mtd_info *mtd,
1033 struct nand_chip *chip, u8 *buf,
1034 int oob_required, int page)
1035{
1036 return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
1037 false);
1038}
1039
1040static int atmel_hsmc_nand_pmecc_read_page_raw(struct mtd_info *mtd,
1041 struct nand_chip *chip,
1042 u8 *buf, int oob_required,
1043 int page)
1044{
1045 return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
1046 true);
1047}
1048
/*
 * Negotiate the ECC configuration (strength, sector size, OOB layout) with
 * the PMECC block and create a per-chip PMECC user object.
 *
 * Strength/sector-size selection order: explicit DT/driver value, then the
 * chip's datasheet requirement, then "let the PMECC maximize/choose".
 *
 * Returns 0 on success, -ENOTSUPP when no PMECC is present, or a negative
 * error code from atmel_pmecc_create_user().
 */
static int atmel_nand_pmecc_init(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_nand_controller *nc;
	struct atmel_pmecc_user_req req;

	nc = to_nand_controller(chip->controller);

	if (!nc->pmecc) {
		dev_err(nc->dev, "HW ECC not supported\n");
		return -ENOTSUPP;
	}

	/* Old DTs expressed ECC requirements through extra properties. */
	if (nc->caps->legacy_of_bindings) {
		u32 val;

		if (!of_property_read_u32(nc->dev->of_node, "atmel,pmecc-cap",
					  &val))
			chip->ecc.strength = val;

		if (!of_property_read_u32(nc->dev->of_node,
					  "atmel,pmecc-sector-size",
					  &val))
			chip->ecc.size = val;
	}

	if (chip->ecc.options & NAND_ECC_MAXIMIZE)
		req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
	else if (chip->ecc.strength)
		req.ecc.strength = chip->ecc.strength;
	else if (chip->ecc_strength_ds)
		req.ecc.strength = chip->ecc_strength_ds;
	else
		req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;

	if (chip->ecc.size)
		req.ecc.sectorsize = chip->ecc.size;
	else if (chip->ecc_step_ds)
		req.ecc.sectorsize = chip->ecc_step_ds;
	else
		req.ecc.sectorsize = ATMEL_PMECC_SECTOR_SIZE_AUTO;

	req.pagesize = mtd->writesize;
	req.oobsize = mtd->oobsize;

	/*
	 * Small-page: ECC at a fixed OOB offset; large-page: let the PMECC
	 * place the ECC bytes, reserving 2 bytes (bad-block marker).
	 */
	if (mtd->writesize <= 512) {
		req.ecc.bytes = 4;
		req.ecc.ooboffset = 0;
	} else {
		req.ecc.bytes = mtd->oobsize - 2;
		req.ecc.ooboffset = ATMEL_PMECC_OOBOFFSET_AUTO;
	}

	nand->pmecc = atmel_pmecc_create_user(nc->pmecc, &req);
	if (IS_ERR(nand->pmecc))
		return PTR_ERR(nand->pmecc);

	/* Publish the configuration the PMECC actually settled on. */
	chip->ecc.algo = NAND_ECC_BCH;
	chip->ecc.size = req.ecc.sectorsize;
	chip->ecc.bytes = req.ecc.bytes / req.ecc.nsectors;
	chip->ecc.strength = req.ecc.strength;

	chip->options |= NAND_NO_SUBPAGE_WRITE;

	mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);

	return 0;
}
1118
1119static int atmel_nand_ecc_init(struct atmel_nand *nand)
1120{
1121 struct nand_chip *chip = &nand->base;
1122 struct atmel_nand_controller *nc;
1123 int ret;
1124
1125 nc = to_nand_controller(chip->controller);
1126
1127 switch (chip->ecc.mode) {
1128 case NAND_ECC_NONE:
1129 case NAND_ECC_SOFT:
1130 /*
1131 * Nothing to do, the core will initialize everything for us.
1132 */
1133 break;
1134
1135 case NAND_ECC_HW:
1136 ret = atmel_nand_pmecc_init(chip);
1137 if (ret)
1138 return ret;
1139
1140 chip->ecc.read_page = atmel_nand_pmecc_read_page;
1141 chip->ecc.write_page = atmel_nand_pmecc_write_page;
1142 chip->ecc.read_page_raw = atmel_nand_pmecc_read_page_raw;
1143 chip->ecc.write_page_raw = atmel_nand_pmecc_write_page_raw;
1144 break;
1145
1146 default:
1147 /* Other modes are not supported. */
1148 dev_err(nc->dev, "Unsupported ECC mode: %d\n",
1149 chip->ecc.mode);
1150 return -ENOTSUPP;
1151 }
1152
1153 return 0;
1154}
1155
1156static int atmel_hsmc_nand_ecc_init(struct atmel_nand *nand)
1157{
1158 struct nand_chip *chip = &nand->base;
1159 int ret;
1160
1161 ret = atmel_nand_ecc_init(nand);
1162 if (ret)
1163 return ret;
1164
1165 if (chip->ecc.mode != NAND_ECC_HW)
1166 return 0;
1167
1168 /* Adjust the ECC operations for the HSMC IP. */
1169 chip->ecc.read_page = atmel_hsmc_nand_pmecc_read_page;
1170 chip->ecc.write_page = atmel_hsmc_nand_pmecc_write_page;
1171 chip->ecc.read_page_raw = atmel_hsmc_nand_pmecc_read_page_raw;
1172 chip->ecc.write_page_raw = atmel_hsmc_nand_pmecc_write_page_raw;
1173 chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
1174
1175 return 0;
1176}
1177
1178static void atmel_nand_init(struct atmel_nand_controller *nc,
1179 struct atmel_nand *nand)
1180{
1181 struct nand_chip *chip = &nand->base;
1182 struct mtd_info *mtd = nand_to_mtd(chip);
1183
1184 mtd->dev.parent = nc->dev;
1185 nand->base.controller = &nc->base;
1186
1187 chip->cmd_ctrl = atmel_nand_cmd_ctrl;
1188 chip->read_byte = atmel_nand_read_byte;
1189 chip->read_word = atmel_nand_read_word;
1190 chip->write_byte = atmel_nand_write_byte;
1191 chip->read_buf = atmel_nand_read_buf;
1192 chip->write_buf = atmel_nand_write_buf;
1193 chip->select_chip = atmel_nand_select_chip;
1194
1195 /* Some NANDs require a longer delay than the default one (20us). */
1196 chip->chip_delay = 40;
1197
1198 /*
1199 * Use a bounce buffer when the buffer passed by the MTD user is not
1200 * suitable for DMA.
1201 */
1202 if (nc->dmac)
1203 chip->options |= NAND_USE_BOUNCE_BUFFER;
1204
1205 /* Default to HW ECC if pmecc is available. */
1206 if (nc->pmecc)
1207 chip->ecc.mode = NAND_ECC_HW;
1208}
1209
1210static void atmel_smc_nand_init(struct atmel_nand_controller *nc,
1211 struct atmel_nand *nand)
1212{
1213 struct nand_chip *chip = &nand->base;
1214 struct atmel_smc_nand_controller *smc_nc;
1215 int i;
1216
1217 atmel_nand_init(nc, nand);
1218
1219 smc_nc = to_smc_nand_controller(chip->controller);
1220 if (!smc_nc->matrix)
1221 return;
1222
1223 /* Attach the CS to the NAND Flash logic. */
1224 for (i = 0; i < nand->numcs; i++)
1225 regmap_update_bits(smc_nc->matrix, smc_nc->ebi_csa_offs,
1226 BIT(nand->cs[i].id), BIT(nand->cs[i].id));
1227}
1228
1229static void atmel_hsmc_nand_init(struct atmel_nand_controller *nc,
1230 struct atmel_nand *nand)
1231{
1232 struct nand_chip *chip = &nand->base;
1233
1234 atmel_nand_init(nc, nand);
1235
1236 /* Overload some methods for the HSMC controller. */
1237 chip->cmd_ctrl = atmel_hsmc_nand_cmd_ctrl;
1238 chip->select_chip = atmel_hsmc_nand_select_chip;
1239}
1240
1241static int atmel_nand_detect(struct atmel_nand *nand)
1242{
1243 struct nand_chip *chip = &nand->base;
1244 struct mtd_info *mtd = nand_to_mtd(chip);
1245 struct atmel_nand_controller *nc;
1246 int ret;
1247
1248 nc = to_nand_controller(chip->controller);
1249
1250 ret = nand_scan_ident(mtd, nand->numcs, NULL);
1251 if (ret)
1252 dev_err(nc->dev, "nand_scan_ident() failed: %d\n", ret);
1253
1254 return ret;
1255}
1256
1257static int atmel_nand_unregister(struct atmel_nand *nand)
1258{
1259 struct nand_chip *chip = &nand->base;
1260 struct mtd_info *mtd = nand_to_mtd(chip);
1261 int ret;
1262
1263 ret = mtd_device_unregister(mtd);
1264 if (ret)
1265 return ret;
1266
1267 nand_cleanup(chip);
1268 list_del(&nand->node);
1269
1270 return 0;
1271}
1272
/*
 * Finish the scan (nand_scan_tail), pick an MTD name compatible with the
 * binding flavor in use, register the MTD device and queue the chip on the
 * controller's list.
 *
 * Returns 0 on success or a negative error code.
 */
static int atmel_nand_register(struct atmel_nand *nand)
{
	struct nand_chip *chip = &nand->base;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand_controller *nc;
	int ret;

	nc = to_nand_controller(chip->controller);

	if (nc->caps->legacy_of_bindings || !nc->dev->of_node) {
		/*
		 * We keep the MTD name unchanged to avoid breaking platforms
		 * where the MTD cmdline parser is used and the bootloader
		 * has not been updated to use the new naming scheme.
		 */
		mtd->name = "atmel_nand";
	} else if (!mtd->name) {
		/*
		 * If the new bindings are used and the bootloader has not been
		 * updated to pass a new mtdparts parameter on the cmdline, you
		 * should define the following property in your nand node:
		 *
		 * label = "atmel_nand";
		 *
		 * This way, mtd->name will be set by the core when
		 * nand_set_flash_node() is called.
		 */
		mtd->name = devm_kasprintf(nc->dev, GFP_KERNEL,
					   "%s:nand.%d", dev_name(nc->dev),
					   nand->cs[0].id);
		if (!mtd->name) {
			dev_err(nc->dev, "Failed to allocate mtd->name\n");
			return -ENOMEM;
		}
	}

	ret = nand_scan_tail(mtd);
	if (ret) {
		dev_err(nc->dev, "nand_scan_tail() failed: %d\n", ret);
		return ret;
	}

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(nc->dev, "Failed to register mtd device: %d\n", ret);
		/* Undo nand_scan_tail() on failure. */
		nand_cleanup(chip);
		return ret;
	}

	list_add_tail(&nand->node, &nc->chips);

	return 0;
}
1326
/*
 * Allocate and populate an atmel_nand object from a DT child node: one CS
 * entry per "reg" tuple, plus the optional detect, R/B and CS GPIOs.
 *
 * Returns the new object or an ERR_PTR() on failure (all resources are
 * devm-managed, so no manual cleanup is needed).
 */
static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
					    struct device_node *np,
					    int reg_cells)
{
	struct atmel_nand *nand;
	struct gpio_desc *gpio;
	int numcs, ret, i;

	/* One CS per reg tuple of reg_cells u32 cells. */
	numcs = of_property_count_elems_of_size(np, "reg",
						reg_cells * sizeof(u32));
	if (numcs < 1) {
		dev_err(nc->dev, "Missing or invalid reg property\n");
		return ERR_PTR(-EINVAL);
	}

	/* Trailing flexible array: one atmel_nand_cs per CS. */
	nand = devm_kzalloc(nc->dev,
			    sizeof(*nand) + (numcs * sizeof(*nand->cs)),
			    GFP_KERNEL);
	if (!nand) {
		dev_err(nc->dev, "Failed to allocate NAND object\n");
		return ERR_PTR(-ENOMEM);
	}

	nand->numcs = numcs;

	/* Optional SmartMedia card-detect GPIO (-ENOENT = not wired). */
	gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "det", 0,
						      &np->fwnode, GPIOD_IN,
						      "nand-det");
	if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
		dev_err(nc->dev,
			"Failed to get detect gpio (err = %ld)\n",
			PTR_ERR(gpio));
		return ERR_CAST(gpio);
	}

	if (!IS_ERR(gpio))
		nand->cdgpio = gpio;

	for (i = 0; i < numcs; i++) {
		struct resource res;
		u32 val;

		/*
		 * NOTE(review): the resource is always looked up at index 0,
		 * while the CS id below is read at index i — every CS ends up
		 * mapping the same IO range. Confirm whether index i was
		 * intended here for multi-CS setups.
		 */
		ret = of_address_to_resource(np, 0, &res);
		if (ret) {
			dev_err(nc->dev, "Invalid reg property (err = %d)\n",
				ret);
			return ERR_PTR(ret);
		}

		/* First cell of each reg tuple is the CS id. */
		ret = of_property_read_u32_index(np, "reg", i * reg_cells,
						 &val);
		if (ret) {
			dev_err(nc->dev, "Invalid reg property (err = %d)\n",
				ret);
			return ERR_PTR(ret);
		}

		nand->cs[i].id = val;

		nand->cs[i].io.dma = res.start;
		nand->cs[i].io.virt = devm_ioremap_resource(nc->dev, &res);
		if (IS_ERR(nand->cs[i].io.virt))
			return ERR_CAST(nand->cs[i].io.virt);

		/* R/B: either a native NFC line ("atmel,rb") or a GPIO. */
		if (!of_property_read_u32(np, "atmel,rb", &val)) {
			if (val > ATMEL_NFC_MAX_RB_ID)
				return ERR_PTR(-EINVAL);

			nand->cs[i].rb.type = ATMEL_NAND_NATIVE_RB;
			nand->cs[i].rb.id = val;
		} else {
			gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev,
							"rb", i, &np->fwnode,
							GPIOD_IN, "nand-rb");
			if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
				dev_err(nc->dev,
					"Failed to get R/B gpio (err = %ld)\n",
					PTR_ERR(gpio));
				return ERR_CAST(gpio);
			}

			if (!IS_ERR(gpio)) {
				nand->cs[i].rb.type = ATMEL_NAND_GPIO_RB;
				nand->cs[i].rb.gpio = gpio;
			}
		}

		/* Optional per-CS chip-select GPIO (de-asserted high). */
		gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "cs",
							      i, &np->fwnode,
							      GPIOD_OUT_HIGH,
							      "nand-cs");
		if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
			dev_err(nc->dev,
				"Failed to get CS gpio (err = %ld)\n",
				PTR_ERR(gpio));
			return ERR_CAST(gpio);
		}

		if (!IS_ERR(gpio))
			nand->cs[i].csgpio = gpio;
	}

	nand_set_flash_node(&nand->base, np);

	return nand;
}
1433
1434static int
1435atmel_nand_controller_add_nand(struct atmel_nand_controller *nc,
1436 struct atmel_nand *nand)
1437{
1438 int ret;
1439
1440 /* No card inserted, skip this NAND. */
1441 if (nand->cdgpio && gpiod_get_value(nand->cdgpio)) {
1442 dev_info(nc->dev, "No SmartMedia card inserted.\n");
1443 return 0;
1444 }
1445
1446 nc->caps->ops->nand_init(nc, nand);
1447
1448 ret = atmel_nand_detect(nand);
1449 if (ret)
1450 return ret;
1451
1452 ret = nc->caps->ops->ecc_init(nand);
1453 if (ret)
1454 return ret;
1455
1456 return atmel_nand_register(nand);
1457}
1458
1459static int
1460atmel_nand_controller_remove_nands(struct atmel_nand_controller *nc)
1461{
1462 struct atmel_nand *nand, *tmp;
1463 int ret;
1464
1465 list_for_each_entry_safe(nand, tmp, &nc->chips, node) {
1466 ret = atmel_nand_unregister(nand);
1467 if (ret)
1468 return ret;
1469 }
1470
1471 return 0;
1472}
1473
/*
 * Probe path for legacy DT bindings: build a single-CS atmel_nand object
 * from the platform resources and index-based GPIOs (0 = R/B, 1 = CS,
 * 2 = card detect), then hand it to the common add_nand path.
 */
static int
atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc)
{
	struct device *dev = nc->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct atmel_nand *nand;
	struct gpio_desc *gpio;
	struct resource *res;

	/*
	 * Legacy bindings only allow connecting a single NAND with a unique CS
	 * line to the controller.
	 */
	nand = devm_kzalloc(nc->dev, sizeof(*nand) + sizeof(*nand->cs),
			    GFP_KERNEL);
	if (!nand)
		return -ENOMEM;

	nand->numcs = 1;

	/* devm_ioremap_resource() also validates a NULL res for us. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nand->cs[0].io.virt = devm_ioremap_resource(dev, res);
	if (IS_ERR(nand->cs[0].io.virt))
		return PTR_ERR(nand->cs[0].io.virt);

	nand->cs[0].io.dma = res->start;

	/*
	 * The old driver was hardcoding the CS id to 3 for all sama5
	 * controllers. Since this id is only meaningful for the sama5
	 * controller we can safely assign this id to 3 no matter the
	 * controller.
	 * If one wants to connect a NAND to a different CS line, he will
	 * have to use the new bindings.
	 */
	nand->cs[0].id = 3;

	/* R/B GPIO. */
	gpio = devm_gpiod_get_index_optional(dev, NULL, 0, GPIOD_IN);
	if (IS_ERR(gpio)) {
		dev_err(dev, "Failed to get R/B gpio (err = %ld)\n",
			PTR_ERR(gpio));
		return PTR_ERR(gpio);
	}

	/* _optional: NULL (no GPIO) just means no GPIO-based R/B. */
	if (gpio) {
		nand->cs[0].rb.type = ATMEL_NAND_GPIO_RB;
		nand->cs[0].rb.gpio = gpio;
	}

	/* CS GPIO. */
	gpio = devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_HIGH);
	if (IS_ERR(gpio)) {
		dev_err(dev, "Failed to get CS gpio (err = %ld)\n",
			PTR_ERR(gpio));
		return PTR_ERR(gpio);
	}

	nand->cs[0].csgpio = gpio;

	/* Card detect GPIO. */
	gpio = devm_gpiod_get_index_optional(nc->dev, NULL, 2, GPIOD_IN);
	if (IS_ERR(gpio)) {
		dev_err(dev,
			"Failed to get detect gpio (err = %ld)\n",
			PTR_ERR(gpio));
		return PTR_ERR(gpio);
	}

	nand->cdgpio = gpio;

	nand_set_flash_node(&nand->base, nc->dev->of_node);

	return atmel_nand_controller_add_nand(nc, nand);
}
1549
1550static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
1551{
1552 struct device_node *np, *nand_np;
1553 struct device *dev = nc->dev;
1554 int ret, reg_cells;
1555 u32 val;
1556
1557 /* We do not retrieve the SMC syscon when parsing old DTs. */
1558 if (nc->caps->legacy_of_bindings)
1559 return atmel_nand_controller_legacy_add_nands(nc);
1560
1561 np = dev->of_node;
1562
1563 ret = of_property_read_u32(np, "#address-cells", &val);
1564 if (ret) {
1565 dev_err(dev, "missing #address-cells property\n");
1566 return ret;
1567 }
1568
1569 reg_cells = val;
1570
1571 ret = of_property_read_u32(np, "#size-cells", &val);
1572 if (ret) {
1573 dev_err(dev, "missing #address-cells property\n");
1574 return ret;
1575 }
1576
1577 reg_cells += val;
1578
1579 for_each_child_of_node(np, nand_np) {
1580 struct atmel_nand *nand;
1581
1582 nand = atmel_nand_create(nc, nand_np, reg_cells);
1583 if (IS_ERR(nand)) {
1584 ret = PTR_ERR(nand);
1585 goto err;
1586 }
1587
1588 ret = atmel_nand_controller_add_nand(nc, nand);
1589 if (ret)
1590 goto err;
1591 }
1592
1593 return 0;
1594
1595err:
1596 atmel_nand_controller_remove_nands(nc);
1597
1598 return ret;
1599}
1600
/* Release resources acquired at controller-init time (DMA channel, mck). */
static void atmel_nand_controller_cleanup(struct atmel_nand_controller *nc)
{
	if (nc->dmac)
		dma_release_channel(nc->dmac);

	clk_put(nc->mck);
}
1608
/*
 * Bus-matrix compatibles and, as .data, the per-SoC offset of the EBI CSA
 * register used to route CS lines to the NAND Flash logic.
 */
static const struct of_device_id atmel_matrix_of_ids[] = {
	{
		.compatible = "atmel,at91sam9260-matrix",
		.data = (void *)AT91SAM9260_MATRIX_EBICSA,
	},
	{
		.compatible = "atmel,at91sam9261-matrix",
		.data = (void *)AT91SAM9261_MATRIX_EBICSA,
	},
	{
		.compatible = "atmel,at91sam9263-matrix",
		.data = (void *)AT91SAM9263_MATRIX_EBI0CSA,
	},
	{
		.compatible = "atmel,at91sam9rl-matrix",
		.data = (void *)AT91SAM9RL_MATRIX_EBICSA,
	},
	{
		.compatible = "atmel,at91sam9g45-matrix",
		.data = (void *)AT91SAM9G45_MATRIX_EBICSA,
	},
	{
		.compatible = "atmel,at91sam9n12-matrix",
		.data = (void *)AT91SAM9N12_MATRIX_EBICSA,
	},
	{
		.compatible = "atmel,at91sam9x5-matrix",
		.data = (void *)AT91SAM9X5_MATRIX_EBICSA,
	},
	{ /* sentinel */ },
};
1640
/*
 * Common controller initialization: base state, optional PMECC object,
 * optional memcpy DMA channel, and (for new bindings) the SMC syscon
 * regmap referenced by the parent's "atmel,smc" phandle.
 *
 * Returns 0 on success or a negative error code (-EPROBE_DEFER is passed
 * through silently for the PMECC lookup).
 */
static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
				      struct platform_device *pdev,
				      const struct atmel_nand_controller_caps *caps)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	int ret;

	nand_hw_control_init(&nc->base);
	INIT_LIST_HEAD(&nc->chips);
	nc->dev = dev;
	nc->caps = caps;

	platform_set_drvdata(pdev, nc);

	nc->pmecc = devm_atmel_pmecc_get(dev);
	if (IS_ERR(nc->pmecc)) {
		ret = PTR_ERR(nc->pmecc);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Could not get PMECC object (err = %d)\n",
				ret);
		return ret;
	}

	/* A DMA channel is a nice-to-have: failure is only logged. */
	if (nc->caps->has_dma) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);

		nc->dmac = dma_request_channel(mask, NULL, NULL);
		if (!nc->dmac)
			dev_err(nc->dev, "Failed to request DMA channel\n");
	}

	/* We do not retrieve the SMC syscon when parsing old DTs. */
	if (nc->caps->legacy_of_bindings)
		return 0;

	np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
	if (!np) {
		dev_err(dev, "Missing or invalid atmel,smc property\n");
		return -EINVAL;
	}

	nc->smc = syscon_node_to_regmap(np);
	of_node_put(np);
	if (IS_ERR(nc->smc)) {
		ret = PTR_ERR(nc->smc);
		dev_err(dev, "Could not get SMC regmap (err = %d)\n", ret);
		return ret;
	}

	return 0;
}
1696
1697static int
1698atmel_smc_nand_controller_init(struct atmel_smc_nand_controller *nc)
1699{
1700 struct device *dev = nc->base.dev;
1701 const struct of_device_id *match;
1702 struct device_node *np;
1703 int ret;
1704
1705 /* We do not retrieve the matrix syscon when parsing old DTs. */
1706 if (nc->base.caps->legacy_of_bindings)
1707 return 0;
1708
1709 np = of_parse_phandle(dev->parent->of_node, "atmel,matrix", 0);
1710 if (!np)
1711 return 0;
1712
1713 match = of_match_node(atmel_matrix_of_ids, np);
1714 if (!match) {
1715 of_node_put(np);
1716 return 0;
1717 }
1718
1719 nc->matrix = syscon_node_to_regmap(np);
1720 of_node_put(np);
1721 if (IS_ERR(nc->matrix)) {
1722 ret = PTR_ERR(nc->matrix);
1723 dev_err(dev, "Could not get Matrix regmap (err = %d)\n", ret);
1724 return ret;
1725 }
1726
1727 nc->ebi_csa_offs = (unsigned int)match->data;
1728
1729 /*
1730 * The at91sam9263 has 2 EBIs, if the NAND controller is under EBI1
1731 * add 4 to ->ebi_csa_offs.
1732 */
1733 if (of_device_is_compatible(dev->parent->of_node,
1734 "atmel,at91sam9263-ebi1"))
1735 nc->ebi_csa_offs += 4;
1736
1737 return 0;
1738}
1739
1740static int
1741atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
1742{
1743 struct regmap_config regmap_conf = {
1744 .reg_bits = 32,
1745 .val_bits = 32,
1746 .reg_stride = 4,
1747 };
1748
1749 struct device *dev = nc->base.dev;
1750 struct device_node *nand_np, *nfc_np;
1751 void __iomem *iomem;
1752 struct resource res;
1753 int ret;
1754
1755 nand_np = dev->of_node;
1756 nfc_np = of_find_compatible_node(dev->of_node, NULL,
1757 "atmel,sama5d3-nfc");
1758
1759 nc->clk = of_clk_get(nfc_np, 0);
1760 if (IS_ERR(nc->clk)) {
1761 ret = PTR_ERR(nc->clk);
1762 dev_err(dev, "Failed to retrieve HSMC clock (err = %d)\n",
1763 ret);
1764 goto out;
1765 }
1766
1767 ret = clk_prepare_enable(nc->clk);
1768 if (ret) {
1769 dev_err(dev, "Failed to enable the HSMC clock (err = %d)\n",
1770 ret);
1771 goto out;
1772 }
1773
1774 nc->irq = of_irq_get(nand_np, 0);
1775 if (nc->irq < 0) {
1776 ret = nc->irq;
1777 if (ret != -EPROBE_DEFER)
1778 dev_err(dev, "Failed to get IRQ number (err = %d)\n",
1779 ret);
1780 goto out;
1781 }
1782
1783 ret = of_address_to_resource(nfc_np, 0, &res);
1784 if (ret) {
1785 dev_err(dev, "Invalid or missing NFC IO resource (err = %d)\n",
1786 ret);
1787 goto out;
1788 }
1789
1790 iomem = devm_ioremap_resource(dev, &res);
1791 if (IS_ERR(iomem)) {
1792 ret = PTR_ERR(iomem);
1793 goto out;
1794 }
1795
1796 regmap_conf.name = "nfc-io";
1797 regmap_conf.max_register = resource_size(&res) - 4;
1798 nc->io = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
1799 if (IS_ERR(nc->io)) {
1800 ret = PTR_ERR(nc->io);
1801 dev_err(dev, "Could not create NFC IO regmap (err = %d)\n",
1802 ret);
1803 goto out;
1804 }
1805
1806 ret = of_address_to_resource(nfc_np, 1, &res);
1807 if (ret) {
1808 dev_err(dev, "Invalid or missing HSMC resource (err = %d)\n",
1809 ret);
1810 goto out;
1811 }
1812
1813 iomem = devm_ioremap_resource(dev, &res);
1814 if (IS_ERR(iomem)) {
1815 ret = PTR_ERR(iomem);
1816 goto out;
1817 }
1818
1819 regmap_conf.name = "smc";
1820 regmap_conf.max_register = resource_size(&res) - 4;
1821 nc->base.smc = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
1822 if (IS_ERR(nc->base.smc)) {
1823 ret = PTR_ERR(nc->base.smc);
1824 dev_err(dev, "Could not create NFC IO regmap (err = %d)\n",
1825 ret);
1826 goto out;
1827 }
1828
1829 ret = of_address_to_resource(nfc_np, 2, &res);
1830 if (ret) {
1831 dev_err(dev, "Invalid or missing SRAM resource (err = %d)\n",
1832 ret);
1833 goto out;
1834 }
1835
1836 nc->sram.virt = devm_ioremap_resource(dev, &res);
1837 if (IS_ERR(nc->sram.virt)) {
1838 ret = PTR_ERR(nc->sram.virt);
1839 goto out;
1840 }
1841
1842 nc->sram.dma = res.start;
1843
1844out:
1845 of_node_put(nfc_np);
1846
1847 return ret;
1848}
1849
1850static int
1851atmel_hsmc_nand_controller_init(struct atmel_hsmc_nand_controller *nc)
1852{
1853 struct device *dev = nc->base.dev;
1854 struct device_node *np;
1855 int ret;
1856
1857 np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
1858 if (!np) {
1859 dev_err(dev, "Missing or invalid atmel,smc property\n");
1860 return -EINVAL;
1861 }
1862
1863 nc->irq = of_irq_get(np, 0);
1864 of_node_put(np);
1865 if (nc->irq < 0) {
1866 if (nc->irq != -EPROBE_DEFER)
1867 dev_err(dev, "Failed to get IRQ number (err = %d)\n",
1868 nc->irq);
1869 return nc->irq;
1870 }
1871
1872 np = of_parse_phandle(dev->of_node, "atmel,nfc-io", 0);
1873 if (!np) {
1874 dev_err(dev, "Missing or invalid atmel,nfc-io property\n");
1875 return -EINVAL;
1876 }
1877
1878 nc->io = syscon_node_to_regmap(np);
1879 of_node_put(np);
1880 if (IS_ERR(nc->io)) {
1881 ret = PTR_ERR(nc->io);
1882 dev_err(dev, "Could not get NFC IO regmap (err = %d)\n", ret);
1883 return ret;
1884 }
1885
1886 nc->sram.pool = of_gen_pool_get(nc->base.dev->of_node,
1887 "atmel,nfc-sram", 0);
1888 if (!nc->sram.pool) {
1889 dev_err(nc->base.dev, "Missing SRAM\n");
1890 return -ENOMEM;
1891 }
1892
1893 nc->sram.virt = gen_pool_dma_alloc(nc->sram.pool,
1894 ATMEL_NFC_SRAM_SIZE,
1895 &nc->sram.dma);
1896 if (!nc->sram.virt) {
1897 dev_err(nc->base.dev,
1898 "Could not allocate memory from the NFC SRAM pool\n");
1899 return -ENOMEM;
1900 }
1901
1902 return 0;
1903}
1904
1905static int
1906atmel_hsmc_nand_controller_remove(struct atmel_nand_controller *nc)
1907{
1908 struct atmel_hsmc_nand_controller *hsmc_nc;
1909 int ret;
1910
1911 ret = atmel_nand_controller_remove_nands(nc);
1912 if (ret)
1913 return ret;
1914
1915 hsmc_nc = container_of(nc, struct atmel_hsmc_nand_controller, base);
1916 if (hsmc_nc->sram.pool)
1917 gen_pool_free(hsmc_nc->sram.pool,
1918 (unsigned long)hsmc_nc->sram.virt,
1919 ATMEL_NFC_SRAM_SIZE);
1920
1921 if (hsmc_nc->clk) {
1922 clk_disable_unprepare(hsmc_nc->clk);
1923 clk_put(hsmc_nc->clk);
1924 }
1925
1926 atmel_nand_controller_cleanup(nc);
1927
1928 return 0;
1929}
1930
1931static int atmel_hsmc_nand_controller_probe(struct platform_device *pdev,
1932 const struct atmel_nand_controller_caps *caps)
1933{
1934 struct device *dev = &pdev->dev;
1935 struct atmel_hsmc_nand_controller *nc;
1936 int ret;
1937
1938 nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
1939 if (!nc)
1940 return -ENOMEM;
1941
1942 ret = atmel_nand_controller_init(&nc->base, pdev, caps);
1943 if (ret)
1944 return ret;
1945
1946 if (caps->legacy_of_bindings)
1947 ret = atmel_hsmc_nand_controller_legacy_init(nc);
1948 else
1949 ret = atmel_hsmc_nand_controller_init(nc);
1950
1951 if (ret)
1952 return ret;
1953
1954 /* Make sure all irqs are masked before registering our IRQ handler. */
1955 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
1956 ret = devm_request_irq(dev, nc->irq, atmel_nfc_interrupt,
1957 IRQF_SHARED, "nfc", nc);
1958 if (ret) {
1959 dev_err(dev,
1960 "Could not get register NFC interrupt handler (err = %d)\n",
1961 ret);
1962 goto err;
1963 }
1964
1965 /* Initial NFC configuration. */
1966 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CFG,
1967 ATMEL_HSMC_NFC_CFG_DTO_MAX);
1968
1969 ret = atmel_nand_controller_add_nands(&nc->base);
1970 if (ret)
1971 goto err;
1972
1973 return 0;
1974
1975err:
1976 atmel_hsmc_nand_controller_remove(&nc->base);
1977
1978 return ret;
1979}
1980
/* Probe/remove/ECC hooks for controllers behind the HSMC (sama5). */
static const struct atmel_nand_controller_ops atmel_hsmc_nc_ops = {
	.probe = atmel_hsmc_nand_controller_probe,
	.remove = atmel_hsmc_nand_controller_remove,
	.ecc_init = atmel_hsmc_nand_ecc_init,
	.nand_init = atmel_hsmc_nand_init,
};

/* sama5 HSMC controller, new DT bindings. */
static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = {
	.has_dma = true,
	.ale_offs = BIT(21),	/* ALE wired to address line A21 */
	.cle_offs = BIT(22),	/* CLE wired to address line A22 */
	.ops = &atmel_hsmc_nc_ops,
};

/* Only used to parse old bindings. */
static const struct atmel_nand_controller_caps atmel_sama5_nand_caps = {
	.has_dma = true,
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ops = &atmel_hsmc_nc_ops,
	.legacy_of_bindings = true,
};
2003
2004static int atmel_smc_nand_controller_probe(struct platform_device *pdev,
2005 const struct atmel_nand_controller_caps *caps)
2006{
2007 struct device *dev = &pdev->dev;
2008 struct atmel_smc_nand_controller *nc;
2009 int ret;
2010
2011 nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
2012 if (!nc)
2013 return -ENOMEM;
2014
2015 ret = atmel_nand_controller_init(&nc->base, pdev, caps);
2016 if (ret)
2017 return ret;
2018
2019 ret = atmel_smc_nand_controller_init(nc);
2020 if (ret)
2021 return ret;
2022
2023 return atmel_nand_controller_add_nands(&nc->base);
2024}
2025
/*
 * Tear down a plain-SMC controller: unregister the NAND chips, then run
 * the common cleanup.
 */
static int
atmel_smc_nand_controller_remove(struct atmel_nand_controller *nc)
{
	int err = atmel_nand_controller_remove_nands(nc);

	if (err)
		return err;

	atmel_nand_controller_cleanup(nc);

	return 0;
}
2039
/* Probe/remove/ECC hooks for controllers behind the plain SMC. */
static const struct atmel_nand_controller_ops atmel_smc_nc_ops = {
	.probe = atmel_smc_nand_controller_probe,
	.remove = atmel_smc_nand_controller_remove,
	.ecc_init = atmel_nand_ecc_init,
	.nand_init = atmel_smc_nand_init,
};

static const struct atmel_nand_controller_caps atmel_rm9200_nc_caps = {
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ops = &atmel_smc_nc_ops,
};

/* The at91sam9261 swaps the ALE/CLE address lines. */
static const struct atmel_nand_controller_caps atmel_sam9261_nc_caps = {
	.ale_offs = BIT(22),
	.cle_offs = BIT(21),
	.ops = &atmel_smc_nc_ops,
};

/* Same wiring as rm9200, but with DMA support. */
static const struct atmel_nand_controller_caps atmel_sam9g45_nc_caps = {
	.has_dma = true,
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ops = &atmel_smc_nc_ops,
};

/* Only used to parse old bindings. */
static const struct atmel_nand_controller_caps atmel_rm9200_nand_caps = {
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ops = &atmel_smc_nc_ops,
	.legacy_of_bindings = true,
};

static const struct atmel_nand_controller_caps atmel_sam9261_nand_caps = {
	.ale_offs = BIT(22),
	.cle_offs = BIT(21),
	.ops = &atmel_smc_nc_ops,
	.legacy_of_bindings = true,
};

static const struct atmel_nand_controller_caps atmel_sam9g45_nand_caps = {
	.has_dma = true,
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ops = &atmel_smc_nc_ops,
	.legacy_of_bindings = true,
};
2088
/*
 * DT match table. The "*-nand-controller" entries are the new bindings;
 * the "*-nand" entries keep deprecated DTs working (see the legacy
 * fixups in atmel_nand_controller_probe()).
 */
static const struct of_device_id atmel_nand_controller_of_ids[] = {
	{
		.compatible = "atmel,at91rm9200-nand-controller",
		.data = &atmel_rm9200_nc_caps,
	},
	{
		.compatible = "atmel,at91sam9260-nand-controller",
		.data = &atmel_rm9200_nc_caps,
	},
	{
		.compatible = "atmel,at91sam9261-nand-controller",
		.data = &atmel_sam9261_nc_caps,
	},
	{
		.compatible = "atmel,at91sam9g45-nand-controller",
		.data = &atmel_sam9g45_nc_caps,
	},
	{
		.compatible = "atmel,sama5d3-nand-controller",
		.data = &atmel_sama5_nc_caps,
	},
	/* Support for old/deprecated bindings: */
	{
		.compatible = "atmel,at91rm9200-nand",
		.data = &atmel_rm9200_nand_caps,
	},
	{
		.compatible = "atmel,sama5d4-nand",
		.data = &atmel_rm9200_nand_caps,
	},
	{
		.compatible = "atmel,sama5d2-nand",
		.data = &atmel_rm9200_nand_caps,
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, atmel_nand_controller_of_ids);
2126
2127static int atmel_nand_controller_probe(struct platform_device *pdev)
2128{
2129 const struct atmel_nand_controller_caps *caps;
2130
2131 if (pdev->id_entry)
2132 caps = (void *)pdev->id_entry->driver_data;
2133 else
2134 caps = of_device_get_match_data(&pdev->dev);
2135
2136 if (!caps) {
2137 dev_err(&pdev->dev, "Could not retrieve NFC caps\n");
2138 return -EINVAL;
2139 }
2140
2141 if (caps->legacy_of_bindings) {
2142 u32 ale_offs = 21;
2143
2144 /*
2145 * If we are parsing legacy DT props and the DT contains a
2146 * valid NFC node, forward the request to the sama5 logic.
2147 */
2148 if (of_find_compatible_node(pdev->dev.of_node, NULL,
2149 "atmel,sama5d3-nfc"))
2150 caps = &atmel_sama5_nand_caps;
2151
2152 /*
2153 * Even if the compatible says we are dealing with an
2154 * at91rm9200 controller, the atmel,nand-has-dma specify that
2155 * this controller supports DMA, which means we are in fact
2156 * dealing with an at91sam9g45+ controller.
2157 */
2158 if (!caps->has_dma &&
2159 of_property_read_bool(pdev->dev.of_node,
2160 "atmel,nand-has-dma"))
2161 caps = &atmel_sam9g45_nand_caps;
2162
2163 /*
2164 * All SoCs except the at91sam9261 are assigning ALE to A21 and
2165 * CLE to A22. If atmel,nand-addr-offset != 21 this means we're
2166 * actually dealing with an at91sam9261 controller.
2167 */
2168 of_property_read_u32(pdev->dev.of_node,
2169 "atmel,nand-addr-offset", &ale_offs);
2170 if (ale_offs != 21)
2171 caps = &atmel_sam9261_nand_caps;
2172 }
2173
2174 return caps->ops->probe(pdev, caps);
2175}
2176
2177static int atmel_nand_controller_remove(struct platform_device *pdev)
2178{
2179 struct atmel_nand_controller *nc = platform_get_drvdata(pdev);
2180
2181 return nc->caps->ops->remove(nc);
2182}
2183
static struct platform_driver atmel_nand_controller_driver = {
	.driver = {
		.name = "atmel-nand-controller",
		/* of_match_ptr() compiles the table out when !CONFIG_OF. */
		.of_match_table = of_match_ptr(atmel_nand_controller_of_ids),
	},
	.probe = atmel_nand_controller_probe,
	.remove = atmel_nand_controller_remove,
};
module_platform_driver(atmel_nand_controller_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_DESCRIPTION("NAND Flash Controller driver for Atmel SoCs");
MODULE_ALIAS("platform:atmel-nand-controller");
diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c
new file mode 100644
index 000000000000..55a8ee5306ea
--- /dev/null
+++ b/drivers/mtd/nand/atmel/pmecc.c
@@ -0,0 +1,1020 @@
1/*
2 * Copyright 2017 ATMEL
3 * Copyright 2017 Free Electrons
4 *
5 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
6 *
7 * Derived from the atmel_nand.c driver which contained the following
8 * copyrights:
9 *
10 * Copyright 2003 Rick Bronson
11 *
12 * Derived from drivers/mtd/nand/autcpu12.c
13 * Copyright 2001 Thomas Gleixner (gleixner@autronix.de)
14 *
15 * Derived from drivers/mtd/spia.c
16 * Copyright 2000 Steven J. Hill (sjhill@cotw.com)
17 *
18 * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
19 * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright 2007
20 *
21 * Derived from Das U-Boot source code
22 * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
23 * Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
24 *
25 * Add Programmable Multibit ECC support for various AT91 SoC
26 * Copyright 2012 ATMEL, Hong Xu
27 *
28 * Add Nand Flash Controller support for SAMA5 SoC
29 * Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
30 *
31 * This program is free software; you can redistribute it and/or modify
32 * it under the terms of the GNU General Public License version 2 as
33 * published by the Free Software Foundation.
34 *
35 * The PMECC is an hardware assisted BCH engine, which means part of the
36 * ECC algorithm is left to the software. The hardware/software repartition
37 * is explained in the "PMECC Controller Functional Description" chapter in
38 * Atmel datasheets, and some of the functions in this file are directly
39 * implementing the algorithms described in the "Software Implementation"
40 * sub-section.
41 *
42 * TODO: it seems that the software BCH implementation in lib/bch.c is already
43 * providing some of the logic we are implementing here. It would be smart
44 * to expose the needed lib/bch.c helpers/functions and re-use them here.
45 */
46
47#include <linux/genalloc.h>
48#include <linux/iopoll.h>
49#include <linux/module.h>
50#include <linux/mtd/nand.h>
51#include <linux/of_irq.h>
52#include <linux/of_platform.h>
53#include <linux/platform_device.h>
54#include <linux/slab.h>
55
56#include "pmecc.h"
57
/* Galois field dimension */
#define PMECC_GF_DIMENSION_13			13
#define PMECC_GF_DIMENSION_14			14

/* Primitive Polynomial used by PMECC */
#define PMECC_GF_13_PRIMITIVE_POLY		0x201b
#define PMECC_GF_14_PRIMITIVE_POLY		0x4443

/* Size (in u16 entries) of one GF log/anti-log lookup table. */
#define PMECC_LOOKUP_TABLE_SIZE_512		0x2000
#define PMECC_LOOKUP_TABLE_SIZE_1024		0x4000

/* Time out value for reading PMECC status register */
#define PMECC_MAX_TIMEOUT_MS			100

/* PMECC Register Definitions */
#define ATMEL_PMECC_CFG				0x0
#define PMECC_CFG_BCH_STRENGTH(x)		(x)
#define PMECC_CFG_BCH_STRENGTH_MASK		GENMASK(2, 0)
#define PMECC_CFG_SECTOR512			(0 << 4)
#define PMECC_CFG_SECTOR1024			(1 << 4)
#define PMECC_CFG_NSECTORS(x)			((fls(x) - 1) << 8)
#define PMECC_CFG_READ_OP			(0 << 12)
#define PMECC_CFG_WRITE_OP			(1 << 12)
#define PMECC_CFG_SPARE_ENABLE			BIT(16)
#define PMECC_CFG_AUTO_ENABLE			BIT(20)

#define ATMEL_PMECC_SAREA			0x4
#define ATMEL_PMECC_SADDR			0x8
#define ATMEL_PMECC_EADDR			0xc

#define ATMEL_PMECC_CLK				0x10
#define PMECC_CLK_133MHZ			(2 << 0)

#define ATMEL_PMECC_CTRL			0x14
#define PMECC_CTRL_RST				BIT(0)
#define PMECC_CTRL_DATA				BIT(1)
#define PMECC_CTRL_USER				BIT(2)
#define PMECC_CTRL_ENABLE			BIT(4)
#define PMECC_CTRL_DISABLE			BIT(5)

#define ATMEL_PMECC_SR				0x18
#define PMECC_SR_BUSY				BIT(0)
#define PMECC_SR_ENABLE				BIT(4)

#define ATMEL_PMECC_IER				0x1c
#define ATMEL_PMECC_IDR				0x20
#define ATMEL_PMECC_IMR				0x24
#define ATMEL_PMECC_ISR				0x28
#define PMECC_ERROR_INT				BIT(0)

/* Per-sector ECC byte / remainder (partial syndrome) register offsets. */
#define ATMEL_PMECC_ECC(sector, n)		\
	((((sector) + 1) * 0x40) + (n))

#define ATMEL_PMECC_REM(sector, n)		\
	((((sector) + 1) * 0x40) + ((n) * 4) + 0x200)

/* PMERRLOC Register Definitions */
#define ATMEL_PMERRLOC_ELCFG			0x0
#define PMERRLOC_ELCFG_SECTOR_512		(0 << 0)
#define PMERRLOC_ELCFG_SECTOR_1024		(1 << 0)
#define PMERRLOC_ELCFG_NUM_ERRORS(n)		((n) << 16)

#define ATMEL_PMERRLOC_ELPRIM			0x4
#define ATMEL_PMERRLOC_ELEN			0x8
#define ATMEL_PMERRLOC_ELDIS			0xc
#define PMERRLOC_DISABLE			BIT(0)

#define ATMEL_PMERRLOC_ELSR			0x10
#define PMERRLOC_ELSR_BUSY			BIT(0)

#define ATMEL_PMERRLOC_ELIER			0x14
#define ATMEL_PMERRLOC_ELIDR			0x18
#define ATMEL_PMERRLOC_ELIMR			0x1c
#define ATMEL_PMERRLOC_ELISR			0x20
#define PMERRLOC_ERR_NUM_MASK			GENMASK(12, 8)
#define PMERRLOC_CALC_DONE			BIT(0)

#define ATMEL_PMERRLOC_SIGMA(x)			(((x) * 0x4) + 0x28)

/* Error-location registers start at a per-SoC offset (caps->el_offset). */
#define ATMEL_PMERRLOC_EL(offs, x)		(((x) * 0x4) + (offs))
138
/* Log/anti-log tables for the Galois field used by the BCH code. */
struct atmel_pmecc_gf_tables {
	u16 *alpha_to;	/* alpha_to[i] = alpha^i (log -> field element) */
	u16 *index_of;	/* index_of[x] = log_alpha(x) (element -> log) */
};

/* Per-SoC PMECC capabilities. */
struct atmel_pmecc_caps {
	const int *strengths;	/* supported strengths, indexed by CFG value */
	int nstrengths;
	int el_offset;		/* base offset of the PMERRLOC_EL registers */
	bool correct_erased_chunks;
};

/* One PMECC engine instance. */
struct atmel_pmecc {
	struct device *dev;
	const struct atmel_pmecc_caps *caps;

	struct {
		void __iomem *base;	/* PMECC registers */
		void __iomem *errloc;	/* PMERRLOC registers */
	} regs;

	/* Serializes users' access to the single HW engine. */
	struct mutex lock;
};

/* Register values cached per user and applied on atmel_pmecc_enable(). */
struct atmel_pmecc_user_conf_cache {
	u32 cfg;
	u32 sarea;
	u32 saddr;
	u32 eaddr;
};

/*
 * Per-NAND-chip PMECC state. The s16/s32 scratch arrays below all point
 * into a single allocation laid out by atmel_pmecc_create_user().
 */
struct atmel_pmecc_user {
	struct atmel_pmecc_user_conf_cache cache;
	struct atmel_pmecc *pmecc;
	const struct atmel_pmecc_gf_tables *gf_tables;
	int eccbytes;		/* ECC bytes per sector */
	s16 *partial_syn;
	s16 *si;
	s16 *lmu;
	s16 *smu;
	s32 *mu;
	s32 *dmu;
	s32 *delta;
	u32 isr;
};

/* GF tables are expensive to build: build once, share across all users. */
static DEFINE_MUTEX(pmecc_gf_tables_lock);
static const struct atmel_pmecc_gf_tables *pmecc_gf_tables_512;
static const struct atmel_pmecc_gf_tables *pmecc_gf_tables_1024;
188
/* Return the degree of a polynomial in GF(2)[x] given as a bitmask. */
static inline int deg(unsigned int poly)
{
	/* polynomial degree is the most-significant bit index */
	return fls(poly) - 1;
}
194
/*
 * Fill the alpha_to (log -> element) and index_of (element -> log)
 * tables for GF(2^mm) generated by the primitive polynomial 'poly'.
 * Returns -EINVAL when poly is not of degree mm or is not primitive.
 */
static int atmel_pmecc_build_gf_tables(int mm, unsigned int poly,
				       struct atmel_pmecc_gf_tables *gf_tables)
{
	unsigned int i, x = 1;
	const unsigned int k = BIT(deg(poly));
	unsigned int nn = BIT(mm) - 1;

	/* primitive polynomial must be of degree m */
	if (k != (1u << mm))
		return -EINVAL;

	/* Generate the field by repeated multiplication by alpha (x). */
	for (i = 0; i < nn; i++) {
		gf_tables->alpha_to[i] = x;
		gf_tables->index_of[x] = i;
		if (i && (x == 1))
			/* polynomial is not primitive (a^i=1 with 0<i<2^m-1) */
			return -EINVAL;
		x <<= 1;
		if (x & k)
			x ^= poly;
	}
	gf_tables->alpha_to[nn] = 1;
	gf_tables->index_of[0] = 0;

	return 0;
}
221
/*
 * Allocate and build the GF(2^13) (512-byte sectors) or GF(2^14)
 * (1024-byte sectors) lookup tables. Both tables live in the tail of a
 * single allocation, right after the struct.
 */
static const struct atmel_pmecc_gf_tables *
atmel_pmecc_create_gf_tables(const struct atmel_pmecc_user_req *req)
{
	struct atmel_pmecc_gf_tables *gf_tables;
	unsigned int poly, degree, table_size;
	int ret;

	if (req->ecc.sectorsize == 512) {
		degree = PMECC_GF_DIMENSION_13;
		poly = PMECC_GF_13_PRIMITIVE_POLY;
		table_size = PMECC_LOOKUP_TABLE_SIZE_512;
	} else {
		degree = PMECC_GF_DIMENSION_14;
		poly = PMECC_GF_14_PRIMITIVE_POLY;
		table_size = PMECC_LOOKUP_TABLE_SIZE_1024;
	}

	/* One allocation: struct + alpha_to[] + index_of[]. */
	gf_tables = kzalloc(sizeof(*gf_tables) +
			    (2 * table_size * sizeof(u16)),
			    GFP_KERNEL);
	if (!gf_tables)
		return ERR_PTR(-ENOMEM);

	gf_tables->alpha_to = (void *)(gf_tables + 1);
	gf_tables->index_of = gf_tables->alpha_to + table_size;

	ret = atmel_pmecc_build_gf_tables(degree, poly, gf_tables);
	if (ret) {
		kfree(gf_tables);
		return ERR_PTR(ret);
	}

	return gf_tables;
}
256
257static const struct atmel_pmecc_gf_tables *
258atmel_pmecc_get_gf_tables(const struct atmel_pmecc_user_req *req)
259{
260 const struct atmel_pmecc_gf_tables **gf_tables, *ret;
261
262 mutex_lock(&pmecc_gf_tables_lock);
263 if (req->ecc.sectorsize == 512)
264 gf_tables = &pmecc_gf_tables_512;
265 else
266 gf_tables = &pmecc_gf_tables_1024;
267
268 ret = *gf_tables;
269
270 if (!ret) {
271 ret = atmel_pmecc_create_gf_tables(req);
272 if (!IS_ERR(ret))
273 *gf_tables = ret;
274 }
275 mutex_unlock(&pmecc_gf_tables_lock);
276
277 return ret;
278}
279
/*
 * Validate and complete a PMECC user request: resolve the "auto" sector
 * size, pick an ECC strength (smallest one satisfying the request, or
 * the strongest one fitting in the OOB area when maximizing), and fill
 * in the resulting byte count and default OOB offset.
 */
static int atmel_pmecc_prepare_user_req(struct atmel_pmecc *pmecc,
					struct atmel_pmecc_user_req *req)
{
	int i, max_eccbytes, eccbytes = 0, eccstrength = 0;

	if (req->pagesize <= 0 || req->oobsize <= 0 || req->ecc.bytes <= 0)
		return -EINVAL;

	/* When an OOB offset is given, the ECC area must fit in the OOB. */
	if (req->ecc.ooboffset >= 0 &&
	    req->ecc.ooboffset + req->ecc.bytes > req->oobsize)
		return -EINVAL;

	if (req->ecc.sectorsize == ATMEL_PMECC_SECTOR_SIZE_AUTO) {
		if (req->ecc.strength != ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH)
			return -EINVAL;

		if (req->pagesize > 512)
			req->ecc.sectorsize = 1024;
		else
			req->ecc.sectorsize = 512;
	}

	if (req->ecc.sectorsize != 512 && req->ecc.sectorsize != 1024)
		return -EINVAL;

	if (req->pagesize % req->ecc.sectorsize)
		return -EINVAL;

	req->ecc.nsectors = req->pagesize / req->ecc.sectorsize;

	max_eccbytes = req->ecc.bytes;

	/*
	 * NOTE(review): the "maximize" path keeps the last fitting entry,
	 * which assumes caps->strengths is sorted ascending — confirm
	 * against the per-SoC caps definitions.
	 */
	for (i = 0; i < pmecc->caps->nstrengths; i++) {
		int nbytes, strength = pmecc->caps->strengths[i];

		if (req->ecc.strength != ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH &&
		    strength < req->ecc.strength)
			continue;

		/* ceil(strength * m / 8) bytes per sector, m = GF dimension */
		nbytes = DIV_ROUND_UP(strength * fls(8 * req->ecc.sectorsize),
				      8);
		nbytes *= req->ecc.nsectors;

		if (nbytes > max_eccbytes)
			break;

		eccstrength = strength;
		eccbytes = nbytes;

		if (req->ecc.strength != ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH)
			break;
	}

	if (!eccstrength)
		return -EINVAL;

	req->ecc.bytes = eccbytes;
	req->ecc.strength = eccstrength;

	/* Negative offset means "place the ECC at the end of the OOB". */
	if (req->ecc.ooboffset < 0)
		req->ecc.ooboffset = req->oobsize - eccbytes;

	return 0;
}
344
345struct atmel_pmecc_user *
346atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
347 struct atmel_pmecc_user_req *req)
348{
349 struct atmel_pmecc_user *user;
350 const struct atmel_pmecc_gf_tables *gf_tables;
351 int strength, size, ret;
352
353 ret = atmel_pmecc_prepare_user_req(pmecc, req);
354 if (ret)
355 return ERR_PTR(ret);
356
357 size = sizeof(*user);
358 size = ALIGN(size, sizeof(u16));
359 /* Reserve space for partial_syn, si and smu */
360 size += ((2 * req->ecc.strength) + 1) * sizeof(u16) *
361 (2 + req->ecc.strength + 2);
362 /* Reserve space for lmu. */
363 size += (req->ecc.strength + 1) * sizeof(u16);
364 /* Reserve space for mu, dmu and delta. */
365 size = ALIGN(size, sizeof(s32));
366 size += (req->ecc.strength + 1) * sizeof(s32);
367
368 user = kzalloc(size, GFP_KERNEL);
369 if (!user)
370 return ERR_PTR(-ENOMEM);
371
372 user->pmecc = pmecc;
373
374 user->partial_syn = (s16 *)PTR_ALIGN(user + 1, sizeof(u16));
375 user->si = user->partial_syn + ((2 * req->ecc.strength) + 1);
376 user->lmu = user->si + ((2 * req->ecc.strength) + 1);
377 user->smu = user->lmu + (req->ecc.strength + 1);
378 user->mu = (s32 *)PTR_ALIGN(user->smu +
379 (((2 * req->ecc.strength) + 1) *
380 (req->ecc.strength + 2)),
381 sizeof(s32));
382 user->dmu = user->mu + req->ecc.strength + 1;
383 user->delta = user->dmu + req->ecc.strength + 1;
384
385 gf_tables = atmel_pmecc_get_gf_tables(req);
386 if (IS_ERR(gf_tables)) {
387 kfree(user);
388 return ERR_CAST(gf_tables);
389 }
390
391 user->gf_tables = gf_tables;
392
393 user->eccbytes = req->ecc.bytes / req->ecc.nsectors;
394
395 for (strength = 0; strength < pmecc->caps->nstrengths; strength++) {
396 if (pmecc->caps->strengths[strength] == req->ecc.strength)
397 break;
398 }
399
400 user->cache.cfg = PMECC_CFG_BCH_STRENGTH(strength) |
401 PMECC_CFG_NSECTORS(req->ecc.nsectors);
402
403 if (req->ecc.sectorsize == 1024)
404 user->cache.cfg |= PMECC_CFG_SECTOR1024;
405
406 user->cache.sarea = req->oobsize - 1;
407 user->cache.saddr = req->ecc.ooboffset;
408 user->cache.eaddr = req->ecc.ooboffset + req->ecc.bytes - 1;
409
410 return user;
411}
412EXPORT_SYMBOL_GPL(atmel_pmecc_create_user);
413
/*
 * Release a user created by atmel_pmecc_create_user(). The shared GF
 * tables are intentionally not freed (they are cached module-wide).
 */
void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user)
{
	kfree(user);
}
EXPORT_SYMBOL_GPL(atmel_pmecc_destroy_user);
419
/* Decode the ECC strength (correctable bits) from the cached PMECC_CFG. */
static int get_strength(struct atmel_pmecc_user *user)
{
	const int *strengths = user->pmecc->caps->strengths;

	/* The low CFG bits index the per-SoC strengths table. */
	return strengths[user->cache.cfg & PMECC_CFG_BCH_STRENGTH_MASK];
}
426
427static int get_sectorsize(struct atmel_pmecc_user *user)
428{
429 return user->cache.cfg & PMECC_LOOKUP_TABLE_SIZE_1024 ? 1024 : 512;
430}
431
/*
 * Read the partial syndromes the engine computed for 'sector' into
 * user->partial_syn[]. Each 32-bit REM register packs two 16-bit
 * remainders; only odd syndromes come from the hardware (the even ones
 * are derived in atmel_pmecc_substitute()).
 */
static void atmel_pmecc_gen_syndrome(struct atmel_pmecc_user *user, int sector)
{
	int strength = get_strength(user);
	u32 value;
	int i;

	/* Fill odd syndromes */
	for (i = 0; i < strength; i++) {
		value = readl_relaxed(user->pmecc->regs.base +
				      ATMEL_PMECC_REM(sector, i / 2));
		/* Odd i: remainder is in the upper half-word. */
		if (i & 1)
			value >>= 16;

		user->partial_syn[(2 * i) + 1] = value;
	}
}
448
/*
 * Expand the partial syndromes into the full set of 2t syndromes
 * S1..S2t over GF(2^degree) ("substitution" step of the BCH decoding
 * described in Atmel's PMECC "Software Implementation" documentation).
 */
static void atmel_pmecc_substitute(struct atmel_pmecc_user *user)
{
	int degree = get_sectorsize(user) == 512 ? 13 : 14;
	int cw_len = BIT(degree) - 1;
	int strength = get_strength(user);
	s16 *alpha_to = user->gf_tables->alpha_to;
	s16 *index_of = user->gf_tables->index_of;
	s16 *partial_syn = user->partial_syn;
	s16 *si;
	int i, j;

	/*
	 * si[] is a table that holds the current syndrome value,
	 * an element of that table belongs to the field
	 */
	si = user->si;

	memset(&si[1], 0, sizeof(s16) * ((2 * strength) - 1));

	/* Computation 2t syndromes based on S(x) */
	/* Odd syndromes */
	for (i = 1; i < 2 * strength; i += 2) {
		for (j = 0; j < degree; j++) {
			if (partial_syn[i] & BIT(j))
				si[i] = alpha_to[i * j] ^ si[i];
		}
	}
	/* Even syndrome = (Odd syndrome) ** 2 */
	for (i = 2, j = 1; j <= strength; i = ++j << 1) {
		if (si[j] == 0) {
			si[i] = 0;
		} else {
			s16 tmp;

			/* Squaring doubles the discrete log, mod 2^m - 1. */
			tmp = index_of[si[j]];
			tmp = (tmp * 2) % cw_len;
			si[i] = alpha_to[tmp];
		}
	}
}
489
/*
 * Berlekamp's iterative algorithm: derive the error-locator polynomial
 * sigma(x) from the syndromes in si[]. The result is stored in the last
 * row of smu[] (row strength + 1), with its degree*2 in
 * lmu[strength + 1]. The row variables (mu, lmu, dmu, delta) follow the
 * tables of Atmel's PMECC "Software Implementation" documentation.
 */
static void atmel_pmecc_get_sigma(struct atmel_pmecc_user *user)
{
	s16 *lmu = user->lmu;
	s16 *si = user->si;
	s32 *mu = user->mu;
	s32 *dmu = user->dmu;
	s32 *delta = user->delta;
	int degree = get_sectorsize(user) == 512 ? 13 : 14;
	int cw_len = BIT(degree) - 1;
	int strength = get_strength(user);
	int num = 2 * strength + 1;
	s16 *index_of = user->gf_tables->index_of;
	s16 *alpha_to = user->gf_tables->alpha_to;
	int i, j, k;
	u32 dmu_0_count, tmp;
	s16 *smu = user->smu;

	/* index of largest delta */
	int ro;
	int largest;
	int diff;

	dmu_0_count = 0;

	/* First Row */

	/* Mu */
	mu[0] = -1;

	memset(smu, 0, sizeof(s16) * num);
	smu[0] = 1;

	/* discrepancy set to 1 */
	dmu[0] = 1;
	/* polynom order set to 0 */
	lmu[0] = 0;
	delta[0] = (mu[0] * 2 - lmu[0]) >> 1;

	/* Second Row */

	/* Mu */
	mu[1] = 0;
	/* Sigma(x) set to 1 */
	memset(&smu[num], 0, sizeof(s16) * num);
	smu[num] = 1;

	/* discrepancy set to S1 */
	dmu[1] = si[1];

	/* polynom order set to 0 */
	lmu[1] = 0;

	delta[1] = (mu[1] * 2 - lmu[1]) >> 1;

	/* Init the Sigma(x) last row */
	memset(&smu[(strength + 1) * num], 0, sizeof(s16) * num);

	for (i = 1; i <= strength; i++) {
		mu[i + 1] = i << 1;
		/* Begin Computing Sigma (Mu+1) and L(mu) */
		/* check if discrepancy is set to 0 */
		if (dmu[i] == 0) {
			dmu_0_count++;

			/*
			 * Enough consecutive zero discrepancies: the
			 * algorithm has converged, publish row i as the
			 * final sigma(x) and stop early.
			 */
			tmp = ((strength - (lmu[i] >> 1) - 1) / 2);
			if ((strength - (lmu[i] >> 1) - 1) & 0x1)
				tmp += 2;
			else
				tmp += 1;

			if (dmu_0_count == tmp) {
				for (j = 0; j <= (lmu[i] >> 1) + 1; j++)
					smu[(strength + 1) * num + j] =
							smu[i * num + j];

				lmu[strength + 1] = lmu[i];
				return;
			}

			/* copy polynom */
			for (j = 0; j <= lmu[i] >> 1; j++)
				smu[(i + 1) * num + j] = smu[i * num + j];

			/* copy previous polynom order to the next */
			lmu[i + 1] = lmu[i];
		} else {
			ro = 0;
			largest = -1;
			/* find largest delta with dmu != 0 */
			for (j = 0; j < i; j++) {
				if ((dmu[j]) && (delta[j] > largest)) {
					largest = delta[j];
					ro = j;
				}
			}

			/* compute difference */
			diff = (mu[i] - mu[ro]);

			/* Compute degree of the new smu polynomial */
			if ((lmu[i] >> 1) > ((lmu[ro] >> 1) + diff))
				lmu[i + 1] = lmu[i];
			else
				lmu[i + 1] = ((lmu[ro] >> 1) + diff) * 2;

			/* Init smu[i+1] with 0 */
			for (k = 0; k < num; k++)
				smu[(i + 1) * num + k] = 0;

			/* Compute smu[i+1] */
			for (k = 0; k <= lmu[ro] >> 1; k++) {
				s16 a, b, c;

				if (!(smu[ro * num + k] && dmu[i]))
					continue;

				/* GF multiply/divide via discrete logs. */
				a = index_of[dmu[i]];
				b = index_of[dmu[ro]];
				c = index_of[smu[ro * num + k]];
				tmp = a + (cw_len - b) + c;
				a = alpha_to[tmp % cw_len];
				smu[(i + 1) * num + (k + diff)] = a;
			}

			for (k = 0; k <= lmu[i] >> 1; k++)
				smu[(i + 1) * num + k] ^= smu[i * num + k];
		}

		/* End Computing Sigma (Mu+1) and L(mu) */
		/* In either case compute delta */
		delta[i + 1] = (mu[i + 1] * 2 - lmu[i + 1]) >> 1;

		/* Do not compute discrepancy for the last iteration */
		if (i >= strength)
			continue;

		for (k = 0; k <= (lmu[i + 1] >> 1); k++) {
			tmp = 2 * (i - 1);
			if (k == 0) {
				dmu[i + 1] = si[tmp + 3];
			} else if (smu[(i + 1) * num + k] && si[tmp + 3 - k]) {
				s16 a, b, c;

				a = index_of[smu[(i + 1) * num + k]];
				b = si[2 * (i - 1) + 3 - k];
				c = index_of[b];
				tmp = a + c;
				tmp %= cw_len;
				dmu[i + 1] = alpha_to[tmp] ^ dmu[i + 1];
			}
		}
	}
}
643
/*
 * Load the sigma(x) coefficients computed by atmel_pmecc_get_sigma()
 * into the PMERRLOC block and let it search for the error positions.
 * Returns the number of errors found, or -EBADMSG when the number of
 * roots does not match sigma's degree (uncorrectable sector).
 */
static int atmel_pmecc_err_location(struct atmel_pmecc_user *user)
{
	int sector_size = get_sectorsize(user);
	int degree = sector_size == 512 ? 13 : 14;
	struct atmel_pmecc *pmecc = user->pmecc;
	int strength = get_strength(user);
	int ret, roots_nbr, i, err_nbr = 0;
	int num = (2 * strength) + 1;
	s16 *smu = user->smu;
	u32 val;

	writel(PMERRLOC_DISABLE, pmecc->regs.errloc + ATMEL_PMERRLOC_ELDIS);

	/* Load the coefficients of the final sigma(x) (last smu row). */
	for (i = 0; i <= user->lmu[strength + 1] >> 1; i++) {
		writel_relaxed(smu[(strength + 1) * num + i],
			       pmecc->regs.errloc + ATMEL_PMERRLOC_SIGMA(i));
		err_nbr++;
	}

	val = (err_nbr - 1) << 16;
	if (sector_size == 1024)
		val |= 1;

	writel(val, pmecc->regs.errloc + ATMEL_PMERRLOC_ELCFG);
	/* Search length: data bits plus the ECC bits of this sector. */
	writel((sector_size * 8) + (degree * strength),
	       pmecc->regs.errloc + ATMEL_PMERRLOC_ELEN);

	ret = readl_relaxed_poll_timeout(pmecc->regs.errloc +
					 ATMEL_PMERRLOC_ELISR,
					 val, val & PMERRLOC_CALC_DONE, 0,
					 PMECC_MAX_TIMEOUT_MS * 1000);
	if (ret) {
		dev_err(pmecc->dev,
			"PMECC: Timeout to calculate error location.\n");
		return ret;
	}

	roots_nbr = (val & PMERRLOC_ERR_NUM_MASK) >> 8;
	/* Number of roots == degree of smu hence <= cap */
	if (roots_nbr == user->lmu[strength + 1] >> 1)
		return err_nbr - 1;

	/*
	 * Number of roots does not match the degree of smu
	 * unable to correct error.
	 */
	return -EBADMSG;
}
692
/*
 * Correct the bitflips in one sector of a page. user->isr is expected
 * to hold the per-sector error flags (presumably latched from PMECC_ISR
 * by the caller — confirm against the NAND controller driver). Returns
 * the number of corrected bitflips, 0 when the sector is clean, or a
 * negative error code.
 */
int atmel_pmecc_correct_sector(struct atmel_pmecc_user *user, int sector,
			       void *data, void *ecc)
{
	struct atmel_pmecc *pmecc = user->pmecc;
	int sectorsize = get_sectorsize(user);
	int eccbytes = user->eccbytes;
	int i, nerrors;

	/* Nothing to do when the engine flagged no error on this sector. */
	if (!(user->isr & BIT(sector)))
		return 0;

	/* BCH decode: syndromes -> sigma(x) -> error locations. */
	atmel_pmecc_gen_syndrome(user, sector);
	atmel_pmecc_substitute(user);
	atmel_pmecc_get_sigma(user);

	nerrors = atmel_pmecc_err_location(user);
	if (nerrors < 0)
		return nerrors;

	for (i = 0; i < nerrors; i++) {
		const char *area;
		int byte, bit;
		u32 errpos;
		u8 *ptr;

		/* Error positions are reported as 1-based bit indices. */
		errpos = readl_relaxed(pmecc->regs.errloc +
				ATMEL_PMERRLOC_EL(pmecc->caps->el_offset, i));
		errpos--;

		byte = errpos / 8;
		bit = errpos % 8;

		/* The codeword is data followed by the sector's ECC bytes. */
		if (byte < sectorsize) {
			ptr = data + byte;
			area = "data";
		} else if (byte < sectorsize + eccbytes) {
			ptr = ecc + byte - sectorsize;
			area = "ECC";
		} else {
			dev_dbg(pmecc->dev,
				"Invalid errpos value (%d, max is %d)\n",
				errpos, (sectorsize + eccbytes) * 8);
			return -EINVAL;
		}

		dev_dbg(pmecc->dev,
			"Bit flip in %s area, byte %d: 0x%02x -> 0x%02x\n",
			area, byte, *ptr, (unsigned int)(*ptr ^ BIT(bit)));

		*ptr ^= BIT(bit);
	}

	return nerrors;
}
EXPORT_SYMBOL_GPL(atmel_pmecc_correct_sector);
748
749bool atmel_pmecc_correct_erased_chunks(struct atmel_pmecc_user *user)
750{
751 return user->pmecc->caps->correct_erased_chunks;
752}
753EXPORT_SYMBOL_GPL(atmel_pmecc_correct_erased_chunks);
754
/**
 * atmel_pmecc_get_generated_eccbytes() - read back hardware-computed ECC
 * @user: PMECC user context
 * @sector: sector index within the page
 * @ecc: output buffer, receives user->eccbytes bytes
 *
 * Copies the ECC bytes computed by the engine for @sector out of the
 * PMECC registers into @ecc (typically to be written to the OOB area).
 */
void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user,
					int sector, void *ecc)
{
	struct atmel_pmecc *pmecc = user->pmecc;
	u8 *ptr = ecc;
	int i;

	for (i = 0; i < user->eccbytes; i++)
		ptr[i] = readb_relaxed(pmecc->regs.base +
				       ATMEL_PMECC_ECC(sector, i));
}
EXPORT_SYMBOL_GPL(atmel_pmecc_get_generated_eccbytes);
767
/**
 * atmel_pmecc_enable() - configure and start the ECC engine
 * @user: PMECC user context
 * @op: NAND_ECC_READ or NAND_ECC_WRITE
 *
 * Takes the PMECC lock (released by atmel_pmecc_disable()), programs the
 * user's cached configuration and starts the engine in data mode.
 *
 * Return: 0 on success, -EINVAL for any other @op value.
 */
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op)
{
	struct atmel_pmecc *pmecc = user->pmecc;
	u32 cfg;

	if (op != NAND_ECC_READ && op != NAND_ECC_WRITE) {
		dev_err(pmecc->dev, "Bad ECC operation!");
		return -EINVAL;
	}

	/* Serialize engine usage; unlocked in atmel_pmecc_disable(). */
	mutex_lock(&user->pmecc->lock);

	cfg = user->cache.cfg;
	if (op == NAND_ECC_WRITE)
		cfg |= PMECC_CFG_WRITE_OP;
	else
		cfg |= PMECC_CFG_AUTO_ENABLE;

	writel(cfg, pmecc->regs.base + ATMEL_PMECC_CFG);
	writel(user->cache.sarea, pmecc->regs.base + ATMEL_PMECC_SAREA);
	writel(user->cache.saddr, pmecc->regs.base + ATMEL_PMECC_SADDR);
	writel(user->cache.eaddr, pmecc->regs.base + ATMEL_PMECC_EADDR);

	/* Enable the engine, then switch it to data mode. */
	writel(PMECC_CTRL_ENABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
	writel(PMECC_CTRL_DATA, pmecc->regs.base + ATMEL_PMECC_CTRL);

	return 0;
}
EXPORT_SYMBOL_GPL(atmel_pmecc_enable);
797
/**
 * atmel_pmecc_disable() - stop the ECC engine and release the PMECC lock
 * @user: PMECC user context
 *
 * Counterpart of atmel_pmecc_enable(): resets and disables the engine,
 * then drops the lock taken in atmel_pmecc_enable().
 */
void atmel_pmecc_disable(struct atmel_pmecc_user *user)
{
	struct atmel_pmecc *pmecc = user->pmecc;

	writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
	writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
	mutex_unlock(&user->pmecc->lock);
}
EXPORT_SYMBOL_GPL(atmel_pmecc_disable);
807
/**
 * atmel_pmecc_wait_rdy() - wait for the ECC engine to become idle
 * @user: PMECC user context
 *
 * Polls the status register until the BUSY bit clears, then latches the
 * per-sector error flags (ISR) into @user for later use by
 * atmel_pmecc_correct_sector().
 *
 * Return: 0 on success, a negative error code on timeout.
 */
int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user)
{
	struct atmel_pmecc *pmecc = user->pmecc;
	u32 status;
	int ret;

	ret = readl_relaxed_poll_timeout(pmecc->regs.base +
					 ATMEL_PMECC_SR,
					 status, !(status & PMECC_SR_BUSY), 0,
					 PMECC_MAX_TIMEOUT_MS * 1000);
	if (ret) {
		dev_err(pmecc->dev,
			"Timeout while waiting for PMECC ready.\n");
		return ret;
	}

	/* Reading ISR also tells us which sectors have errors. */
	user->isr = readl_relaxed(pmecc->regs.base + ATMEL_PMECC_ISR);

	return 0;
}
EXPORT_SYMBOL_GPL(atmel_pmecc_wait_rdy);
829
/*
 * atmel_pmecc_create() - allocate and initialize a PMECC instance
 * @pdev: platform device providing the iomem resources
 * @caps: SoC-specific capabilities
 * @pmecc_res_idx: index of the PMECC register resource in @pdev
 * @errloc_res_idx: index of the PMERRLOC register resource in @pdev
 *
 * The resource indexes differ between the dedicated PMECC node (0/1) and
 * the legacy bindings where the resources live in the NAND node (1/2).
 *
 * Return: the new instance or an ERR_PTR().
 */
static struct atmel_pmecc *atmel_pmecc_create(struct platform_device *pdev,
					const struct atmel_pmecc_caps *caps,
					int pmecc_res_idx, int errloc_res_idx)
{
	struct device *dev = &pdev->dev;
	struct atmel_pmecc *pmecc;
	struct resource *res;

	pmecc = devm_kzalloc(dev, sizeof(*pmecc), GFP_KERNEL);
	if (!pmecc)
		return ERR_PTR(-ENOMEM);

	pmecc->caps = caps;
	pmecc->dev = dev;
	mutex_init(&pmecc->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, pmecc_res_idx);
	pmecc->regs.base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pmecc->regs.base))
		return ERR_CAST(pmecc->regs.base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, errloc_res_idx);
	pmecc->regs.errloc = devm_ioremap_resource(dev, res);
	if (IS_ERR(pmecc->regs.errloc))
		return ERR_CAST(pmecc->regs.errloc);

	/* Disable all interrupts before registering the PMECC handler. */
	writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR);

	/* Reset the ECC engine */
	writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
	writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);

	return pmecc;
}
865
866static void devm_atmel_pmecc_put(struct device *dev, void *res)
867{
868 struct atmel_pmecc **pmecc = res;
869
870 put_device((*pmecc)->dev);
871}
872
873static struct atmel_pmecc *atmel_pmecc_get_by_node(struct device *userdev,
874 struct device_node *np)
875{
876 struct platform_device *pdev;
877 struct atmel_pmecc *pmecc, **ptr;
878
879 pdev = of_find_device_by_node(np);
880 if (!pdev || !platform_get_drvdata(pdev))
881 return ERR_PTR(-EPROBE_DEFER);
882
883 ptr = devres_alloc(devm_atmel_pmecc_put, sizeof(*ptr), GFP_KERNEL);
884 if (!ptr)
885 return ERR_PTR(-ENOMEM);
886
887 get_device(&pdev->dev);
888 pmecc = platform_get_drvdata(pdev);
889
890 *ptr = pmecc;
891
892 devres_add(userdev, ptr);
893
894 return pmecc;
895}
896
/* ECC strengths (correctable bits per sector) the engine can be set to. */
static const int atmel_pmecc_strengths[] = { 2, 4, 8, 12, 24, 32 };

/* at91sam9g45: first 5 strengths (up to 24 bits), no erased-page fixup. */
static struct atmel_pmecc_caps at91sam9g45_caps = {
	.strengths = atmel_pmecc_strengths,
	.nstrengths = 5,
	.el_offset = 0x8c,
};

/* sama5d4: same strengths, but can correct bitflips in erased pages. */
static struct atmel_pmecc_caps sama5d4_caps = {
	.strengths = atmel_pmecc_strengths,
	.nstrengths = 5,
	.el_offset = 0x8c,
	.correct_erased_chunks = true,
};

/* sama5d2: all 6 strengths (up to 32 bits) and a different EL offset. */
static struct atmel_pmecc_caps sama5d2_caps = {
	.strengths = atmel_pmecc_strengths,
	.nstrengths = 6,
	.el_offset = 0xac,
	.correct_erased_chunks = true,
};

/* Legacy bindings: caps are selected from the NAND controller compatible. */
static const struct of_device_id atmel_pmecc_legacy_match[] = {
	{ .compatible = "atmel,sama5d4-nand", &sama5d4_caps },
	{ .compatible = "atmel,sama5d2-nand", &sama5d2_caps },
	{ /* sentinel */ }
};
924
/**
 * devm_atmel_pmecc_get() - get the PMECC instance attached to a NAND user
 * @userdev: device of the PMECC user (the NAND controller)
 *
 * New DT bindings reference the engine through an "ecc-engine" phandle;
 * old bindings embed the PMECC iomem resources directly in the user's
 * platform device. Both cases are handled here.
 *
 * Return: the PMECC instance, NULL when the platform has no PMECC engine,
 * or an ERR_PTR() (including -EPROBE_DEFER).
 */
struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev)
{
	struct atmel_pmecc *pmecc;
	struct device_node *np;

	if (!userdev)
		return ERR_PTR(-EINVAL);

	/* Non-DT platforms cannot describe a PMECC engine. */
	if (!userdev->of_node)
		return NULL;

	np = of_parse_phandle(userdev->of_node, "ecc-engine", 0);
	if (np) {
		pmecc = atmel_pmecc_get_by_node(userdev, np);
		of_node_put(np);
	} else {
		/*
		 * Support old DT bindings: in this case the PMECC iomem
		 * resources are directly defined in the user pdev at position
		 * 1 and 2. Extract all relevant information from there.
		 */
		struct platform_device *pdev = to_platform_device(userdev);
		const struct atmel_pmecc_caps *caps;

		/* No PMECC engine available. */
		if (!of_property_read_bool(userdev->of_node,
					   "atmel,has-pmecc"))
			return NULL;

		/* Default to the oldest (smallest) capability set. */
		caps = &at91sam9g45_caps;

		/*
		 * Try to find the NFC subnode and extract the associated caps
		 * from there.
		 */
		np = of_find_compatible_node(userdev->of_node, NULL,
					     "atmel,sama5d3-nfc");
		if (np) {
			const struct of_device_id *match;

			match = of_match_node(atmel_pmecc_legacy_match, np);
			if (match && match->data)
				caps = match->data;

			of_node_put(np);
		}

		pmecc = atmel_pmecc_create(pdev, caps, 1, 2);
	}

	return pmecc;
}
EXPORT_SYMBOL(devm_atmel_pmecc_get);
978
/* New bindings: a dedicated PMECC node selects the caps directly. */
static const struct of_device_id atmel_pmecc_match[] = {
	{ .compatible = "atmel,at91sam9g45-pmecc", &at91sam9g45_caps },
	{ .compatible = "atmel,sama5d4-pmecc", &sama5d4_caps },
	{ .compatible = "atmel,sama5d2-pmecc", &sama5d2_caps },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_pmecc_match);
986
987static int atmel_pmecc_probe(struct platform_device *pdev)
988{
989 struct device *dev = &pdev->dev;
990 const struct atmel_pmecc_caps *caps;
991 struct atmel_pmecc *pmecc;
992
993 caps = of_device_get_match_data(&pdev->dev);
994 if (!caps) {
995 dev_err(dev, "Invalid caps\n");
996 return -EINVAL;
997 }
998
999 pmecc = atmel_pmecc_create(pdev, caps, 0, 1);
1000 if (IS_ERR(pmecc))
1001 return PTR_ERR(pmecc);
1002
1003 platform_set_drvdata(pdev, pmecc);
1004
1005 return 0;
1006}
1007
/* Platform driver for the dedicated PMECC node (new DT bindings only). */
static struct platform_driver atmel_pmecc_driver = {
	.driver = {
		.name = "atmel-pmecc",
		.of_match_table = of_match_ptr(atmel_pmecc_match),
	},
	.probe = atmel_pmecc_probe,
};
module_platform_driver(atmel_pmecc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_DESCRIPTION("PMECC engine driver");
MODULE_ALIAS("platform:atmel_pmecc");
diff --git a/drivers/mtd/nand/atmel/pmecc.h b/drivers/mtd/nand/atmel/pmecc.h
new file mode 100644
index 000000000000..a8ddbfca2ea5
--- /dev/null
+++ b/drivers/mtd/nand/atmel/pmecc.h
@@ -0,0 +1,73 @@
1/*
2 * © Copyright 2016 ATMEL
3 * © Copyright 2016 Free Electrons
4 *
5 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
6 *
7 * Derived from the atmel_nand.c driver which contained the following
8 * copyrights:
9 *
10 * Copyright © 2003 Rick Bronson
11 *
12 * Derived from drivers/mtd/nand/autcpu12.c
13 * Copyright © 2001 Thomas Gleixner (gleixner@autronix.de)
14 *
15 * Derived from drivers/mtd/spia.c
16 * Copyright © 2000 Steven J. Hill (sjhill@cotw.com)
17 *
18 *
19 * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
20 * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright © 2007
21 *
22 * Derived from Das U-Boot source code
23 * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
24 * © Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
25 *
26 * Add Programmable Multibit ECC support for various AT91 SoC
27 * © Copyright 2012 ATMEL, Hong Xu
28 *
29 * Add Nand Flash Controller support for SAMA5 SoC
30 * © Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
31 *
32 * This program is free software; you can redistribute it and/or modify
33 * it under the terms of the GNU General Public License version 2 as
34 * published by the Free Software Foundation.
35 *
36 */
37
#ifndef ATMEL_PMECC_H
#define ATMEL_PMECC_H

/* Ask the driver to pick the strongest ECC the OOB area can hold. */
#define ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH 0
/* Let the driver choose the ECC sector size. */
#define ATMEL_PMECC_SECTOR_SIZE_AUTO 0
/* Let the driver place the ECC bytes within the OOB area. */
#define ATMEL_PMECC_OOBOFFSET_AUTO -1

/*
 * ECC configuration requested by a PMECC user (a NAND controller driver)
 * when creating a user context with atmel_pmecc_create_user().
 */
struct atmel_pmecc_user_req {
	int pagesize;		/* NAND page size in bytes */
	int oobsize;		/* NAND OOB size in bytes */
	struct {
		int strength;	/* bitflips per sector, or MAXIMIZE_ECC_STRENGTH */
		int bytes;	/* ECC bytes per sector */
		int sectorsize;	/* ECC sector size, or SECTOR_SIZE_AUTO */
		int nsectors;	/* number of ECC sectors per page */
		int ooboffset;	/* ECC offset in OOB, or OOBOFFSET_AUTO */
	} ecc;
};

struct atmel_pmecc *devm_atmel_pmecc_get(struct device *dev);

/* User context creation/destruction. */
struct atmel_pmecc_user *
atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
			struct atmel_pmecc_user_req *req);
void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user);

/* Per-operation engine control and correction entry points. */
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op);
void atmel_pmecc_disable(struct atmel_pmecc_user *user);
int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user);
int atmel_pmecc_correct_sector(struct atmel_pmecc_user *user, int sector,
			       void *data, void *ecc);
bool atmel_pmecc_correct_erased_chunks(struct atmel_pmecc_user *user);
void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user,
					int sector, void *ecc);

#endif /* ATMEL_PMECC_H */
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
deleted file mode 100644
index 9ebd5ecefea6..000000000000
--- a/drivers/mtd/nand/atmel_nand.c
+++ /dev/null
@@ -1,2479 +0,0 @@
1/*
2 * Copyright © 2003 Rick Bronson
3 *
4 * Derived from drivers/mtd/nand/autcpu12.c
5 * Copyright © 2001 Thomas Gleixner (gleixner@autronix.de)
6 *
7 * Derived from drivers/mtd/spia.c
8 * Copyright © 2000 Steven J. Hill (sjhill@cotw.com)
9 *
10 *
11 * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
12 * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright © 2007
13 *
14 * Derived from Das U-Boot source code
15 * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
16 * © Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
17 *
18 * Add Programmable Multibit ECC support for various AT91 SoC
19 * © Copyright 2012 ATMEL, Hong Xu
20 *
21 * Add Nand Flash Controller support for SAMA5 SoC
22 * © Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
23 *
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License version 2 as
26 * published by the Free Software Foundation.
27 *
28 */
29
30#include <linux/clk.h>
31#include <linux/dma-mapping.h>
32#include <linux/slab.h>
33#include <linux/module.h>
34#include <linux/moduleparam.h>
35#include <linux/platform_device.h>
36#include <linux/of.h>
37#include <linux/of_device.h>
38#include <linux/of_gpio.h>
39#include <linux/mtd/mtd.h>
40#include <linux/mtd/nand.h>
41#include <linux/mtd/partitions.h>
42
43#include <linux/delay.h>
44#include <linux/dmaengine.h>
45#include <linux/gpio.h>
46#include <linux/interrupt.h>
47#include <linux/io.h>
48#include <linux/platform_data/atmel.h>
49
/* Use DMA for transfers larger than the OOB area (boot-time tunable). */
static int use_dma = 1;
module_param(use_dma, int, 0);

/* Use the on-flash bad block table instead of scanning (boot-time tunable). */
static int on_flash_bbt = 0;
module_param(on_flash_bbt, int, 0);

/* Register access macros */
#define ecc_readl(add, reg) \
	__raw_readl(add + ATMEL_ECC_##reg)
#define ecc_writel(add, reg, value) \
	__raw_writel((value), add + ATMEL_ECC_##reg)

#include "atmel_nand_ecc.h"	/* Hardware ECC registers */
#include "atmel_nand_nfc.h"	/* Nand Flash Controller definition */
64
/* Per-SoC NAND controller capabilities. */
struct atmel_nand_caps {
	bool pmecc_correct_erase_page;	/* can fix bitflips in erased pages */
	uint8_t pmecc_max_correction;	/* max correctable bits per sector */
};
69
70/*
71 * oob layout for large page size
72 * bad block info is on bytes 0 and 1
73 * the bytes have to be consecutives to avoid
74 * several NAND_CMD_RNDOUT during read
75 *
76 * oob layout for small page size
77 * bad block info is on bytes 4 and 5
78 * the bytes have to be consecutives to avoid
79 * several NAND_CMD_RNDOUT during read
80 */
81static int atmel_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
82 struct mtd_oob_region *oobregion)
83{
84 if (section)
85 return -ERANGE;
86
87 oobregion->length = 4;
88 oobregion->offset = 0;
89
90 return 0;
91}
92
93static int atmel_ooblayout_free_sp(struct mtd_info *mtd, int section,
94 struct mtd_oob_region *oobregion)
95{
96 if (section)
97 return -ERANGE;
98
99 oobregion->offset = 6;
100 oobregion->length = mtd->oobsize - oobregion->offset;
101
102 return 0;
103}
104
/* OOB layout callbacks for small-page NAND devices. */
static const struct mtd_ooblayout_ops atmel_ooblayout_sp_ops = {
	.ecc = atmel_ooblayout_ecc_sp,
	.free = atmel_ooblayout_free_sp,
};
109
/* State of the SAMA5 NAND Flash Controller (NFC) front-end. */
struct atmel_nfc {
	void __iomem		*base_cmd_regs;	/* command register window */
	void __iomem		*hsmc_regs;	/* HSMC register window */
	void			*sram_bank0;	/* NFC SRAM, bank 0 base */
	dma_addr_t		sram_bank0_phys;
	bool			use_nfc_sram;	/* route data through NFC SRAM */
	bool			write_by_sram;	/* write path via SRAM too */

	struct clk		*clk;

	bool			is_initialized;
	struct completion	comp_ready;
	struct completion	comp_cmd_done;
	struct completion	comp_xfer_done;

	/* Point to the sram bank which include readed data via NFC */
	void			*data_in_sram;
	bool			will_write_sram;
};
/* Single shared NFC instance (there is at most one NFC per SoC). */
static struct atmel_nfc	nand_nfc;
130
/* Per-controller driver state. */
struct atmel_nand_host {
	struct nand_chip	nand_chip;
	void __iomem		*io_base;	/* NAND data/cmd/addr window */
	dma_addr_t		io_phys;
	struct atmel_nand_data	board;		/* platform data (GPIOs, bus) */
	struct device		*dev;
	void __iomem		*ecc;		/* ECC controller registers */

	struct completion	comp;		/* DMA transfer completion */
	struct dma_chan		*dma_chan;

	struct atmel_nfc	*nfc;		/* NULL when no NFC is used */

	const struct atmel_nand_caps	*caps;
	bool			has_pmecc;
	u8			pmecc_corr_cap;		/* bits per sector */
	u16			pmecc_sector_size;	/* 512 or 1024 */
	bool			has_no_lookup_table;
	u32			pmecc_lookup_table_offset;
	u32			pmecc_lookup_table_offset_512;
	u32			pmecc_lookup_table_offset_1024;

	int			pmecc_degree;	/* Degree of remainders */
	int			pmecc_cw_len;	/* Length of codeword */

	void __iomem		*pmerrloc_base;
	void __iomem		*pmerrloc_el_base;
	void __iomem		*pmecc_rom_base;

	/* lookup table for alpha_to and index_of */
	void __iomem		*pmecc_alpha_to;
	void __iomem		*pmecc_index_of;

	/* data for pmecc computation */
	int16_t			*pmecc_partial_syn;
	int16_t			*pmecc_si;
	int16_t			*pmecc_smu;	/* Sigma table */
	int16_t			*pmecc_lmu;	/* polynomal order */
	int			*pmecc_mu;
	int			*pmecc_dmu;
	int			*pmecc_delta;
};
173
/*
 * Enable NAND: assert the (active-low) chip-enable GPIO when present.
 */
static void atmel_nand_enable(struct atmel_nand_host *host)
{
	if (gpio_is_valid(host->board.enable_pin))
		gpio_set_value(host->board.enable_pin, 0);
}
182
/*
 * Disable NAND: deassert the (active-low) chip-enable GPIO when present.
 */
static void atmel_nand_disable(struct atmel_nand_host *host)
{
	if (gpio_is_valid(host->board.enable_pin))
		gpio_set_value(host->board.enable_pin, 1);
}
191
/*
 * Hardware specific access to control-lines: toggle chip-enable via GPIO
 * and route command/address bytes through the CLE/ALE address offsets.
 */
static void atmel_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);
	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);

	if (ctrl & NAND_CTRL_CHANGE) {
		if (ctrl & NAND_NCE)
			atmel_nand_enable(host);
		else
			atmel_nand_disable(host);
	}
	if (cmd == NAND_CMD_NONE)
		return;

	/* CLE/ALE are wired to address lines; the shift picks the latch. */
	if (ctrl & NAND_CLE)
		writeb(cmd, host->io_base + (1 << host->board.cle));
	else
		writeb(cmd, host->io_base + (1 << host->board.ale));
}
214
/*
 * Read the Device Ready pin, honoring the board-specified polarity.
 */
static int atmel_nand_device_ready(struct mtd_info *mtd)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);
	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);

	return gpio_get_value(host->board.rdy_pin) ^
		!!host->board.rdy_pin_active_low;
}
226
/*
 * Set up for hardware ready pin and enable pin.
 *
 * Requests the optional ready GPIO (input; installs dev_ready callback)
 * and the optional chip-enable GPIO (output, deasserted). Both pins are
 * board-optional; missing pins are silently skipped.
 *
 * Returns 0 on success or a negative error from the GPIO layer.
 */
static int atmel_nand_set_enable_ready_pins(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct atmel_nand_host *host = nand_get_controller_data(chip);
	int res = 0;

	if (gpio_is_valid(host->board.rdy_pin)) {
		res = devm_gpio_request(host->dev,
				host->board.rdy_pin, "nand_rdy");
		if (res < 0) {
			dev_err(host->dev,
				"can't request rdy gpio %d\n",
				host->board.rdy_pin);
			return res;
		}

		res = gpio_direction_input(host->board.rdy_pin);
		if (res < 0) {
			dev_err(host->dev,
				"can't request input direction rdy gpio %d\n",
				host->board.rdy_pin);
			return res;
		}

		chip->dev_ready = atmel_nand_device_ready;
	}

	if (gpio_is_valid(host->board.enable_pin)) {
		res = devm_gpio_request(host->dev,
				host->board.enable_pin, "nand_enable");
		if (res < 0) {
			dev_err(host->dev,
				"can't request enable gpio %d\n",
				host->board.enable_pin);
			return res;
		}

		res = gpio_direction_output(host->board.enable_pin, 1);
		if (res < 0) {
			dev_err(host->dev,
				"can't request output direction enable gpio %d\n",
				host->board.enable_pin);
			return res;
		}
	}

	return res;
}
276
/*
 * Minimal-overhead PIO for data access.
 *
 * 8-bit read: copy from the NFC SRAM when a previous NFC read landed
 * there, otherwise read bytes straight from the data bus.
 */
static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);
	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);

	if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) {
		memcpy(buf, host->nfc->data_in_sram, len);
		host->nfc->data_in_sram += len;
	} else {
		__raw_readsb(nand_chip->IO_ADDR_R, buf, len);
	}
}
292
/* 16-bit variant of atmel_read_buf8(): len is in bytes, bus reads in words. */
static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);
	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);

	if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) {
		memcpy(buf, host->nfc->data_in_sram, len);
		host->nfc->data_in_sram += len;
	} else {
		__raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
	}
}
305
/* 8-bit PIO write straight to the data bus. */
static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);

	__raw_writesb(nand_chip->IO_ADDR_W, buf, len);
}
312
/* 16-bit PIO write: len is in bytes, the bus takes 16-bit words. */
static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);

	__raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2);
}
319
/* DMA completion callback: wake the waiter in atmel_nand_dma_op(). */
static void dma_complete_func(void *completion)
{
	complete(completion);
}
324
/*
 * Select the active NFC SRAM bank (0 or 1). Bank 1 is only usable for
 * pages of 2KB or less, since both banks must fit in the SRAM.
 *
 * Returns 0 on success, -EINVAL for an invalid bank/page-size combination.
 */
static int nfc_set_sram_bank(struct atmel_nand_host *host, unsigned int bank)
{
	/* NFC only has two banks. Must be 0 or 1 */
	if (bank > 1)
		return -EINVAL;

	if (bank) {
		struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);

		/* Only for a 2k-page or lower flash, NFC can handle 2 banks */
		if (mtd->writesize > 2048)
			return -EINVAL;
		nfc_writel(host->nfc->hsmc_regs, BANK, ATMEL_HSMC_NFC_BANK1);
	} else {
		nfc_writel(host->nfc->hsmc_regs, BANK, ATMEL_HSMC_NFC_BANK0);
	}

	return 0;
}
344
/* CPU-side offset of the currently selected NFC SRAM bank. */
static uint nfc_get_sram_off(struct atmel_nand_host *host)
{
	if (nfc_readl(host->nfc->hsmc_regs, BANK) & ATMEL_HSMC_NFC_BANK1)
		return NFC_SRAM_BANK1_OFFSET;
	else
		return 0;
}
352
/* Bus address of the currently selected NFC SRAM bank (for DMA). */
static dma_addr_t nfc_sram_phys(struct atmel_nand_host *host)
{
	if (nfc_readl(host->nfc->hsmc_regs, BANK) & ATMEL_HSMC_NFC_BANK1)
		return host->nfc->sram_bank0_phys + NFC_SRAM_BANK1_OFFSET;
	else
		return host->nfc->sram_bank0_phys;
}
360
/*
 * atmel_nand_dma_op() - transfer a buffer to/from the NAND via DMA memcpy
 * @mtd: MTD device
 * @buf: kernel buffer (must be lowmem; highmem falls back to PIO)
 * @len: transfer length in bytes
 * @is_read: non-zero for a read (device -> @buf)
 *
 * Source/destination is the NFC SRAM when in use, otherwise the raw data
 * bus window. Blocks until the DMA completes.
 *
 * Returns 0 on success, -EIO on any failure (the caller then falls back
 * to PIO, see the debug message below).
 */
static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
			       int is_read)
{
	struct dma_device *dma_dev;
	enum dma_ctrl_flags flags;
	dma_addr_t dma_src_addr, dma_dst_addr, phys_addr;
	struct dma_async_tx_descriptor *tx = NULL;
	dma_cookie_t cookie;
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct atmel_nand_host *host = nand_get_controller_data(chip);
	void *p = buf;
	int err = -EIO;
	enum dma_data_direction dir = is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	struct atmel_nfc *nfc = host->nfc;

	/* Highmem buffers cannot be mapped this way; use PIO instead. */
	if (buf >= high_memory)
		goto err_buf;

	dma_dev = host->dma_chan->device;

	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

	phys_addr = dma_map_single(dma_dev->dev, p, len, dir);
	if (dma_mapping_error(dma_dev->dev, phys_addr)) {
		dev_err(host->dev, "Failed to dma_map_single\n");
		goto err_buf;
	}

	/* Pick the device-side address: NFC SRAM or the raw bus window. */
	if (is_read) {
		if (nfc && nfc->data_in_sram)
			dma_src_addr = nfc_sram_phys(host) + (nfc->data_in_sram
				- (nfc->sram_bank0 + nfc_get_sram_off(host)));
		else
			dma_src_addr = host->io_phys;

		dma_dst_addr = phys_addr;
	} else {
		dma_src_addr = phys_addr;

		if (nfc && nfc->write_by_sram)
			dma_dst_addr = nfc_sram_phys(host);
		else
			dma_dst_addr = host->io_phys;
	}

	tx = dma_dev->device_prep_dma_memcpy(host->dma_chan, dma_dst_addr,
					     dma_src_addr, len, flags);
	if (!tx) {
		dev_err(host->dev, "Failed to prepare DMA memcpy\n");
		goto err_dma;
	}

	init_completion(&host->comp);
	tx->callback = dma_complete_func;
	tx->callback_param = &host->comp;

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(host->dev, "Failed to do DMA tx_submit\n");
		goto err_dma;
	}

	dma_async_issue_pending(host->dma_chan);
	wait_for_completion(&host->comp);

	if (is_read && nfc && nfc->data_in_sram)
		/* After read data from SRAM, need to increase the position */
		nfc->data_in_sram += len;

	err = 0;

err_dma:
	dma_unmap_single(dma_dev->dev, phys_addr, len, dir);
err_buf:
	if (err != 0)
		dev_dbg(host->dev, "Fall back to CPU I/O\n");
	return err;
}
439
440static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
441{
442 struct nand_chip *chip = mtd_to_nand(mtd);
443
444 if (use_dma && len > mtd->oobsize)
445 /* only use DMA for bigger than oob size: better performances */
446 if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
447 return;
448
449 if (chip->options & NAND_BUSWIDTH_16)
450 atmel_read_buf16(mtd, buf, len);
451 else
452 atmel_read_buf8(mtd, buf, len);
453}
454
455static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
456{
457 struct nand_chip *chip = mtd_to_nand(mtd);
458
459 if (use_dma && len > mtd->oobsize)
460 /* only use DMA for bigger than oob size: better performances */
461 if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
462 return;
463
464 if (chip->options & NAND_BUSWIDTH_16)
465 atmel_write_buf16(mtd, buf, len);
466 else
467 atmel_write_buf8(mtd, buf, len);
468}
469
/*
 * Return number of ecc bytes per sector according to sector size and
 * correction capability.
 *
 * Each correctable bit costs m = 13 (512-byte sectors) or m = 14
 * (1024-byte sectors) parity bits; the result is rounded up to bytes.
 *
 * Following table shows what at91 PMECC supported:
 * Correction Capability	Sector_512_bytes	Sector_1024_bytes
 * =====================	================	=================
 *                2-bits		 4-bytes		  4-bytes
 *                4-bits		 7-bytes		  7-bytes
 *                8-bits		13-bytes		 14-bytes
 *               12-bits		20-bytes		 21-bytes
 *               24-bits		39-bytes		 42-bytes
 *               32-bits		52-bytes		 56-bytes
 */
static int pmecc_get_ecc_bytes(int cap, int sector_size)
{
	int bits_per_err = 12 + sector_size / 512;
	int total_bits = bits_per_err * cap;

	return (total_bits + 7) / 8;	/* round up to whole bytes */
}
489
/*
 * Locate the alpha_to Galois-field table in ROM: it follows the index_of
 * table, whose size depends on the configured sector size.
 */
static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
{
	int table_size;

	table_size = host->pmecc_sector_size == 512 ?
		PMECC_LOOKUP_TABLE_SIZE_512 : PMECC_LOOKUP_TABLE_SIZE_1024;

	return host->pmecc_rom_base + host->pmecc_lookup_table_offset +
			table_size * sizeof(int16_t);
}
500
/*
 * Allocate the scratch buffers used by the BCH decoding steps (syndromes,
 * sigma table, discrepancies). All buffers are device-managed, so nothing
 * is freed explicitly on the error path.
 *
 * Returns 0 on success, -ENOMEM when any allocation failed.
 */
static int pmecc_data_alloc(struct atmel_nand_host *host)
{
	const int cap = host->pmecc_corr_cap;
	int size;

	size = (2 * cap + 1) * sizeof(int16_t);
	host->pmecc_partial_syn = devm_kzalloc(host->dev, size, GFP_KERNEL);
	host->pmecc_si = devm_kzalloc(host->dev, size, GFP_KERNEL);
	host->pmecc_lmu = devm_kzalloc(host->dev,
			(cap + 1) * sizeof(int16_t), GFP_KERNEL);
	/* smu is a (cap + 2) x (2 * cap + 1) table of coefficients. */
	host->pmecc_smu = devm_kzalloc(host->dev,
			(cap + 2) * size, GFP_KERNEL);

	size = (cap + 1) * sizeof(int);
	host->pmecc_mu = devm_kzalloc(host->dev, size, GFP_KERNEL);
	host->pmecc_dmu = devm_kzalloc(host->dev, size, GFP_KERNEL);
	host->pmecc_delta = devm_kzalloc(host->dev, size, GFP_KERNEL);

	if (!host->pmecc_partial_syn ||
		!host->pmecc_si ||
		!host->pmecc_lmu ||
		!host->pmecc_smu ||
		!host->pmecc_mu ||
		!host->pmecc_dmu ||
		!host->pmecc_delta)
		return -ENOMEM;

	return 0;
}
530
/*
 * Read the partial (odd) syndromes computed by the hardware for @sector.
 * Two 16-bit remainders are packed per 32-bit register, hence the i/2
 * indexing and the shift for odd i.
 */
static void pmecc_gen_syndrome(struct mtd_info *mtd, int sector)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);
	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
	int i;
	uint32_t value;

	/* Fill odd syndromes */
	for (i = 0; i < host->pmecc_corr_cap; i++) {
		value = pmecc_readl_rem_relaxed(host->ecc, sector, i / 2);
		if (i & 1)
			value >>= 16;
		value &= 0xffff;
		host->pmecc_partial_syn[(2 * i) + 1] = (int16_t)value;
	}
}
547
/*
 * Expand the partial syndromes into the full 2t syndrome table si[]:
 * odd syndromes come from the hardware remainders via the alpha_to table,
 * even syndromes are derived as the square of the corresponding odd one.
 */
static void pmecc_substitute(struct mtd_info *mtd)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);
	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
	int16_t __iomem *alpha_to = host->pmecc_alpha_to;
	int16_t __iomem *index_of = host->pmecc_index_of;
	int16_t *partial_syn = host->pmecc_partial_syn;
	const int cap = host->pmecc_corr_cap;
	int16_t *si;
	int i, j;

	/* si[] is a table that holds the current syndrome value,
	 * an element of that table belongs to the field
	 */
	si = host->pmecc_si;

	memset(&si[1], 0, sizeof(int16_t) * (2 * cap - 1));

	/* Computation 2t syndromes based on S(x) */
	/* Odd syndromes */
	for (i = 1; i < 2 * cap; i += 2) {
		for (j = 0; j < host->pmecc_degree; j++) {
			if (partial_syn[i] & ((unsigned short)0x1 << j))
				si[i] = readw_relaxed(alpha_to + i * j) ^ si[i];
		}
	}
	/* Even syndrome = (Odd syndrome) ** 2 */
	for (i = 2, j = 1; j <= cap; i = ++j << 1) {
		if (si[j] == 0) {
			si[i] = 0;
		} else {
			int16_t tmp;

			/* Squaring doubles the exponent, mod codeword len. */
			tmp = readw_relaxed(index_of + si[j]);
			tmp = (tmp * 2) % host->pmecc_cw_len;
			si[i] = readw_relaxed(alpha_to + tmp);
		}
	}

	return;
}
589
/*
 * Iteratively build the error locator polynomial sigma from the syndrome
 * table (Berlekamp-Massey-style iteration). Each row of the smu table
 * holds one candidate sigma; the final polynomial ends up in row cap + 1
 * and its degree (times two) in lmu[cap + 1], both consumed by
 * pmecc_err_location().
 */
static void pmecc_get_sigma(struct mtd_info *mtd)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);
	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);

	int16_t *lmu = host->pmecc_lmu;
	int16_t *si = host->pmecc_si;
	int *mu = host->pmecc_mu;
	int *dmu = host->pmecc_dmu;	/* Discrepancy */
	int *delta = host->pmecc_delta; /* Delta order */
	int cw_len = host->pmecc_cw_len;
	const int16_t cap = host->pmecc_corr_cap;
	const int num = 2 * cap + 1;
	int16_t __iomem *index_of = host->pmecc_index_of;
	int16_t __iomem *alpha_to = host->pmecc_alpha_to;
	int i, j, k;
	uint32_t dmu_0_count, tmp;
	int16_t *smu = host->pmecc_smu;

	/* index of largest delta */
	int ro;
	int largest;
	int diff;

	dmu_0_count = 0;

	/* First Row */

	/* Mu */
	mu[0] = -1;

	memset(smu, 0, sizeof(int16_t) * num);
	smu[0] = 1;

	/* discrepancy set to 1 */
	dmu[0] = 1;
	/* polynom order set to 0 */
	lmu[0] = 0;
	delta[0] = (mu[0] * 2 - lmu[0]) >> 1;

	/* Second Row */

	/* Mu */
	mu[1] = 0;
	/* Sigma(x) set to 1 */
	memset(&smu[num], 0, sizeof(int16_t) * num);
	smu[num] = 1;

	/* discrepancy set to S1 */
	dmu[1] = si[1];

	/* polynom order set to 0 */
	lmu[1] = 0;

	delta[1] = (mu[1] * 2 - lmu[1]) >> 1;

	/* Init the Sigma(x) last row */
	memset(&smu[(cap + 1) * num], 0, sizeof(int16_t) * num);

	for (i = 1; i <= cap; i++) {
		mu[i + 1] = i << 1;
		/* Begin Computing Sigma (Mu+1) and L(mu) */
		/* check if discrepancy is set to 0 */
		if (dmu[i] == 0) {
			dmu_0_count++;

			tmp = ((cap - (lmu[i] >> 1) - 1) / 2);
			if ((cap - (lmu[i] >> 1) - 1) & 0x1)
				tmp += 2;
			else
				tmp += 1;

			/*
			 * Enough zero discrepancies: sigma has converged,
			 * copy it to the result row and stop early.
			 */
			if (dmu_0_count == tmp) {
				for (j = 0; j <= (lmu[i] >> 1) + 1; j++)
					smu[(cap + 1) * num + j] =
							smu[i * num + j];

				lmu[cap + 1] = lmu[i];
				return;
			}

			/* copy polynom */
			for (j = 0; j <= lmu[i] >> 1; j++)
				smu[(i + 1) * num + j] = smu[i * num + j];

			/* copy previous polynom order to the next */
			lmu[i + 1] = lmu[i];
		} else {
			ro = 0;
			largest = -1;
			/* find largest delta with dmu != 0 */
			for (j = 0; j < i; j++) {
				if ((dmu[j]) && (delta[j] > largest)) {
					largest = delta[j];
					ro = j;
				}
			}

			/* compute difference */
			diff = (mu[i] - mu[ro]);

			/* Compute degree of the new smu polynomial */
			if ((lmu[i] >> 1) > ((lmu[ro] >> 1) + diff))
				lmu[i + 1] = lmu[i];
			else
				lmu[i + 1] = ((lmu[ro] >> 1) + diff) * 2;

			/* Init smu[i+1] with 0 */
			for (k = 0; k < num; k++)
				smu[(i + 1) * num + k] = 0;

			/* Compute smu[i+1] */
			for (k = 0; k <= lmu[ro] >> 1; k++) {
				int16_t a, b, c;

				if (!(smu[ro * num + k] && dmu[i]))
					continue;
				/* GF multiply/divide through the log tables. */
				a = readw_relaxed(index_of + dmu[i]);
				b = readw_relaxed(index_of + dmu[ro]);
				c = readw_relaxed(index_of + smu[ro * num + k]);
				tmp = a + (cw_len - b) + c;
				a = readw_relaxed(alpha_to + tmp % cw_len);
				smu[(i + 1) * num + (k + diff)] = a;
			}

			for (k = 0; k <= lmu[i] >> 1; k++)
				smu[(i + 1) * num + k] ^= smu[i * num + k];
		}

		/* End Computing Sigma (Mu+1) and L(mu) */
		/* In either case compute delta */
		delta[i + 1] = (mu[i + 1] * 2 - lmu[i + 1]) >> 1;

		/* Do not compute discrepancy for the last iteration */
		if (i >= cap)
			continue;

		for (k = 0; k <= (lmu[i + 1] >> 1); k++) {
			tmp = 2 * (i - 1);
			if (k == 0) {
				dmu[i + 1] = si[tmp + 3];
			} else if (smu[(i + 1) * num + k] && si[tmp + 3 - k]) {
				int16_t a, b, c;
				a = readw_relaxed(index_of +
						  smu[(i + 1) * num + k]);
				b = si[2 * (i - 1) + 3 - k];
				c = readw_relaxed(index_of + b);
				tmp = a + c;
				tmp %= cw_len;
				dmu[i + 1] = readw_relaxed(alpha_to + tmp) ^
					dmu[i + 1];
			}
		}
	}

	return;
}
747
/*
 * Load sigma into the PMERRLOC block and run the hardware root search.
 *
 * Returns the number of detected errors, or -1 on timeout or when the
 * number of roots does not match the degree of sigma (uncorrectable).
 */
static int pmecc_err_location(struct mtd_info *mtd)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);
	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
	unsigned long end_time;
	const int cap = host->pmecc_corr_cap;
	const int num = 2 * cap + 1;
	int sector_size = host->pmecc_sector_size;
	int err_nbr = 0;	/* number of error */
	int roots_nbr;		/* number of roots */
	int i;
	uint32_t val;
	int16_t *smu = host->pmecc_smu;

	pmerrloc_writel(host->pmerrloc_base, ELDIS, PMERRLOC_DISABLE);

	/* Load the coefficients of sigma (last row of the smu table). */
	for (i = 0; i <= host->pmecc_lmu[cap + 1] >> 1; i++) {
		pmerrloc_writel_sigma_relaxed(host->pmerrloc_base, i,
				      smu[(cap + 1) * num + i]);
		err_nbr++;
	}

	/* ELCFG takes the error count minus one, plus a sector-size flag. */
	val = (err_nbr - 1) << 16;
	if (sector_size == 1024)
		val |= 1;

	pmerrloc_writel(host->pmerrloc_base, ELCFG, val);
	/* Search over all data bits plus the ECC bits of the sector. */
	pmerrloc_writel(host->pmerrloc_base, ELEN,
			sector_size * 8 + host->pmecc_degree * cap);

	end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
	while (!(pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR)
		 & PMERRLOC_CALC_DONE)) {
		if (unlikely(time_after(jiffies, end_time))) {
			dev_err(host->dev, "PMECC: Timeout to calculate error location.\n");
			return -1;
		}
		cpu_relax();
	}

	roots_nbr = (pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR)
		& PMERRLOC_ERR_NUM_MASK) >> 8;
	/* Number of roots == degree of smu hence <= cap */
	if (roots_nbr == host->pmecc_lmu[cap + 1] >> 1)
		return err_nbr - 1;

	/* Number of roots does not match the degree of smu
	 * unable to correct error */
	return -1;
}
798
799static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
800 int sector_num, int extra_bytes, int err_nbr)
801{
802 struct nand_chip *nand_chip = mtd_to_nand(mtd);
803 struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
804 int i = 0;
805 int byte_pos, bit_pos, sector_size, pos;
806 uint32_t tmp;
807 uint8_t err_byte;
808
809 sector_size = host->pmecc_sector_size;
810
811 while (err_nbr) {
812 tmp = pmerrloc_readl_el_relaxed(host->pmerrloc_el_base, i) - 1;
813 byte_pos = tmp / 8;
814 bit_pos = tmp % 8;
815
816 if (byte_pos >= (sector_size + extra_bytes))
817 BUG(); /* should never happen */
818
819 if (byte_pos < sector_size) {
820 err_byte = *(buf + byte_pos);
821 *(buf + byte_pos) ^= (1 << bit_pos);
822
823 pos = sector_num * host->pmecc_sector_size + byte_pos;
824 dev_dbg(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
825 pos, bit_pos, err_byte, *(buf + byte_pos));
826 } else {
827 struct mtd_oob_region oobregion;
828
829 /* Bit flip in OOB area */
830 tmp = sector_num * nand_chip->ecc.bytes
831 + (byte_pos - sector_size);
832 err_byte = ecc[tmp];
833 ecc[tmp] ^= (1 << bit_pos);
834
835 mtd_ooblayout_ecc(mtd, 0, &oobregion);
836 pos = tmp + oobregion.offset;
837 dev_dbg(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
838 pos, bit_pos, err_byte, ecc[tmp]);
839 }
840
841 i++;
842 err_nbr--;
843 }
844
845 return;
846}
847
/*
 * Run the full PMECC correction pipeline on every sector flagged in
 * pmecc_stat (one bit per sector): syndrome generation, substitution,
 * Berlekamp sigma computation, hardware error location, then bit flips.
 *
 * Returns the maximum number of bitflips seen in a single sector, or
 * -EIO when a sector is uncorrectable (ecc_stats.failed is incremented).
 */
static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
	u8 *ecc)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);
	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
	int i, err_nbr;
	uint8_t *buf_pos;
	int max_bitflips = 0;

	for (i = 0; i < nand_chip->ecc.steps; i++) {
		err_nbr = 0;
		/* Bit i of pmecc_stat is set when sector i has errors */
		if (pmecc_stat & 0x1) {
			buf_pos = buf + i * host->pmecc_sector_size;

			/* BCH decode steps must run in this exact order */
			pmecc_gen_syndrome(mtd, i);
			pmecc_substitute(mtd);
			pmecc_get_sigma(mtd);

			err_nbr = pmecc_err_location(mtd);
			if (err_nbr >= 0) {
				pmecc_correct_data(mtd, buf_pos, ecc, i,
						   nand_chip->ecc.bytes,
						   err_nbr);
			} else if (!host->caps->pmecc_correct_erase_page) {
				u8 *ecc_pos = ecc + (i * nand_chip->ecc.bytes);

				/* Try to detect erased pages */
				err_nbr = nand_check_erased_ecc_chunk(buf_pos,
							host->pmecc_sector_size,
							ecc_pos,
							nand_chip->ecc.bytes,
							NULL, 0,
							nand_chip->ecc.strength);
			}

			/* Still negative: genuinely uncorrectable sector */
			if (err_nbr < 0) {
				dev_err(host->dev, "PMECC: Too many errors\n");
				mtd->ecc_stats.failed++;
				return -EIO;
			}

			mtd->ecc_stats.corrected += err_nbr;
			max_bitflips = max_t(int, max_bitflips, err_nbr);
		}
		pmecc_stat >>= 1;
	}

	return max_bitflips;
}
897
898static void pmecc_enable(struct atmel_nand_host *host, int ecc_op)
899{
900 u32 val;
901
902 if (ecc_op != NAND_ECC_READ && ecc_op != NAND_ECC_WRITE) {
903 dev_err(host->dev, "atmel_nand: wrong pmecc operation type!");
904 return;
905 }
906
907 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
908 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
909 val = pmecc_readl_relaxed(host->ecc, CFG);
910
911 if (ecc_op == NAND_ECC_READ)
912 pmecc_writel(host->ecc, CFG, (val & ~PMECC_CFG_WRITE_OP)
913 | PMECC_CFG_AUTO_ENABLE);
914 else
915 pmecc_writel(host->ecc, CFG, (val | PMECC_CFG_WRITE_OP)
916 & ~PMECC_CFG_AUTO_ENABLE);
917
918 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
919 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA);
920}
921
/*
 * PMECC read_page() implementation: read data and OOB, wait for the PMECC
 * engine to finish, then run the correction pipeline on flagged sectors.
 *
 * Returns the maximum number of bitflips seen in a sector, 0 for a clean
 * or uncorrectable page (stats are updated by pmecc_correction()), or
 * -EIO on a controller timeout.
 */
static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
	struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
{
	struct atmel_nand_host *host = nand_get_controller_data(chip);
	int eccsize = chip->ecc.size * chip->ecc.steps;
	uint8_t *oob = chip->oob_poi;
	uint32_t stat;
	unsigned long end_time;
	int bitflips = 0;

	/* When the NFC SRAM is used, the NFC code already enabled PMECC */
	if (!host->nfc || !host->nfc->use_nfc_sram)
		pmecc_enable(host, NAND_ECC_READ);

	chip->read_buf(mtd, buf, eccsize);
	chip->read_buf(mtd, oob, mtd->oobsize);

	/* Wait for the PMECC engine to finish processing the page */
	end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
	while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) {
		if (unlikely(time_after(jiffies, end_time))) {
			dev_err(host->dev, "PMECC: Timeout to get error status.\n");
			return -EIO;
		}
		cpu_relax();
	}

	/* ISR holds one bit per sector with detected errors */
	stat = pmecc_readl_relaxed(host->ecc, ISR);
	if (stat != 0) {
		struct mtd_oob_region oobregion;

		mtd_ooblayout_ecc(mtd, 0, &oobregion);
		bitflips = pmecc_correction(mtd, stat, buf,
					    &oob[oobregion.offset]);
		if (bitflips < 0)
			/* uncorrectable errors */
			return 0;
	}

	return bitflips;
}
961
/*
 * PMECC write_page() implementation: write the data, wait for the PMECC
 * engine to compute the ECC bytes, copy them into oob_poi according to
 * the ECC OOB layout, then write the OOB area.
 *
 * Returns 0 on success or -EIO on a controller timeout.
 */
static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required,
		int page)
{
	struct atmel_nand_host *host = nand_get_controller_data(chip);
	struct mtd_oob_region oobregion = { };
	int i, j, section = 0;
	unsigned long end_time;

	/* When writing through the NFC SRAM the data was already pushed */
	if (!host->nfc || !host->nfc->write_by_sram) {
		pmecc_enable(host, NAND_ECC_WRITE);
		chip->write_buf(mtd, (u8 *)buf, mtd->writesize);
	}

	/* Wait for the PMECC engine to finish computing the ECC bytes */
	end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
	while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) {
		if (unlikely(time_after(jiffies, end_time))) {
			dev_err(host->dev, "PMECC: Timeout to get ECC value.\n");
			return -EIO;
		}
		cpu_relax();
	}

	/* Copy the computed ECC bytes to their place in the OOB buffer */
	for (i = 0; i < chip->ecc.steps; i++) {
		for (j = 0; j < chip->ecc.bytes; j++) {
			if (!oobregion.length)
				mtd_ooblayout_ecc(mtd, section, &oobregion);

			chip->oob_poi[oobregion.offset] =
				pmecc_readb_ecc_relaxed(host->ecc, i, j);
			oobregion.length--;
			oobregion.offset++;
			/*
			 * NOTE(review): section is bumped once per byte, so a
			 * second mtd_ooblayout_ecc() lookup would use a wrong
			 * section index. This is only safe if the first ECC
			 * region covers all ECC bytes — confirm against the
			 * OOB layout in use.
			 */
			section++;
		}
	}
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}
1001
/*
 * Program the PMECC configuration registers — BCH strength, sector size,
 * sectors per page, spare-area start/end addresses — and enable the block.
 */
static void atmel_pmecc_core_init(struct mtd_info *mtd)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);
	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
	int eccbytes = mtd_ooblayout_count_eccbytes(mtd);
	uint32_t val = 0;
	struct mtd_oob_region oobregion;

	/* Reset and disable before reconfiguring */
	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);

	/* BCH error correction capability */
	switch (host->pmecc_corr_cap) {
	case 2:
		val = PMECC_CFG_BCH_ERR2;
		break;
	case 4:
		val = PMECC_CFG_BCH_ERR4;
		break;
	case 8:
		val = PMECC_CFG_BCH_ERR8;
		break;
	case 12:
		val = PMECC_CFG_BCH_ERR12;
		break;
	case 24:
		val = PMECC_CFG_BCH_ERR24;
		break;
	case 32:
		val = PMECC_CFG_BCH_ERR32;
		break;
	}

	/* ECC sector size: only 512 and 1024 bytes are supported */
	if (host->pmecc_sector_size == 512)
		val |= PMECC_CFG_SECTOR512;
	else if (host->pmecc_sector_size == 1024)
		val |= PMECC_CFG_SECTOR1024;

	/* Number of ECC sectors per page */
	switch (nand_chip->ecc.steps) {
	case 1:
		val |= PMECC_CFG_PAGE_1SECTOR;
		break;
	case 2:
		val |= PMECC_CFG_PAGE_2SECTORS;
		break;
	case 4:
		val |= PMECC_CFG_PAGE_4SECTORS;
		break;
	case 8:
		val |= PMECC_CFG_PAGE_8SECTORS;
		break;
	}

	val |= (PMECC_CFG_READ_OP | PMECC_CFG_SPARE_DISABLE
		| PMECC_CFG_AUTO_DISABLE);
	pmecc_writel(host->ecc, CFG, val);

	/* Spare area size and the ECC byte range inside the OOB */
	pmecc_writel(host->ecc, SAREA, mtd->oobsize - 1);
	mtd_ooblayout_ecc(mtd, 0, &oobregion);
	pmecc_writel(host->ecc, SADDR, oobregion.offset);
	pmecc_writel(host->ecc, EADDR,
		     oobregion.offset + eccbytes - 1);
	/* See datasheet about PMECC Clock Control Register */
	pmecc_writel(host->ecc, CLK, 2);
	/* Mask all PMECC interrupts, then enable the block */
	pmecc_writel(host->ecc, IDR, 0xff);
	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
}
1068
1069/*
1070 * Get minimum ecc requirements from NAND.
1071 * If pmecc-cap, pmecc-sector-size in DTS are not specified, this function
1072 * will set them according to minimum ecc requirement. Otherwise, use the
1073 * value in DTS file.
1074 * return 0 if success. otherwise return error code.
1075 */
1076static int pmecc_choose_ecc(struct atmel_nand_host *host,
1077 int *cap, int *sector_size)
1078{
1079 /* Get minimum ECC requirements */
1080 if (host->nand_chip.ecc_strength_ds) {
1081 *cap = host->nand_chip.ecc_strength_ds;
1082 *sector_size = host->nand_chip.ecc_step_ds;
1083 dev_info(host->dev, "minimum ECC: %d bits in %d bytes\n",
1084 *cap, *sector_size);
1085 } else {
1086 *cap = 2;
1087 *sector_size = 512;
1088 dev_info(host->dev, "can't detect min. ECC, assume 2 bits in 512 bytes\n");
1089 }
1090
1091 /* If device tree doesn't specify, use NAND's minimum ECC parameters */
1092 if (host->pmecc_corr_cap == 0) {
1093 if (*cap > host->caps->pmecc_max_correction)
1094 return -EINVAL;
1095
1096 /* use the most fitable ecc bits (the near bigger one ) */
1097 if (*cap <= 2)
1098 host->pmecc_corr_cap = 2;
1099 else if (*cap <= 4)
1100 host->pmecc_corr_cap = 4;
1101 else if (*cap <= 8)
1102 host->pmecc_corr_cap = 8;
1103 else if (*cap <= 12)
1104 host->pmecc_corr_cap = 12;
1105 else if (*cap <= 24)
1106 host->pmecc_corr_cap = 24;
1107 else if (*cap <= 32)
1108 host->pmecc_corr_cap = 32;
1109 else
1110 return -EINVAL;
1111 }
1112 if (host->pmecc_sector_size == 0) {
1113 /* use the most fitable sector size (the near smaller one ) */
1114 if (*sector_size >= 1024)
1115 host->pmecc_sector_size = 1024;
1116 else if (*sector_size >= 512)
1117 host->pmecc_sector_size = 512;
1118 else
1119 return -EINVAL;
1120 }
1121 return 0;
1122}
1123
/* Degree of a polynomial in GF(2): index of its most-significant set bit */
static inline int deg(unsigned int poly)
{
	/* polynomial degree is the most-significant bit index */
	return fls(poly) - 1;
}
1129
1130static int build_gf_tables(int mm, unsigned int poly,
1131 int16_t *index_of, int16_t *alpha_to)
1132{
1133 unsigned int i, x = 1;
1134 const unsigned int k = 1 << deg(poly);
1135 unsigned int nn = (1 << mm) - 1;
1136
1137 /* primitive polynomial must be of degree m */
1138 if (k != (1u << mm))
1139 return -EINVAL;
1140
1141 for (i = 0; i < nn; i++) {
1142 alpha_to[i] = x;
1143 index_of[x] = i;
1144 if (i && (x == 1))
1145 /* polynomial is not primitive (a^i=1 with 0<i<2^m-1) */
1146 return -EINVAL;
1147 x <<= 1;
1148 if (x & k)
1149 x ^= poly;
1150 }
1151 alpha_to[nn] = 1;
1152 index_of[0] = 0;
1153
1154 return 0;
1155}
1156
1157static uint16_t *create_lookup_table(struct device *dev, int sector_size)
1158{
1159 int degree = (sector_size == 512) ?
1160 PMECC_GF_DIMENSION_13 :
1161 PMECC_GF_DIMENSION_14;
1162 unsigned int poly = (sector_size == 512) ?
1163 PMECC_GF_13_PRIMITIVE_POLY :
1164 PMECC_GF_14_PRIMITIVE_POLY;
1165 int table_size = (sector_size == 512) ?
1166 PMECC_LOOKUP_TABLE_SIZE_512 :
1167 PMECC_LOOKUP_TABLE_SIZE_1024;
1168
1169 int16_t *addr = devm_kzalloc(dev, 2 * table_size * sizeof(uint16_t),
1170 GFP_KERNEL);
1171 if (addr && build_gf_tables(degree, poly, addr, addr + table_size))
1172 return NULL;
1173
1174 return addr;
1175}
1176
/*
 * Probe-time PMECC setup: choose ECC parameters, map the PMECC, PMERRLOC
 * and (optionally) ROM lookup-table resources, configure the nand_chip
 * ECC callbacks and initialize the PMECC hardware.
 *
 * Falls back to software ECC when the PMECC registers or the page size
 * are unavailable/unsupported. Returns 0 on success (including the
 * software-ECC fallbacks) or a negative error code.
 */
static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
					 struct atmel_nand_host *host)
{
	struct nand_chip *nand_chip = &host->nand_chip;
	struct mtd_info *mtd = nand_to_mtd(nand_chip);
	struct resource *regs, *regs_pmerr, *regs_rom;
	uint16_t *galois_table;
	int cap, sector_size, err_no;

	err_no = pmecc_choose_ecc(host, &cap, &sector_size);
	if (err_no) {
		dev_err(host->dev, "The NAND flash's ECC requirement are not support!");
		return err_no;
	}

	/* Warn when DT parameters diverge from the chip's ONFI requirement */
	if (cap > host->pmecc_corr_cap ||
			sector_size != host->pmecc_sector_size)
		dev_info(host->dev, "WARNING: Be Caution! Using different PMECC parameters from Nand ONFI ECC reqirement.\n");

	cap = host->pmecc_corr_cap;
	sector_size = host->pmecc_sector_size;
	host->pmecc_lookup_table_offset = (sector_size == 512) ?
			host->pmecc_lookup_table_offset_512 :
			host->pmecc_lookup_table_offset_1024;

	dev_info(host->dev, "Initialize PMECC params, cap: %d, sector: %d\n",
		 cap, sector_size);

	/* PMECC controller registers (memory resource 1) */
	regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!regs) {
		dev_warn(host->dev,
			"Can't get I/O resource regs for PMECC controller, rolling back on software ECC\n");
		nand_chip->ecc.mode = NAND_ECC_SOFT;
		nand_chip->ecc.algo = NAND_ECC_HAMMING;
		return 0;
	}

	host->ecc = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(host->ecc)) {
		err_no = PTR_ERR(host->ecc);
		goto err;
	}

	/* PMERRLOC (error location) registers (memory resource 2) */
	regs_pmerr = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	host->pmerrloc_base = devm_ioremap_resource(&pdev->dev, regs_pmerr);
	if (IS_ERR(host->pmerrloc_base)) {
		err_no = PTR_ERR(host->pmerrloc_base);
		goto err;
	}
	/* Error-location result registers follow the sigma registers */
	host->pmerrloc_el_base = host->pmerrloc_base + ATMEL_PMERRLOC_SIGMAx +
		(host->caps->pmecc_max_correction + 1) * 4;

	/* Galois field ROM lookup table (memory resource 3), if present */
	if (!host->has_no_lookup_table) {
		regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
		host->pmecc_rom_base = devm_ioremap_resource(&pdev->dev,
								regs_rom);
		if (IS_ERR(host->pmecc_rom_base)) {
			dev_err(host->dev, "Can not get I/O resource for ROM, will build a lookup table in runtime!\n");
			host->has_no_lookup_table = true;
		}
	}

	if (host->has_no_lookup_table) {
		/* Build the look-up table in runtime */
		galois_table = create_lookup_table(host->dev, sector_size);
		if (!galois_table) {
			dev_err(host->dev, "Failed to build a lookup table in runtime!\n");
			err_no = -EINVAL;
			goto err;
		}

		host->pmecc_rom_base = (void __iomem *)galois_table;
		host->pmecc_lookup_table_offset = 0;
	}

	nand_chip->ecc.size = sector_size;

	/* set ECC page size and oob layout */
	switch (mtd->writesize) {
	case 512:
	case 1024:
	case 2048:
	case 4096:
	case 8192:
		if (sector_size > mtd->writesize) {
			dev_err(host->dev, "pmecc sector size is bigger than the page size!\n");
			err_no = -EINVAL;
			goto err;
		}

		host->pmecc_degree = (sector_size == 512) ?
			PMECC_GF_DIMENSION_13 : PMECC_GF_DIMENSION_14;
		host->pmecc_cw_len = (1 << host->pmecc_degree) - 1;
		host->pmecc_alpha_to = pmecc_get_alpha_to(host);
		host->pmecc_index_of = host->pmecc_rom_base +
			host->pmecc_lookup_table_offset;

		nand_chip->ecc.strength = cap;
		nand_chip->ecc.bytes = pmecc_get_ecc_bytes(cap, sector_size);
		nand_chip->ecc.steps = mtd->writesize / sector_size;
		nand_chip->ecc.total = nand_chip->ecc.bytes *
			nand_chip->ecc.steps;
		/* The OOB must fit all ECC bytes plus the reserved bytes */
		if (nand_chip->ecc.total >
				mtd->oobsize - PMECC_OOB_RESERVED_BYTES) {
			dev_err(host->dev, "No room for ECC bytes\n");
			err_no = -EINVAL;
			goto err;
		}

		mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
		break;
	default:
		dev_warn(host->dev,
			"Unsupported page size for PMECC, use Software ECC\n");
		/* page size not handled by HW ECC */
		/* switching back to soft ECC */
		nand_chip->ecc.mode = NAND_ECC_SOFT;
		nand_chip->ecc.algo = NAND_ECC_HAMMING;
		return 0;
	}

	/* Allocate data for PMECC computation */
	err_no = pmecc_data_alloc(host);
	if (err_no) {
		dev_err(host->dev,
			"Cannot allocate memory for PMECC computation!\n");
		goto err;
	}

	nand_chip->options |= NAND_NO_SUBPAGE_WRITE;
	nand_chip->ecc.read_page = atmel_nand_pmecc_read_page;
	nand_chip->ecc.write_page = atmel_nand_pmecc_write_page;

	atmel_pmecc_core_init(mtd);

	return 0;

err:
	return err_no;
}
1317
1318/*
1319 * Calculate HW ECC
1320 *
1321 * function called after a write
1322 *
1323 * mtd: MTD block structure
1324 * dat: raw data (unused)
1325 * ecc_code: buffer for ECC
1326 */
1327static int atmel_nand_calculate(struct mtd_info *mtd,
1328 const u_char *dat, unsigned char *ecc_code)
1329{
1330 struct nand_chip *nand_chip = mtd_to_nand(mtd);
1331 struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
1332 unsigned int ecc_value;
1333
1334 /* get the first 2 ECC bytes */
1335 ecc_value = ecc_readl(host->ecc, PR);
1336
1337 ecc_code[0] = ecc_value & 0xFF;
1338 ecc_code[1] = (ecc_value >> 8) & 0xFF;
1339
1340 /* get the last 2 ECC bytes */
1341 ecc_value = ecc_readl(host->ecc, NPR) & ATMEL_ECC_NPARITY;
1342
1343 ecc_code[2] = ecc_value & 0xFF;
1344 ecc_code[3] = (ecc_value >> 8) & 0xFF;
1345
1346 return 0;
1347}
1348
1349/*
1350 * HW ECC read page function
1351 *
1352 * mtd: mtd info structure
1353 * chip: nand chip info structure
1354 * buf: buffer to store read data
1355 * oob_required: caller expects OOB data read to chip->oob_poi
1356 */
/*
 * HW-ECC read_page(): read the data, make the ECC controller see the ECC
 * bytes right after the data (via RNDOUT), run the 1-bit correction, then
 * read the rest of the OOB.
 *
 * Returns the maximum number of corrected bitflips (0 or 1 here);
 * uncorrectable pages bump ecc_stats.failed.
 */
static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
		uint8_t *buf, int oob_required, int page)
{
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	uint8_t *ecc_pos;
	int stat;
	unsigned int max_bitflips = 0;
	struct mtd_oob_region oobregion = {};

	/*
	 * Errata: ALE is incorrectly wired up to the ECC controller
	 * on the AP7000, so it will include the address cycles in the
	 * ECC calculation.
	 *
	 * Workaround: Reset the parity registers before reading the
	 * actual data.
	 */
	struct atmel_nand_host *host = nand_get_controller_data(chip);
	if (host->board.need_reset_workaround)
		ecc_writel(host->ecc, CR, ATMEL_ECC_RST);

	/* read the page */
	chip->read_buf(mtd, p, eccsize);

	/* move to ECC position if needed */
	mtd_ooblayout_ecc(mtd, 0, &oobregion);
	if (oobregion.offset != 0) {
		/*
		 * This only works on large pages because the ECC controller
		 * waits for NAND_CMD_RNDOUTSTART after the NAND_CMD_RNDOUT.
		 * Anyway, for small pages, the first ECC byte is at offset
		 * 0 in the OOB area.
		 */
		chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
			      mtd->writesize + oobregion.offset, -1);
	}

	/* the ECC controller needs to read the ECC just after the data */
	ecc_pos = oob + oobregion.offset;
	chip->read_buf(mtd, ecc_pos, eccbytes);

	/* check if there's an error */
	stat = chip->ecc.correct(mtd, p, oob, NULL);

	if (stat < 0) {
		/* uncorrectable */
		mtd->ecc_stats.failed++;
	} else {
		mtd->ecc_stats.corrected += stat;
		max_bitflips = max_t(unsigned int, max_bitflips, stat);
	}

	/* get back to oob start (end of page) */
	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);

	/* read the oob */
	chip->read_buf(mtd, oob, mtd->oobsize);

	return max_bitflips;
}
1419
1420/*
1421 * HW ECC Correction
1422 *
1423 * function called after a read
1424 *
1425 * mtd: MTD block structure
1426 * dat: raw data read from the chip
1427 * read_ecc: ECC from the chip (unused)
1428 * isnull: unused
1429 *
1430 * Detect and correct a 1 bit error for a page
1431 */
1432static int atmel_nand_correct(struct mtd_info *mtd, u_char *dat,
1433 u_char *read_ecc, u_char *isnull)
1434{
1435 struct nand_chip *nand_chip = mtd_to_nand(mtd);
1436 struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
1437 unsigned int ecc_status;
1438 unsigned int ecc_word, ecc_bit;
1439
1440 /* get the status from the Status Register */
1441 ecc_status = ecc_readl(host->ecc, SR);
1442
1443 /* if there's no error */
1444 if (likely(!(ecc_status & ATMEL_ECC_RECERR)))
1445 return 0;
1446
1447 /* get error bit offset (4 bits) */
1448 ecc_bit = ecc_readl(host->ecc, PR) & ATMEL_ECC_BITADDR;
1449 /* get word address (12 bits) */
1450 ecc_word = ecc_readl(host->ecc, PR) & ATMEL_ECC_WORDADDR;
1451 ecc_word >>= 4;
1452
1453 /* if there are multiple errors */
1454 if (ecc_status & ATMEL_ECC_MULERR) {
1455 /* check if it is a freshly erased block
1456 * (filled with 0xff) */
1457 if ((ecc_bit == ATMEL_ECC_BITADDR)
1458 && (ecc_word == (ATMEL_ECC_WORDADDR >> 4))) {
1459 /* the block has just been erased, return OK */
1460 return 0;
1461 }
1462 /* it doesn't seems to be a freshly
1463 * erased block.
1464 * We can't correct so many errors */
1465 dev_dbg(host->dev, "atmel_nand : multiple errors detected."
1466 " Unable to correct.\n");
1467 return -EBADMSG;
1468 }
1469
1470 /* if there's a single bit error : we can correct it */
1471 if (ecc_status & ATMEL_ECC_ECCERR) {
1472 /* there's nothing much to do here.
1473 * the bit error is on the ECC itself.
1474 */
1475 dev_dbg(host->dev, "atmel_nand : one bit error on ECC code."
1476 " Nothing to correct\n");
1477 return 0;
1478 }
1479
1480 dev_dbg(host->dev, "atmel_nand : one bit error on data."
1481 " (word offset in the page :"
1482 " 0x%x bit offset : 0x%x)\n",
1483 ecc_word, ecc_bit);
1484 /* correct the error */
1485 if (nand_chip->options & NAND_BUSWIDTH_16) {
1486 /* 16 bits words */
1487 ((unsigned short *) dat)[ecc_word] ^= (1 << ecc_bit);
1488 } else {
1489 /* 8 bits words */
1490 dat[ecc_word] ^= (1 << ecc_bit);
1491 }
1492 dev_dbg(host->dev, "atmel_nand : error corrected\n");
1493 return 1;
1494}
1495
1496/*
1497 * Enable HW ECC : unused on most chips
1498 */
1499static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
1500{
1501 struct nand_chip *nand_chip = mtd_to_nand(mtd);
1502 struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
1503
1504 if (host->board.need_reset_workaround)
1505 ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
1506}
1507
/*
 * Parse the PMECC-related device tree properties: correction capability,
 * sector size and the ROM lookup-table offsets. Missing offsets trigger a
 * runtime-built lookup table instead of failing.
 *
 * Returns 0 on success or -EINVAL for unsupported/invalid property values.
 */
static int atmel_of_init_ecc(struct atmel_nand_host *host,
			     struct device_node *np)
{
	u32 offset[2];
	u32 val;

	host->has_pmecc = of_property_read_bool(np, "atmel,has-pmecc");

	/* Not using PMECC */
	if (!(host->nand_chip.ecc.mode == NAND_ECC_HW) || !host->has_pmecc)
		return 0;

	/* use PMECC, get correction capability, sector size and lookup
	 * table offset.
	 * If correction bits and sector size are not specified, then find
	 * them from NAND ONFI parameters.
	 */
	if (of_property_read_u32(np, "atmel,pmecc-cap", &val) == 0) {
		if (val > host->caps->pmecc_max_correction) {
			dev_err(host->dev,
				"Required ECC strength too high: %u max %u\n",
				val, host->caps->pmecc_max_correction);
			return -EINVAL;
		}
		/* Only the discrete BCH strengths below are supported */
		if ((val != 2) && (val != 4) && (val != 8) &&
		    (val != 12) && (val != 24) && (val != 32)) {
			dev_err(host->dev,
				"Required ECC strength not supported: %u\n",
				val);
			return -EINVAL;
		}
		host->pmecc_corr_cap = (u8)val;
	}

	if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) == 0) {
		if ((val != 512) && (val != 1024)) {
			dev_err(host->dev,
				"Required ECC sector size not supported: %u\n",
				val);
			return -EINVAL;
		}
		host->pmecc_sector_size = (u16)val;
	}

	if (of_property_read_u32_array(np, "atmel,pmecc-lookup-table-offset",
			offset, 2) != 0) {
		dev_err(host->dev, "Cannot get PMECC lookup table offset, will build a lookup table in runtime.\n");
		host->has_no_lookup_table = true;
		/* Will build a lookup table and initialize the offset later */
		return 0;
	}

	/* Both offsets zero cannot point at a valid ROM table */
	if (!offset[0] && !offset[1]) {
		dev_err(host->dev, "Invalid PMECC lookup table offset\n");
		return -EINVAL;
	}

	host->pmecc_lookup_table_offset_512 = offset[0];
	host->pmecc_lookup_table_offset_1024 = offset[1];

	return 0;
}
1570
/*
 * Parse the generic board/port device tree properties: ALE/CLE address
 * offsets, DMA capability and the rdy/enable/detect GPIOs. Also populates
 * child platform devices (the NFC) and defaults the ECC mode to software.
 *
 * Returns 0 on success or -EINVAL for out-of-range offsets.
 */
static int atmel_of_init_port(struct atmel_nand_host *host,
			      struct device_node *np)
{
	u32 val;
	struct atmel_nand_data *board = &host->board;
	enum of_gpio_flags flags = 0;

	host->caps = (struct atmel_nand_caps *)
		of_device_get_match_data(host->dev);

	/* ALE offset: which address line drives the ALE signal (< 32) */
	if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
		if (val >= 32) {
			dev_err(host->dev, "invalid addr-offset %u\n", val);
			return -EINVAL;
		}
		board->ale = val;
	}

	/* CLE offset: which address line drives the CLE signal (< 32) */
	if (of_property_read_u32(np, "atmel,nand-cmd-offset", &val) == 0) {
		if (val >= 32) {
			dev_err(host->dev, "invalid cmd-offset %u\n", val);
			return -EINVAL;
		}
		board->cle = val;
	}

	board->has_dma = of_property_read_bool(np, "atmel,nand-has-dma");

	/* GPIO 0: ready/busy, GPIO 1: chip enable, GPIO 2: card detect */
	board->rdy_pin = of_get_gpio_flags(np, 0, &flags);
	board->rdy_pin_active_low = (flags == OF_GPIO_ACTIVE_LOW);

	board->enable_pin = of_get_gpio(np, 1);
	board->det_pin = of_get_gpio(np, 2);

	/* load the nfc driver if there is */
	of_platform_populate(np, NULL, NULL, host->dev);

	/*
	 * Initialize ECC mode to NAND_ECC_SOFT so that we have a correct value
	 * even if the nand-ecc-mode property is not defined.
	 */
	host->nand_chip.ecc.mode = NAND_ECC_SOFT;
	host->nand_chip.ecc.algo = NAND_ECC_HAMMING;

	return 0;
}
1617
/*
 * Probe-time setup for the legacy 1-bit HW ECC controller: map its
 * registers, program the page size and install the ECC callbacks.
 * Falls back to software ECC when the registers are missing or the page
 * size is not supported by the hardware.
 *
 * Returns 0 on success (including the software fallback) or a negative
 * error code from the register mapping.
 */
static int atmel_hw_nand_init_params(struct platform_device *pdev,
					 struct atmel_nand_host *host)
{
	struct nand_chip *nand_chip = &host->nand_chip;
	struct mtd_info *mtd = nand_to_mtd(nand_chip);
	struct resource *regs;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!regs) {
		dev_err(host->dev,
			"Can't get I/O resource regs, use software ECC\n");
		nand_chip->ecc.mode = NAND_ECC_SOFT;
		nand_chip->ecc.algo = NAND_ECC_HAMMING;
		return 0;
	}

	host->ecc = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(host->ecc))
		return PTR_ERR(host->ecc);

	/* ECC is calculated for the whole page (1 step) */
	nand_chip->ecc.size = mtd->writesize;

	/* set ECC page size and oob layout */
	switch (mtd->writesize) {
	case 512:
		mtd_set_ooblayout(mtd, &atmel_ooblayout_sp_ops);
		ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
		break;
	case 1024:
		mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
		ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
		break;
	case 2048:
		mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
		ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
		break;
	case 4096:
		mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
		ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
		break;
	default:
		/* page size not handled by HW ECC */
		/* switching back to soft ECC */
		nand_chip->ecc.mode = NAND_ECC_SOFT;
		nand_chip->ecc.algo = NAND_ECC_HAMMING;
		return 0;
	}

	/* set up for HW ECC */
	nand_chip->ecc.calculate = atmel_nand_calculate;
	nand_chip->ecc.correct = atmel_nand_correct;
	nand_chip->ecc.hwctl = atmel_nand_hwctl;
	nand_chip->ecc.read_page = atmel_nand_read_page;
	nand_chip->ecc.bytes = 4;
	nand_chip->ecc.strength = 1;

	return 0;
}
1677
1678static inline u32 nfc_read_status(struct atmel_nand_host *host)
1679{
1680 u32 err_flags = NFC_SR_DTOE | NFC_SR_UNDEF | NFC_SR_AWB | NFC_SR_ASE;
1681 u32 nfc_status = nfc_readl(host->nfc->hsmc_regs, SR);
1682
1683 if (unlikely(nfc_status & err_flags)) {
1684 if (nfc_status & NFC_SR_DTOE)
1685 dev_err(host->dev, "NFC: Waiting Nand R/B Timeout Error\n");
1686 else if (nfc_status & NFC_SR_UNDEF)
1687 dev_err(host->dev, "NFC: Access Undefined Area Error\n");
1688 else if (nfc_status & NFC_SR_AWB)
1689 dev_err(host->dev, "NFC: Access memory While NFC is busy\n");
1690 else if (nfc_status & NFC_SR_ASE)
1691 dev_err(host->dev, "NFC: Access memory Size Error\n");
1692 }
1693
1694 return nfc_status;
1695}
1696
1697/* SMC interrupt service routine */
1698static irqreturn_t hsmc_interrupt(int irq, void *dev_id)
1699{
1700 struct atmel_nand_host *host = dev_id;
1701 u32 status, mask, pending;
1702 irqreturn_t ret = IRQ_NONE;
1703
1704 status = nfc_read_status(host);
1705 mask = nfc_readl(host->nfc->hsmc_regs, IMR);
1706 pending = status & mask;
1707
1708 if (pending & NFC_SR_XFR_DONE) {
1709 complete(&host->nfc->comp_xfer_done);
1710 nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE);
1711 ret = IRQ_HANDLED;
1712 }
1713 if (pending & NFC_SR_RB_EDGE) {
1714 complete(&host->nfc->comp_ready);
1715 nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE);
1716 ret = IRQ_HANDLED;
1717 }
1718 if (pending & NFC_SR_CMD_DONE) {
1719 complete(&host->nfc->comp_cmd_done);
1720 nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_CMD_DONE);
1721 ret = IRQ_HANDLED;
1722 }
1723
1724 return ret;
1725}
1726
1727/* NFC(Nand Flash Controller) related functions */
/*
 * Re-initialize the completion objects matching the requested interrupt
 * flags, then unmask those interrupts. Must be called before the command
 * that triggers them; nfc_wait_interrupt() waits on the completions.
 */
static void nfc_prepare_interrupt(struct atmel_nand_host *host, u32 flag)
{
	if (flag & NFC_SR_XFR_DONE)
		init_completion(&host->nfc->comp_xfer_done);

	if (flag & NFC_SR_RB_EDGE)
		init_completion(&host->nfc->comp_ready);

	if (flag & NFC_SR_CMD_DONE)
		init_completion(&host->nfc->comp_cmd_done);

	/* Enable interrupt that need to wait for */
	nfc_writel(host->nfc->hsmc_regs, IER, flag);
}
1742
/*
 * Wait for every interrupt selected in flag to be signalled by
 * hsmc_interrupt() via its completion object.
 *
 * Returns 0 when all events arrived, -EINVAL for an unknown flag, or
 * -ETIMEDOUT when any event did not arrive within NFC_TIME_OUT_MS (the
 * interrupt is then masked again since nobody will consume it).
 */
static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag)
{
	int i, index = 0;
	struct completion *comp[3];	/* Support 3 interrupt completion */

	if (flag & NFC_SR_XFR_DONE)
		comp[index++] = &host->nfc->comp_xfer_done;

	if (flag & NFC_SR_RB_EDGE)
		comp[index++] = &host->nfc->comp_ready;

	if (flag & NFC_SR_CMD_DONE)
		comp[index++] = &host->nfc->comp_cmd_done;

	if (index == 0) {
		dev_err(host->dev, "Unknown interrupt flag: 0x%08x\n", flag);
		return -EINVAL;
	}

	for (i = 0; i < index; i++) {
		if (wait_for_completion_timeout(comp[i],
				msecs_to_jiffies(NFC_TIME_OUT_MS)))
			continue;	/* wait for next completion */
		else
			goto err_timeout;
	}

	return 0;

err_timeout:
	dev_err(host->dev, "Time out to wait for interrupt: 0x%08x\n", flag);
	/* Disable the interrupt as it is not handled by interrupt handler */
	nfc_writel(host->nfc->hsmc_regs, IDR, flag);
	return -ETIMEDOUT;
}
1778
1779static int nfc_send_command(struct atmel_nand_host *host,
1780 unsigned int cmd, unsigned int addr, unsigned char cycle0)
1781{
1782 unsigned long timeout;
1783 u32 flag = NFC_SR_CMD_DONE;
1784 flag |= cmd & NFCADDR_CMD_DATAEN ? NFC_SR_XFR_DONE : 0;
1785
1786 dev_dbg(host->dev,
1787 "nfc_cmd: 0x%08x, addr1234: 0x%08x, cycle0: 0x%02x\n",
1788 cmd, addr, cycle0);
1789
1790 timeout = jiffies + msecs_to_jiffies(NFC_TIME_OUT_MS);
1791 while (nfc_readl(host->nfc->hsmc_regs, SR) & NFC_SR_BUSY) {
1792 if (time_after(jiffies, timeout)) {
1793 dev_err(host->dev,
1794 "Time out to wait for NFC ready!\n");
1795 return -ETIMEDOUT;
1796 }
1797 }
1798
1799 nfc_prepare_interrupt(host, flag);
1800 nfc_writel(host->nfc->hsmc_regs, CYCLE0, cycle0);
1801 nfc_cmd_addr1234_writel(cmd, addr, host->nfc->base_cmd_regs);
1802 return nfc_wait_interrupt(host, flag);
1803}
1804
1805static int nfc_device_ready(struct mtd_info *mtd)
1806{
1807 u32 status, mask;
1808 struct nand_chip *nand_chip = mtd_to_nand(mtd);
1809 struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
1810
1811 status = nfc_read_status(host);
1812 mask = nfc_readl(host->nfc->hsmc_regs, IMR);
1813
1814 /* The mask should be 0. If not we may lost interrupts */
1815 if (unlikely(mask & status))
1816 dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n",
1817 mask & status);
1818
1819 return status & NFC_SR_RB_EDGE;
1820}
1821
1822static void nfc_select_chip(struct mtd_info *mtd, int chip)
1823{
1824 struct nand_chip *nand_chip = mtd_to_nand(mtd);
1825 struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
1826
1827 if (chip == -1)
1828 nfc_writel(host->nfc->hsmc_regs, CTRL, NFC_CTRL_DISABLE);
1829 else
1830 nfc_writel(host->nfc->hsmc_regs, CTRL, NFC_CTRL_ENABLE);
1831}
1832
1833static int nfc_make_addr(struct mtd_info *mtd, int command, int column,
1834 int page_addr, unsigned int *addr1234, unsigned int *cycle0)
1835{
1836 struct nand_chip *chip = mtd_to_nand(mtd);
1837
1838 int acycle = 0;
1839 unsigned char addr_bytes[8];
1840 int index = 0, bit_shift;
1841
1842 BUG_ON(addr1234 == NULL || cycle0 == NULL);
1843
1844 *cycle0 = 0;
1845 *addr1234 = 0;
1846
1847 if (column != -1) {
1848 if (chip->options & NAND_BUSWIDTH_16 &&
1849 !nand_opcode_8bits(command))
1850 column >>= 1;
1851 addr_bytes[acycle++] = column & 0xff;
1852 if (mtd->writesize > 512)
1853 addr_bytes[acycle++] = (column >> 8) & 0xff;
1854 }
1855
1856 if (page_addr != -1) {
1857 addr_bytes[acycle++] = page_addr & 0xff;
1858 addr_bytes[acycle++] = (page_addr >> 8) & 0xff;
1859 if (chip->chipsize > (128 << 20))
1860 addr_bytes[acycle++] = (page_addr >> 16) & 0xff;
1861 }
1862
1863 if (acycle > 4)
1864 *cycle0 = addr_bytes[index++];
1865
1866 for (bit_shift = 0; index < acycle; bit_shift += 8)
1867 *addr1234 += addr_bytes[index++] << bit_shift;
1868
1869 /* return acycle in cmd register */
1870 return acycle << NFCADDR_CMD_ACYCLE_BIT_POS;
1871}
1872
/*
 * cmdfunc hook: translate a generic NAND command into one NFC command
 * write plus the appropriate wait.
 *
 * Builds the NFCADDR_CMD_* word from the command/column/page triple and
 * sends it via nfc_send_command().  Commands that move data through the
 * NFC SRAM (READ0 and, when will_write_sram is set, SEQIN) additionally
 * enable the data-transfer path; for PMECC the ECC engine must be enabled
 * *before* the command is issued, because the NFC starts transferring data
 * on the bus right after the command goes out.
 */
static void nfc_nand_command(struct mtd_info *mtd, unsigned int command,
		int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct atmel_nand_host *host = nand_get_controller_data(chip);
	unsigned long timeout;
	unsigned int nfc_addr_cmd = 0;

	unsigned int cmd1 = command << NFCADDR_CMD_CMD1_BIT_POS;

	/* Set default settings: no cmd2, no addr cycle. read from nand */
	unsigned int cmd2 = 0;
	unsigned int vcmd2 = 0;
	int acycle = NFCADDR_CMD_ACYCLE_NONE;
	int csid = NFCADDR_CMD_CSID_3;
	int dataen = NFCADDR_CMD_DATADIS;
	int nfcwr = NFCADDR_CMD_NFCRD;
	unsigned int addr1234 = 0;
	unsigned int cycle0 = 0;
	bool do_addr = true;

	/* invalidate any stale SRAM read buffer from a previous command */
	host->nfc->data_in_sram = NULL;

	dev_dbg(host->dev, "%s: cmd = 0x%02x, col = 0x%08x, page = 0x%08x\n",
		 __func__, command, column, page_addr);

	switch (command) {
	case NAND_CMD_RESET:
		/* send RESET, then poll chip status until it reports ready */
		nfc_addr_cmd = cmd1 | acycle | csid | dataen | nfcwr;
		nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0);
		udelay(chip->chip_delay);

		/* recurse to issue a STATUS command, then read its byte */
		nfc_nand_command(mtd, NAND_CMD_STATUS, -1, -1);
		timeout = jiffies + msecs_to_jiffies(NFC_TIME_OUT_MS);
		while (!(chip->read_byte(mtd) & NAND_STATUS_READY)) {
			if (time_after(jiffies, timeout)) {
				dev_err(host->dev,
					"Time out to wait status ready!\n");
				break;
			}
		}
		return;
	case NAND_CMD_STATUS:
		/* one-byte command, no address cycles */
		do_addr = false;
		break;
	case NAND_CMD_PARAM:
	case NAND_CMD_READID:
		/* single address cycle carrying the column byte */
		do_addr = false;
		acycle = NFCADDR_CMD_ACYCLE_1;
		if (column != -1)
			addr1234 = column;
		break;
	case NAND_CMD_RNDOUT:
		/* two-command sequence: RNDOUT then RNDOUTSTART */
		cmd2 = NAND_CMD_RNDOUTSTART << NFCADDR_CMD_CMD2_BIT_POS;
		vcmd2 = NFCADDR_CMD_VCMD2;
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		if (command == NAND_CMD_READOOB) {
			/* READOOB is emulated as READ0 at an OOB column */
			column += mtd->writesize;
			command = NAND_CMD_READ0; /* only READ0 is valid */
			cmd1 = command << NFCADDR_CMD_CMD1_BIT_POS;
		}
		if (host->nfc->use_nfc_sram) {
			/* Enable Data transfer to sram */
			dataen = NFCADDR_CMD_DATAEN;

			/* Need enable PMECC now, since NFC will transfer
			 * data in bus after sending nfc read command.
			 */
			if (chip->ecc.mode == NAND_ECC_HW && host->has_pmecc)
				pmecc_enable(host, NAND_ECC_READ);
		}

		cmd2 = NAND_CMD_READSTART << NFCADDR_CMD_CMD2_BIT_POS;
		vcmd2 = NFCADDR_CMD_VCMD2;
		break;
	/* For prgramming command, the cmd need set to write enable */
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_SEQIN:
	case NAND_CMD_RNDIN:
		nfcwr = NFCADDR_CMD_NFCWR;
		/* SEQIN pushes the SRAM contents out when a write is staged */
		if (host->nfc->will_write_sram && command == NAND_CMD_SEQIN)
			dataen = NFCADDR_CMD_DATAEN;
		break;
	default:
		break;
	}

	if (do_addr)
		acycle = nfc_make_addr(mtd, command, column, page_addr,
				&addr1234, &cycle0);

	nfc_addr_cmd = cmd1 | cmd2 | vcmd2 | acycle | csid | dataen | nfcwr;
	nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0);

	/*
	 * Program and erase have their own busy handlers status, sequential
	 * in, and deplete1 need no delay.
	 */
	switch (command) {
	case NAND_CMD_CACHEDPROG:
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
	case NAND_CMD_RNDIN:
	case NAND_CMD_STATUS:
	case NAND_CMD_RNDOUT:
	case NAND_CMD_SEQIN:
	case NAND_CMD_READID:
		return;

	case NAND_CMD_READ0:
		if (dataen == NFCADDR_CMD_DATAEN) {
			/* page data landed in SRAM; point the read path at it */
			host->nfc->data_in_sram = host->nfc->sram_bank0 +
				nfc_get_sram_off(host);
			return;
		}
		/* fall through */
	default:
		/* everything else must wait for the R/B ready edge */
		nfc_prepare_interrupt(host, NFC_SR_RB_EDGE);
		nfc_wait_interrupt(host, NFC_SR_RB_EDGE);
	}
}
1996
/*
 * write_page hook: program one full page through the NFC SRAM.
 *
 * The page data is staged in the NFC SRAM bank (via DMA when available,
 * falling back to memcpy), then NAND_CMD_SEQIN triggers the NFC to push
 * the SRAM contents to the chip.  Ordering is critical: for PMECC the ECC
 * engine is enabled before SEQIN, because the transfer starts as soon as
 * the command is sent.
 *
 * Returns 0 on success, -EINVAL for subpage writes (unsupported), -EIO on
 * a failed program status, or a negative error from the ECC write path.
 */
static int nfc_sram_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			uint32_t offset, int data_len, const uint8_t *buf,
			int oob_required, int page, int cached, int raw)
{
	int cfg, len;
	int status = 0;
	struct atmel_nand_host *host = nand_get_controller_data(chip);
	void *sram = host->nfc->sram_bank0 + nfc_get_sram_off(host);

	/* Subpage write is not supported */
	if (offset || (data_len < mtd->writesize))
		return -EINVAL;

	len = mtd->writesize;
	/* Copy page data to sram that will write to nand via NFC */
	if (use_dma) {
		if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) != 0)
			/* Fall back to use cpu copy */
			memcpy(sram, buf, len);
	} else {
		memcpy(sram, buf, len);
	}

	/* raw writes carry the OOB too: widen the transfer via WSPARE */
	cfg = nfc_readl(host->nfc->hsmc_regs, CFG);
	if (unlikely(raw) && oob_required) {
		memcpy(sram + len, chip->oob_poi, mtd->oobsize);
		len += mtd->oobsize;
		nfc_writel(host->nfc->hsmc_regs, CFG, cfg | NFC_CFG_WSPARE);
	} else {
		nfc_writel(host->nfc->hsmc_regs, CFG, cfg & ~NFC_CFG_WSPARE);
	}

	if (chip->ecc.mode == NAND_ECC_HW && host->has_pmecc)
		/*
		 * When use NFC sram, need set up PMECC before send
		 * NAND_CMD_SEQIN command. Since when the nand command
		 * is sent, nfc will do transfer from sram and nand.
		 */
		pmecc_enable(host, NAND_ECC_WRITE);

	/* will_write_sram tells cmdfunc to enable the data path on SEQIN */
	host->nfc->will_write_sram = true;
	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
	host->nfc->will_write_sram = false;

	if (likely(!raw))
		/* Need to write ecc into oob */
		status = chip->ecc.write_page(mtd, chip, buf, oob_required,
				page);

	if (status < 0)
		return status;

	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
	status = chip->waitfunc(mtd, chip);

	/* give chip-specific error-status hook a chance to refine status */
	if ((status & NAND_STATUS_FAIL) && (chip->errstat))
		status = chip->errstat(mtd, chip, FL_WRITING, status, page);

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
2060
2061static int nfc_sram_init(struct mtd_info *mtd)
2062{
2063 struct nand_chip *chip = mtd_to_nand(mtd);
2064 struct atmel_nand_host *host = nand_get_controller_data(chip);
2065 int res = 0;
2066
2067 /* Initialize the NFC CFG register */
2068 unsigned int cfg_nfc = 0;
2069
2070 /* set page size and oob layout */
2071 switch (mtd->writesize) {
2072 case 512:
2073 cfg_nfc = NFC_CFG_PAGESIZE_512;
2074 break;
2075 case 1024:
2076 cfg_nfc = NFC_CFG_PAGESIZE_1024;
2077 break;
2078 case 2048:
2079 cfg_nfc = NFC_CFG_PAGESIZE_2048;
2080 break;
2081 case 4096:
2082 cfg_nfc = NFC_CFG_PAGESIZE_4096;
2083 break;
2084 case 8192:
2085 cfg_nfc = NFC_CFG_PAGESIZE_8192;
2086 break;
2087 default:
2088 dev_err(host->dev, "Unsupported page size for NFC.\n");
2089 res = -ENXIO;
2090 return res;
2091 }
2092
2093 /* oob bytes size = (NFCSPARESIZE + 1) * 4
2094 * Max support spare size is 512 bytes. */
2095 cfg_nfc |= (((mtd->oobsize / 4) - 1) << NFC_CFG_NFC_SPARESIZE_BIT_POS
2096 & NFC_CFG_NFC_SPARESIZE);
2097 /* default set a max timeout */
2098 cfg_nfc |= NFC_CFG_RSPARE |
2099 NFC_CFG_NFC_DTOCYC | NFC_CFG_NFC_DTOMUL;
2100
2101 nfc_writel(host->nfc->hsmc_regs, CFG, cfg_nfc);
2102
2103 host->nfc->will_write_sram = false;
2104 nfc_set_sram_bank(host, 0);
2105
2106 /* Use Write page with NFC SRAM only for PMECC or ECC NONE. */
2107 if (host->nfc->write_by_sram) {
2108 if ((chip->ecc.mode == NAND_ECC_HW && host->has_pmecc) ||
2109 chip->ecc.mode == NAND_ECC_NONE)
2110 chip->write_page = nfc_sram_write_page;
2111 else
2112 host->nfc->write_by_sram = false;
2113 }
2114
2115 dev_info(host->dev, "Using NFC Sram read %s\n",
2116 host->nfc->write_by_sram ? "and write" : "");
2117 return 0;
2118}
2119
2120static struct platform_driver atmel_nand_nfc_driver;
2121/*
2122 * Probe for the NAND device.
2123 */
/*
 * Probe for the NAND device.
 *
 * Maps the NAND data window, gathers board configuration (from DT or
 * platform data), hooks the chip up either to the HSMC NFC (when the NFC
 * sub-driver has probed) or to plain cmd_ctrl GPIO control, performs the
 * two-phase nand_scan, configures ECC (PMECC/HSIAO/soft), and registers
 * the MTD device.  Errors unwind through the goto ladder at the bottom.
 */
static int atmel_nand_probe(struct platform_device *pdev)
{
	struct atmel_nand_host *host;
	struct mtd_info *mtd;
	struct nand_chip *nand_chip;
	struct resource *mem;
	int res, irq;

	/* Allocate memory for the device structure (and zero it) */
	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	/*
	 * NOTE(review): a platform_driver_register() failure is only logged
	 * and probing continues without the NFC — presumably intentional
	 * (the NFC is optional), but confirm; 'res' is overwritten below.
	 */
	res = platform_driver_register(&atmel_nand_nfc_driver);
	if (res)
		dev_err(&pdev->dev, "atmel_nand: can't register NFC driver\n");

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->io_base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(host->io_base)) {
		res = PTR_ERR(host->io_base);
		goto err_nand_ioremap;
	}
	host->io_phys = (dma_addr_t)mem->start;

	nand_chip = &host->nand_chip;
	mtd = nand_to_mtd(nand_chip);
	host->dev = &pdev->dev;
	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		nand_set_flash_node(nand_chip, pdev->dev.of_node);
		/* Only when CONFIG_OF is enabled of_node can be parsed */
		res = atmel_of_init_port(host, pdev->dev.of_node);
		if (res)
			goto err_nand_ioremap;
	} else {
		/* legacy board-file configuration path */
		memcpy(&host->board, dev_get_platdata(&pdev->dev),
		       sizeof(struct atmel_nand_data));
		nand_chip->ecc.mode = host->board.ecc_mode;

		/*
		 * When using software ECC every supported avr32 board means
		 * Hamming algorithm. If that ever changes we'll need to add
		 * ecc_algo field to the struct atmel_nand_data.
		 */
		if (nand_chip->ecc.mode == NAND_ECC_SOFT)
			nand_chip->ecc.algo = NAND_ECC_HAMMING;

		/* 16-bit bus width */
		if (host->board.bus_width_16)
			nand_chip->options |= NAND_BUSWIDTH_16;
	}

	/* link the private data structures */
	nand_set_controller_data(nand_chip, host);
	mtd->dev.parent = &pdev->dev;

	/* Set address of NAND IO lines */
	nand_chip->IO_ADDR_R = host->io_base;
	nand_chip->IO_ADDR_W = host->io_base;

	if (nand_nfc.is_initialized) {
		/* NFC driver is probed and initialized */
		host->nfc = &nand_nfc;

		/* route chip operations through the NFC */
		nand_chip->select_chip = nfc_select_chip;
		nand_chip->dev_ready = nfc_device_ready;
		nand_chip->cmdfunc = nfc_nand_command;

		/* Initialize the interrupt for NFC */
		irq = platform_get_irq(pdev, 0);
		if (irq < 0) {
			dev_err(host->dev, "Cannot get HSMC irq!\n");
			res = irq;
			goto err_nand_ioremap;
		}

		res = devm_request_irq(&pdev->dev, irq, hsmc_interrupt,
				0, "hsmc", host);
		if (res) {
			dev_err(&pdev->dev, "Unable to request HSMC irq %d\n",
				irq);
			goto err_nand_ioremap;
		}
	} else {
		/* no NFC: drive CLE/ALE/R-B via GPIO-based cmd_ctrl */
		res = atmel_nand_set_enable_ready_pins(mtd);
		if (res)
			goto err_nand_ioremap;

		nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
	}

	nand_chip->chip_delay = 40; /* 40us command delay time */


	nand_chip->read_buf = atmel_read_buf;
	nand_chip->write_buf = atmel_write_buf;

	platform_set_drvdata(pdev, host);
	atmel_nand_enable(host);

	/* optional card-detect GPIO (SmartMedia sockets) */
	if (gpio_is_valid(host->board.det_pin)) {
		res = devm_gpio_request(&pdev->dev,
				host->board.det_pin, "nand_det");
		if (res < 0) {
			dev_err(&pdev->dev,
				"can't request det gpio %d\n",
				host->board.det_pin);
			goto err_no_card;
		}

		res = gpio_direction_input(host->board.det_pin);
		if (res < 0) {
			dev_err(&pdev->dev,
				"can't request input direction det gpio %d\n",
				host->board.det_pin);
			goto err_no_card;
		}

		if (gpio_get_value(host->board.det_pin)) {
			dev_info(&pdev->dev, "No SmartMedia card inserted.\n");
			res = -ENXIO;
			goto err_no_card;
		}
	}

	if (!host->board.has_dma)
		use_dma = 0;

	if (use_dma) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);
		host->dma_chan = dma_request_channel(mask, NULL, NULL);
		if (!host->dma_chan) {
			/* DMA is best-effort: fall back to CPU copies */
			dev_err(host->dev, "Failed to request DMA channel\n");
			use_dma = 0;
		}
	}
	if (use_dma)
		dev_info(host->dev, "Using %s for DMA transfers.\n",
			 dma_chan_name(host->dma_chan));
	else
		dev_info(host->dev, "No DMA support for NAND access.\n");

	/* first scan to find the device and get the page size */
	res = nand_scan_ident(mtd, 1, NULL);
	if (res)
		goto err_scan_ident;

	if (host->board.on_flash_bbt || on_flash_bbt)
		nand_chip->bbt_options |= NAND_BBT_USE_FLASH;

	if (nand_chip->bbt_options & NAND_BBT_USE_FLASH)
		dev_info(&pdev->dev, "Use On Flash BBT\n");

	/* ECC geometry depends on the page size found by the first scan */
	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		res = atmel_of_init_ecc(host, pdev->dev.of_node);
		if (res)
			goto err_hw_ecc;
	}

	if (nand_chip->ecc.mode == NAND_ECC_HW) {
		if (host->has_pmecc)
			res = atmel_pmecc_nand_init_params(pdev, host);
		else
			res = atmel_hw_nand_init_params(pdev, host);

		if (res != 0)
			goto err_hw_ecc;
	}

	/* initialize the nfc configuration register */
	if (host->nfc && host->nfc->use_nfc_sram) {
		res = nfc_sram_init(mtd);
		if (res) {
			/* non-fatal: continue without SRAM data transfers */
			host->nfc->use_nfc_sram = false;
			dev_err(host->dev, "Disable use nfc sram for data transfer.\n");
		}
	}

	/* second phase scan */
	res = nand_scan_tail(mtd);
	if (res)
		goto err_scan_tail;

	mtd->name = "atmel_nand";
	res = mtd_device_register(mtd, host->board.parts,
				  host->board.num_parts);
	if (!res)
		return res;

err_scan_tail:
	if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW)
		pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
err_hw_ecc:
err_scan_ident:
err_no_card:
	atmel_nand_disable(host);
	if (host->dma_chan)
		dma_release_channel(host->dma_chan);
err_nand_ioremap:
	return res;
}
2328
2329/*
2330 * Remove a NAND device.
2331 */
/*
 * Remove a NAND device.
 *
 * Tears down in reverse probe order: release the MTD/NAND layers first,
 * quiesce the controller, disable PMECC/errloc engines if they were in
 * use, free the DMA channel, and finally unregister the NFC sub-driver
 * that probe registered.
 */
static int atmel_nand_remove(struct platform_device *pdev)
{
	struct atmel_nand_host *host = platform_get_drvdata(pdev);
	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);

	nand_release(mtd);

	atmel_nand_disable(host);

	if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW) {
		pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
		pmerrloc_writel(host->pmerrloc_base, ELDIS,
				PMERRLOC_DISABLE);
	}

	if (host->dma_chan)
		dma_release_channel(host->dma_chan);

	platform_driver_unregister(&atmel_nand_nfc_driver);

	return 0;
}
2354
/*
 * AT91RM9200 does not have PMECC or PMECC Errloc peripherals for
 * BCH ECC. Combined with the "atmel,has-pmecc", it is used to describe
 * devices from the SAM9 family that have those.
 */
static const struct atmel_nand_caps at91rm9200_caps = {
	/* older PMECC revisions cannot correct bitflips in erased pages */
	.pmecc_correct_erase_page = false,
	.pmecc_max_correction = 24,
};

static const struct atmel_nand_caps sama5d4_caps = {
	.pmecc_correct_erase_page = true,
	.pmecc_max_correction = 24,
};

/*
 * The PMECC Errloc controller starting in SAMA5D2 is not compatible,
 * as the increased correction strength requires more registers.
 */
static const struct atmel_nand_caps sama5d2_caps = {
	.pmecc_correct_erase_page = true,
	.pmecc_max_correction = 32,
};

/* DT match table: per-SoC capability data selected by compatible string */
static const struct of_device_id atmel_nand_dt_ids[] = {
	{ .compatible = "atmel,at91rm9200-nand", .data = &at91rm9200_caps },
	{ .compatible = "atmel,sama5d4-nand", .data = &sama5d4_caps },
	{ .compatible = "atmel,sama5d2-nand", .data = &sama5d2_caps },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_nand_dt_ids);
2387
/*
 * Probe the HSMC NFC sub-device.
 *
 * Maps the NFC command registers, the HSMC register block and (optionally)
 * the NFC SRAM, masks/acks any pending NFC interrupts, enables the clock,
 * and marks the shared nand_nfc state as initialized so atmel_nand_probe()
 * will route chip operations through the NFC.
 */
static int atmel_nand_nfc_probe(struct platform_device *pdev)
{
	struct atmel_nfc *nfc = &nand_nfc;
	struct resource *nfc_cmd_regs, *nfc_hsmc_regs, *nfc_sram;
	int ret;

	nfc_cmd_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nfc->base_cmd_regs = devm_ioremap_resource(&pdev->dev, nfc_cmd_regs);
	if (IS_ERR(nfc->base_cmd_regs))
		return PTR_ERR(nfc->base_cmd_regs);

	nfc_hsmc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	nfc->hsmc_regs = devm_ioremap_resource(&pdev->dev, nfc_hsmc_regs);
	if (IS_ERR(nfc->hsmc_regs))
		return PTR_ERR(nfc->hsmc_regs);

	/* the SRAM resource is optional: without it, no SRAM data path */
	nfc_sram = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	if (nfc_sram) {
		nfc->sram_bank0 = (void * __force)
				devm_ioremap_resource(&pdev->dev, nfc_sram);
		if (IS_ERR(nfc->sram_bank0)) {
			dev_warn(&pdev->dev, "Fail to ioremap the NFC sram with error: %ld. So disable NFC sram.\n",
					PTR_ERR(nfc->sram_bank0));
		} else {
			nfc->use_nfc_sram = true;
			nfc->sram_bank0_phys = (dma_addr_t)nfc_sram->start;

			if (pdev->dev.of_node)
				nfc->write_by_sram = of_property_read_bool(
						pdev->dev.of_node,
						"atmel,write-by-sram");
		}
	}

	/* mask all NFC interrupts and ack anything pending */
	nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff);
	nfc_readl(nfc->hsmc_regs, SR); /* clear the NFC_SR */

	/* clock is optional for backward DT compatibility; warn if absent */
	nfc->clk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(nfc->clk)) {
		ret = clk_prepare_enable(nfc->clk);
		if (ret)
			return ret;
	} else {
		dev_warn(&pdev->dev, "NFC clock missing, update your Device Tree");
	}

	nfc->is_initialized = true;
	dev_info(&pdev->dev, "NFC is probed.\n");

	return 0;
}
2439
/*
 * Remove the NFC sub-device: disable its clock if one was acquired.
 * Register mappings are devm-managed and freed automatically.
 */
static int atmel_nand_nfc_remove(struct platform_device *pdev)
{
	struct atmel_nfc *nfc = &nand_nfc;

	/* clk may hold an error pointer if the DT had no clock (see probe) */
	if (!IS_ERR(nfc->clk))
		clk_disable_unprepare(nfc->clk);

	return 0;
}
2449
/* DT match table for the HSMC NFC sub-device */
static const struct of_device_id atmel_nand_nfc_match[] = {
	{ .compatible = "atmel,sama5d3-nfc" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match);

/*
 * The NFC sub-driver is registered/unregistered manually from
 * atmel_nand_probe()/atmel_nand_remove(), not via module_platform_driver.
 */
static struct platform_driver atmel_nand_nfc_driver = {
	.driver = {
		.name = "atmel_nand_nfc",
		.of_match_table = of_match_ptr(atmel_nand_nfc_match),
	},
	.probe = atmel_nand_nfc_probe,
	.remove = atmel_nand_nfc_remove,
};

/* main NAND platform driver */
static struct platform_driver atmel_nand_driver = {
	.probe = atmel_nand_probe,
	.remove = atmel_nand_remove,
	.driver = {
		.name = "atmel_nand",
		.of_match_table = of_match_ptr(atmel_nand_dt_ids),
	},
};

module_platform_driver(atmel_nand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rick Bronson");
MODULE_DESCRIPTION("NAND/SmartMedia driver for AT91 / AVR32");
MODULE_ALIAS("platform:atmel_nand");
diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h
deleted file mode 100644
index 834d694487bd..000000000000
--- a/drivers/mtd/nand/atmel_nand_ecc.h
+++ /dev/null
@@ -1,163 +0,0 @@
1/*
2 * Error Corrected Code Controller (ECC) - System peripherals regsters.
3 * Based on AT91SAM9260 datasheet revision B.
4 *
5 * Copyright (C) 2007 Andrew Victor
6 * Copyright (C) 2007 - 2012 Atmel Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#ifndef ATMEL_NAND_ECC_H
15#define ATMEL_NAND_ECC_H
16
17#define ATMEL_ECC_CR 0x00 /* Control register */
18#define ATMEL_ECC_RST (1 << 0) /* Reset parity */
19
20#define ATMEL_ECC_MR 0x04 /* Mode register */
21#define ATMEL_ECC_PAGESIZE (3 << 0) /* Page Size */
22#define ATMEL_ECC_PAGESIZE_528 (0)
23#define ATMEL_ECC_PAGESIZE_1056 (1)
24#define ATMEL_ECC_PAGESIZE_2112 (2)
25#define ATMEL_ECC_PAGESIZE_4224 (3)
26
27#define ATMEL_ECC_SR 0x08 /* Status register */
28#define ATMEL_ECC_RECERR (1 << 0) /* Recoverable Error */
29#define ATMEL_ECC_ECCERR (1 << 1) /* ECC Single Bit Error */
30#define ATMEL_ECC_MULERR (1 << 2) /* Multiple Errors */
31
32#define ATMEL_ECC_PR 0x0c /* Parity register */
33#define ATMEL_ECC_BITADDR (0xf << 0) /* Bit Error Address */
34#define ATMEL_ECC_WORDADDR (0xfff << 4) /* Word Error Address */
35
36#define ATMEL_ECC_NPR 0x10 /* NParity register */
37#define ATMEL_ECC_NPARITY (0xffff << 0) /* NParity */
38
39/* PMECC Register Definitions */
40#define ATMEL_PMECC_CFG 0x000 /* Configuration Register */
41#define PMECC_CFG_BCH_ERR2 (0 << 0)
42#define PMECC_CFG_BCH_ERR4 (1 << 0)
43#define PMECC_CFG_BCH_ERR8 (2 << 0)
44#define PMECC_CFG_BCH_ERR12 (3 << 0)
45#define PMECC_CFG_BCH_ERR24 (4 << 0)
46#define PMECC_CFG_BCH_ERR32 (5 << 0)
47
48#define PMECC_CFG_SECTOR512 (0 << 4)
49#define PMECC_CFG_SECTOR1024 (1 << 4)
50
51#define PMECC_CFG_PAGE_1SECTOR (0 << 8)
52#define PMECC_CFG_PAGE_2SECTORS (1 << 8)
53#define PMECC_CFG_PAGE_4SECTORS (2 << 8)
54#define PMECC_CFG_PAGE_8SECTORS (3 << 8)
55
56#define PMECC_CFG_READ_OP (0 << 12)
57#define PMECC_CFG_WRITE_OP (1 << 12)
58
59#define PMECC_CFG_SPARE_ENABLE (1 << 16)
60#define PMECC_CFG_SPARE_DISABLE (0 << 16)
61
62#define PMECC_CFG_AUTO_ENABLE (1 << 20)
63#define PMECC_CFG_AUTO_DISABLE (0 << 20)
64
65#define ATMEL_PMECC_SAREA 0x004 /* Spare area size */
66#define ATMEL_PMECC_SADDR 0x008 /* PMECC starting address */
67#define ATMEL_PMECC_EADDR 0x00c /* PMECC ending address */
68#define ATMEL_PMECC_CLK 0x010 /* PMECC clock control */
69#define PMECC_CLK_133MHZ (2 << 0)
70
71#define ATMEL_PMECC_CTRL 0x014 /* PMECC control register */
72#define PMECC_CTRL_RST (1 << 0)
73#define PMECC_CTRL_DATA (1 << 1)
74#define PMECC_CTRL_USER (1 << 2)
75#define PMECC_CTRL_ENABLE (1 << 4)
76#define PMECC_CTRL_DISABLE (1 << 5)
77
78#define ATMEL_PMECC_SR 0x018 /* PMECC status register */
79#define PMECC_SR_BUSY (1 << 0)
80#define PMECC_SR_ENABLE (1 << 4)
81
82#define ATMEL_PMECC_IER 0x01c /* PMECC interrupt enable */
83#define PMECC_IER_ENABLE (1 << 0)
84#define ATMEL_PMECC_IDR 0x020 /* PMECC interrupt disable */
85#define PMECC_IER_DISABLE (1 << 0)
86#define ATMEL_PMECC_IMR 0x024 /* PMECC interrupt mask */
87#define PMECC_IER_MASK (1 << 0)
88#define ATMEL_PMECC_ISR 0x028 /* PMECC interrupt status */
89#define ATMEL_PMECC_ECCx 0x040 /* PMECC ECC x */
90#define ATMEL_PMECC_REMx 0x240 /* PMECC REM x */
91
92/* PMERRLOC Register Definitions */
93#define ATMEL_PMERRLOC_ELCFG 0x000 /* Error location config */
94#define PMERRLOC_ELCFG_SECTOR_512 (0 << 0)
95#define PMERRLOC_ELCFG_SECTOR_1024 (1 << 0)
96#define PMERRLOC_ELCFG_NUM_ERRORS(n) ((n) << 16)
97
98#define ATMEL_PMERRLOC_ELPRIM 0x004 /* Error location primitive */
99#define ATMEL_PMERRLOC_ELEN 0x008 /* Error location enable */
100#define ATMEL_PMERRLOC_ELDIS 0x00c /* Error location disable */
101#define PMERRLOC_DISABLE (1 << 0)
102
103#define ATMEL_PMERRLOC_ELSR 0x010 /* Error location status */
104#define PMERRLOC_ELSR_BUSY (1 << 0)
105#define ATMEL_PMERRLOC_ELIER 0x014 /* Error location int enable */
106#define ATMEL_PMERRLOC_ELIDR 0x018 /* Error location int disable */
107#define ATMEL_PMERRLOC_ELIMR 0x01c /* Error location int mask */
108#define ATMEL_PMERRLOC_ELISR 0x020 /* Error location int status */
109#define PMERRLOC_ERR_NUM_MASK (0x1f << 8)
110#define PMERRLOC_CALC_DONE (1 << 0)
111#define ATMEL_PMERRLOC_SIGMAx 0x028 /* Error location SIGMA x */
112
113/*
114 * The ATMEL_PMERRLOC_ELx register location depends from the number of
115 * bits corrected by the PMECC controller. Do not use it.
116 */
117
118/* Register access macros for PMECC */
119#define pmecc_readl_relaxed(addr, reg) \
120 readl_relaxed((addr) + ATMEL_PMECC_##reg)
121
122#define pmecc_writel(addr, reg, value) \
123 writel((value), (addr) + ATMEL_PMECC_##reg)
124
125#define pmecc_readb_ecc_relaxed(addr, sector, n) \
126 readb_relaxed((addr) + ATMEL_PMECC_ECCx + ((sector) * 0x40) + (n))
127
128#define pmecc_readl_rem_relaxed(addr, sector, n) \
129 readl_relaxed((addr) + ATMEL_PMECC_REMx + ((sector) * 0x40) + ((n) * 4))
130
131#define pmerrloc_readl_relaxed(addr, reg) \
132 readl_relaxed((addr) + ATMEL_PMERRLOC_##reg)
133
134#define pmerrloc_writel(addr, reg, value) \
135 writel((value), (addr) + ATMEL_PMERRLOC_##reg)
136
137#define pmerrloc_writel_sigma_relaxed(addr, n, value) \
138 writel_relaxed((value), (addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
139
140#define pmerrloc_readl_sigma_relaxed(addr, n) \
141 readl_relaxed((addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
142
143#define pmerrloc_readl_el_relaxed(addr, n) \
144 readl_relaxed((addr) + ((n) * 4))
145
146/* Galois field dimension */
147#define PMECC_GF_DIMENSION_13 13
148#define PMECC_GF_DIMENSION_14 14
149
150/* Primitive Polynomial used by PMECC */
151#define PMECC_GF_13_PRIMITIVE_POLY 0x201b
152#define PMECC_GF_14_PRIMITIVE_POLY 0x4443
153
154#define PMECC_LOOKUP_TABLE_SIZE_512 0x2000
155#define PMECC_LOOKUP_TABLE_SIZE_1024 0x4000
156
157/* Time out value for reading PMECC status register */
158#define PMECC_MAX_TIMEOUT_MS 100
159
160/* Reserved bytes in oob area */
161#define PMECC_OOB_RESERVED_BYTES 2
162
163#endif
diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h
deleted file mode 100644
index 4d5d26221a7e..000000000000
--- a/drivers/mtd/nand/atmel_nand_nfc.h
+++ /dev/null
@@ -1,103 +0,0 @@
1/*
2 * Atmel Nand Flash Controller (NFC) - System peripherals regsters.
3 * Based on SAMA5D3 datasheet.
4 *
5 * © Copyright 2013 Atmel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13#ifndef ATMEL_NAND_NFC_H
14#define ATMEL_NAND_NFC_H
15
16/*
17 * HSMC NFC registers
18 */
19#define ATMEL_HSMC_NFC_CFG 0x00 /* NFC Configuration Register */
20#define NFC_CFG_PAGESIZE (7 << 0)
21#define NFC_CFG_PAGESIZE_512 (0 << 0)
22#define NFC_CFG_PAGESIZE_1024 (1 << 0)
23#define NFC_CFG_PAGESIZE_2048 (2 << 0)
24#define NFC_CFG_PAGESIZE_4096 (3 << 0)
25#define NFC_CFG_PAGESIZE_8192 (4 << 0)
26#define NFC_CFG_WSPARE (1 << 8)
27#define NFC_CFG_RSPARE (1 << 9)
28#define NFC_CFG_NFC_DTOCYC (0xf << 16)
29#define NFC_CFG_NFC_DTOMUL (0x7 << 20)
30#define NFC_CFG_NFC_SPARESIZE (0x7f << 24)
31#define NFC_CFG_NFC_SPARESIZE_BIT_POS 24
32
33#define ATMEL_HSMC_NFC_CTRL 0x04 /* NFC Control Register */
34#define NFC_CTRL_ENABLE (1 << 0)
35#define NFC_CTRL_DISABLE (1 << 1)
36
37#define ATMEL_HSMC_NFC_SR 0x08 /* NFC Status Register */
38#define NFC_SR_BUSY (1 << 8)
39#define NFC_SR_XFR_DONE (1 << 16)
40#define NFC_SR_CMD_DONE (1 << 17)
41#define NFC_SR_DTOE (1 << 20)
42#define NFC_SR_UNDEF (1 << 21)
43#define NFC_SR_AWB (1 << 22)
44#define NFC_SR_ASE (1 << 23)
45#define NFC_SR_RB_EDGE (1 << 24)
46
47#define ATMEL_HSMC_NFC_IER 0x0c
48#define ATMEL_HSMC_NFC_IDR 0x10
49#define ATMEL_HSMC_NFC_IMR 0x14
50#define ATMEL_HSMC_NFC_CYCLE0 0x18 /* NFC Address Cycle Zero */
51#define ATMEL_HSMC_NFC_ADDR_CYCLE0 (0xff)
52
53#define ATMEL_HSMC_NFC_BANK 0x1c /* NFC Bank Register */
54#define ATMEL_HSMC_NFC_BANK0 (0 << 0)
55#define ATMEL_HSMC_NFC_BANK1 (1 << 0)
56
57#define nfc_writel(addr, reg, value) \
58 writel((value), (addr) + ATMEL_HSMC_NFC_##reg)
59
60#define nfc_readl(addr, reg) \
61 readl_relaxed((addr) + ATMEL_HSMC_NFC_##reg)
62
63/*
64 * NFC Address Command definitions
65 */
66#define NFCADDR_CMD_CMD1 (0xff << 2) /* Command for Cycle 1 */
67#define NFCADDR_CMD_CMD1_BIT_POS 2
68#define NFCADDR_CMD_CMD2 (0xff << 10) /* Command for Cycle 2 */
69#define NFCADDR_CMD_CMD2_BIT_POS 10
70#define NFCADDR_CMD_VCMD2 (0x1 << 18) /* Valid Cycle 2 Command */
71#define NFCADDR_CMD_ACYCLE (0x7 << 19) /* Number of Address required */
72#define NFCADDR_CMD_ACYCLE_NONE (0x0 << 19)
73#define NFCADDR_CMD_ACYCLE_1 (0x1 << 19)
74#define NFCADDR_CMD_ACYCLE_2 (0x2 << 19)
75#define NFCADDR_CMD_ACYCLE_3 (0x3 << 19)
76#define NFCADDR_CMD_ACYCLE_4 (0x4 << 19)
77#define NFCADDR_CMD_ACYCLE_5 (0x5 << 19)
78#define NFCADDR_CMD_ACYCLE_BIT_POS 19
79#define NFCADDR_CMD_CSID (0x7 << 22) /* Chip Select Identifier */
80#define NFCADDR_CMD_CSID_0 (0x0 << 22)
81#define NFCADDR_CMD_CSID_1 (0x1 << 22)
82#define NFCADDR_CMD_CSID_2 (0x2 << 22)
83#define NFCADDR_CMD_CSID_3 (0x3 << 22)
84#define NFCADDR_CMD_CSID_4 (0x4 << 22)
85#define NFCADDR_CMD_CSID_5 (0x5 << 22)
86#define NFCADDR_CMD_CSID_6 (0x6 << 22)
87#define NFCADDR_CMD_CSID_7 (0x7 << 22)
88#define NFCADDR_CMD_DATAEN (0x1 << 25) /* Data Transfer Enable */
89#define NFCADDR_CMD_DATADIS (0x0 << 25) /* Data Transfer Disable */
90#define NFCADDR_CMD_NFCRD (0x0 << 26) /* NFC Read Enable */
91#define NFCADDR_CMD_NFCWR (0x1 << 26) /* NFC Write Enable */
92#define NFCADDR_CMD_NFCBUSY (0x1 << 27) /* NFC Busy */
93
94#define nfc_cmd_addr1234_writel(cmd, addr1234, nfc_base) \
95 writel((addr1234), (cmd) + nfc_base)
96
97#define nfc_cmd_readl(bitstatus, nfc_base) \
98 readl_relaxed((bitstatus) + nfc_base)
99
100#define NFC_TIME_OUT_MS 100
101#define NFC_SRAM_BANK1_OFFSET 0x1200
102
103#endif
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index 42ebd73f821d..7419c5ce63f8 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -101,6 +101,9 @@ struct brcm_nand_dma_desc {
101#define BRCMNAND_MIN_BLOCKSIZE (8 * 1024) 101#define BRCMNAND_MIN_BLOCKSIZE (8 * 1024)
102#define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024) 102#define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
103 103
104#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY)
105#define NAND_POLL_STATUS_TIMEOUT_MS 100
106
104/* Controller feature flags */ 107/* Controller feature flags */
105enum { 108enum {
106 BRCMNAND_HAS_1K_SECTORS = BIT(0), 109 BRCMNAND_HAS_1K_SECTORS = BIT(0),
@@ -765,6 +768,31 @@ enum {
765 CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30), 768 CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30),
766}; 769};
767 770
771static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
772 u32 mask, u32 expected_val,
773 unsigned long timeout_ms)
774{
775 unsigned long limit;
776 u32 val;
777
778 if (!timeout_ms)
779 timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS;
780
781 limit = jiffies + msecs_to_jiffies(timeout_ms);
782 do {
783 val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
784 if ((val & mask) == expected_val)
785 return 0;
786
787 cpu_relax();
788 } while (time_after(limit, jiffies));
789
790 dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
791 expected_val, val & mask);
792
793 return -ETIMEDOUT;
794}
795
768static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en) 796static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
769{ 797{
770 u32 val = en ? CS_SELECT_NAND_WP : 0; 798 u32 val = en ? CS_SELECT_NAND_WP : 0;
@@ -1024,12 +1052,39 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp)
1024 1052
1025 if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) { 1053 if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
1026 static int old_wp = -1; 1054 static int old_wp = -1;
1055 int ret;
1027 1056
1028 if (old_wp != wp) { 1057 if (old_wp != wp) {
1029 dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off"); 1058 dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
1030 old_wp = wp; 1059 old_wp = wp;
1031 } 1060 }
1061
1062 /*
1063 * make sure ctrl/flash ready before and after
1064 * changing state of #WP pin
1065 */
1066 ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY |
1067 NAND_STATUS_READY,
1068 NAND_CTRL_RDY |
1069 NAND_STATUS_READY, 0);
1070 if (ret)
1071 return;
1072
1032 brcmnand_set_wp(ctrl, wp); 1073 brcmnand_set_wp(ctrl, wp);
1074 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
1075 /* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
1076 ret = bcmnand_ctrl_poll_status(ctrl,
1077 NAND_CTRL_RDY |
1078 NAND_STATUS_READY |
1079 NAND_STATUS_WP,
1080 NAND_CTRL_RDY |
1081 NAND_STATUS_READY |
1082 (wp ? 0 : NAND_STATUS_WP), 0);
1083
1084 if (ret)
1085 dev_err_ratelimited(&host->pdev->dev,
1086 "nand #WP expected %s\n",
1087 wp ? "on" : "off");
1033 } 1088 }
1034} 1089}
1035 1090
@@ -1157,15 +1212,15 @@ static irqreturn_t brcmnand_dma_irq(int irq, void *data)
1157static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd) 1212static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
1158{ 1213{
1159 struct brcmnand_controller *ctrl = host->ctrl; 1214 struct brcmnand_controller *ctrl = host->ctrl;
1160 u32 intfc; 1215 int ret;
1161 1216
1162 dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd, 1217 dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd,
1163 brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS)); 1218 brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS));
1164 BUG_ON(ctrl->cmd_pending != 0); 1219 BUG_ON(ctrl->cmd_pending != 0);
1165 ctrl->cmd_pending = cmd; 1220 ctrl->cmd_pending = cmd;
1166 1221
1167 intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); 1222 ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
1168 WARN_ON(!(intfc & INTFC_CTLR_READY)); 1223 WARN_ON(ret);
1169 1224
1170 mb(); /* flush previous writes */ 1225 mb(); /* flush previous writes */
1171 brcmnand_write_reg(ctrl, BRCMNAND_CMD_START, 1226 brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 226ac0bcafc6..949b9400dcb7 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -145,7 +145,7 @@ static int __init cmx270_init(void)
145 145
146 ret = gpio_request(GPIO_NAND_CS, "NAND CS"); 146 ret = gpio_request(GPIO_NAND_CS, "NAND CS");
147 if (ret) { 147 if (ret) {
148 pr_warning("CM-X270: failed to request NAND CS gpio\n"); 148 pr_warn("CM-X270: failed to request NAND CS gpio\n");
149 return ret; 149 return ret;
150 } 150 }
151 151
@@ -153,7 +153,7 @@ static int __init cmx270_init(void)
153 153
154 ret = gpio_request(GPIO_NAND_RB, "NAND R/B"); 154 ret = gpio_request(GPIO_NAND_RB, "NAND R/B");
155 if (ret) { 155 if (ret) {
156 pr_warning("CM-X270: failed to request NAND R/B gpio\n"); 156 pr_warn("CM-X270: failed to request NAND R/B gpio\n");
157 goto err_gpio_request; 157 goto err_gpio_request;
158 } 158 }
159 159
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 27fa8b87cd5f..531c51991e57 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -581,6 +581,17 @@ static struct davinci_nand_pdata
581 "ti,davinci-nand-use-bbt")) 581 "ti,davinci-nand-use-bbt"))
582 pdata->bbt_options = NAND_BBT_USE_FLASH; 582 pdata->bbt_options = NAND_BBT_USE_FLASH;
583 583
584 /*
585 * Since kernel v4.8, this driver has been fixed to enable
586 * use of 4-bit hardware ECC with subpages and verified on
587 * TI's keystone EVMs (K2L, K2HK and K2E).
588 * However, in the interest of not breaking systems using
589 * existing UBI partitions, sub-page writes are not being
590 * (re)enabled. If you want to use subpage writes on Keystone
591 * platforms (i.e. do not have any existing UBI partitions),
592 * then use "ti,davinci-nand" as the compatible in your
593 * device-tree file.
594 */
584 if (of_device_is_compatible(pdev->dev.of_node, 595 if (of_device_is_compatible(pdev->dev.of_node,
585 "ti,keystone-nand")) { 596 "ti,keystone-nand")) {
586 pdata->options |= NAND_NO_SUBPAGE_WRITE; 597 pdata->options |= NAND_NO_SUBPAGE_WRITE;
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 73b9d4e2dca0..16634df2e39a 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -45,16 +45,16 @@ MODULE_PARM_DESC(onfi_timing_mode,
45 * We define a macro here that combines all interrupts this driver uses into 45 * We define a macro here that combines all interrupts this driver uses into
46 * a single constant value, for convenience. 46 * a single constant value, for convenience.
47 */ 47 */
48#define DENALI_IRQ_ALL (INTR_STATUS__DMA_CMD_COMP | \ 48#define DENALI_IRQ_ALL (INTR__DMA_CMD_COMP | \
49 INTR_STATUS__ECC_TRANSACTION_DONE | \ 49 INTR__ECC_TRANSACTION_DONE | \
50 INTR_STATUS__ECC_ERR | \ 50 INTR__ECC_ERR | \
51 INTR_STATUS__PROGRAM_FAIL | \ 51 INTR__PROGRAM_FAIL | \
52 INTR_STATUS__LOAD_COMP | \ 52 INTR__LOAD_COMP | \
53 INTR_STATUS__PROGRAM_COMP | \ 53 INTR__PROGRAM_COMP | \
54 INTR_STATUS__TIME_OUT | \ 54 INTR__TIME_OUT | \
55 INTR_STATUS__ERASE_FAIL | \ 55 INTR__ERASE_FAIL | \
56 INTR_STATUS__RST_COMP | \ 56 INTR__RST_COMP | \
57 INTR_STATUS__ERASE_COMP) 57 INTR__ERASE_COMP)
58 58
59/* 59/*
60 * indicates whether or not the internal value for the flash bank is 60 * indicates whether or not the internal value for the flash bank is
@@ -62,8 +62,6 @@ MODULE_PARM_DESC(onfi_timing_mode,
62 */ 62 */
63#define CHIP_SELECT_INVALID -1 63#define CHIP_SELECT_INVALID -1
64 64
65#define SUPPORT_8BITECC 1
66
67/* 65/*
68 * This macro divides two integers and rounds fractional values up 66 * This macro divides two integers and rounds fractional values up
69 * to the nearest integer value. 67 * to the nearest integer value.
@@ -86,16 +84,10 @@ static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
86#define SPARE_ACCESS 0x41 84#define SPARE_ACCESS 0x41
87#define MAIN_ACCESS 0x42 85#define MAIN_ACCESS 0x42
88#define MAIN_SPARE_ACCESS 0x43 86#define MAIN_SPARE_ACCESS 0x43
89#define PIPELINE_ACCESS 0x2000
90 87
91#define DENALI_READ 0 88#define DENALI_READ 0
92#define DENALI_WRITE 0x100 89#define DENALI_WRITE 0x100
93 90
94/* types of device accesses. We can issue commands and get status */
95#define COMMAND_CYCLE 0
96#define ADDR_CYCLE 1
97#define STATUS_CYCLE 2
98
99/* 91/*
100 * this is a helper macro that allows us to 92 * this is a helper macro that allows us to
101 * format the bank into the proper bits for the controller 93 * format the bank into the proper bits for the controller
@@ -164,7 +156,7 @@ static void read_status(struct denali_nand_info *denali)
164static void reset_bank(struct denali_nand_info *denali) 156static void reset_bank(struct denali_nand_info *denali)
165{ 157{
166 uint32_t irq_status; 158 uint32_t irq_status;
167 uint32_t irq_mask = INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT; 159 uint32_t irq_mask = INTR__RST_COMP | INTR__TIME_OUT;
168 160
169 clear_interrupts(denali); 161 clear_interrupts(denali);
170 162
@@ -172,7 +164,7 @@ static void reset_bank(struct denali_nand_info *denali)
172 164
173 irq_status = wait_for_irq(denali, irq_mask); 165 irq_status = wait_for_irq(denali, irq_mask);
174 166
175 if (irq_status & INTR_STATUS__TIME_OUT) 167 if (irq_status & INTR__TIME_OUT)
176 dev_err(denali->dev, "reset bank failed.\n"); 168 dev_err(denali->dev, "reset bank failed.\n");
177} 169}
178 170
@@ -182,22 +174,22 @@ static uint16_t denali_nand_reset(struct denali_nand_info *denali)
182 int i; 174 int i;
183 175
184 for (i = 0; i < denali->max_banks; i++) 176 for (i = 0; i < denali->max_banks; i++)
185 iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT, 177 iowrite32(INTR__RST_COMP | INTR__TIME_OUT,
186 denali->flash_reg + INTR_STATUS(i)); 178 denali->flash_reg + INTR_STATUS(i));
187 179
188 for (i = 0; i < denali->max_banks; i++) { 180 for (i = 0; i < denali->max_banks; i++) {
189 iowrite32(1 << i, denali->flash_reg + DEVICE_RESET); 181 iowrite32(1 << i, denali->flash_reg + DEVICE_RESET);
190 while (!(ioread32(denali->flash_reg + INTR_STATUS(i)) & 182 while (!(ioread32(denali->flash_reg + INTR_STATUS(i)) &
191 (INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT))) 183 (INTR__RST_COMP | INTR__TIME_OUT)))
192 cpu_relax(); 184 cpu_relax();
193 if (ioread32(denali->flash_reg + INTR_STATUS(i)) & 185 if (ioread32(denali->flash_reg + INTR_STATUS(i)) &
194 INTR_STATUS__TIME_OUT) 186 INTR__TIME_OUT)
195 dev_dbg(denali->dev, 187 dev_dbg(denali->dev,
196 "NAND Reset operation timed out on bank %d\n", i); 188 "NAND Reset operation timed out on bank %d\n", i);
197 } 189 }
198 190
199 for (i = 0; i < denali->max_banks; i++) 191 for (i = 0; i < denali->max_banks; i++)
200 iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT, 192 iowrite32(INTR__RST_COMP | INTR__TIME_OUT,
201 denali->flash_reg + INTR_STATUS(i)); 193 denali->flash_reg + INTR_STATUS(i));
202 194
203 return PASS; 195 return PASS;
@@ -347,52 +339,25 @@ static void get_samsung_nand_para(struct denali_nand_info *denali,
347 339
348static void get_toshiba_nand_para(struct denali_nand_info *denali) 340static void get_toshiba_nand_para(struct denali_nand_info *denali)
349{ 341{
350 uint32_t tmp;
351
352 /* 342 /*
353 * Workaround to fix a controller bug which reports a wrong 343 * Workaround to fix a controller bug which reports a wrong
354 * spare area size for some kind of Toshiba NAND device 344 * spare area size for some kind of Toshiba NAND device
355 */ 345 */
356 if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) && 346 if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
357 (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) { 347 (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64))
358 iowrite32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); 348 iowrite32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
359 tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) *
360 ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
361 iowrite32(tmp,
362 denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
363#if SUPPORT_15BITECC
364 iowrite32(15, denali->flash_reg + ECC_CORRECTION);
365#elif SUPPORT_8BITECC
366 iowrite32(8, denali->flash_reg + ECC_CORRECTION);
367#endif
368 }
369} 349}
370 350
371static void get_hynix_nand_para(struct denali_nand_info *denali, 351static void get_hynix_nand_para(struct denali_nand_info *denali,
372 uint8_t device_id) 352 uint8_t device_id)
373{ 353{
374 uint32_t main_size, spare_size;
375
376 switch (device_id) { 354 switch (device_id) {
377 case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */ 355 case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
378 case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */ 356 case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
379 iowrite32(128, denali->flash_reg + PAGES_PER_BLOCK); 357 iowrite32(128, denali->flash_reg + PAGES_PER_BLOCK);
380 iowrite32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE); 358 iowrite32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
381 iowrite32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); 359 iowrite32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
382 main_size = 4096 *
383 ioread32(denali->flash_reg + DEVICES_CONNECTED);
384 spare_size = 224 *
385 ioread32(denali->flash_reg + DEVICES_CONNECTED);
386 iowrite32(main_size,
387 denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
388 iowrite32(spare_size,
389 denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
390 iowrite32(0, denali->flash_reg + DEVICE_WIDTH); 360 iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
391#if SUPPORT_15BITECC
392 iowrite32(15, denali->flash_reg + ECC_CORRECTION);
393#elif SUPPORT_8BITECC
394 iowrite32(8, denali->flash_reg + ECC_CORRECTION);
395#endif
396 break; 361 break;
397 default: 362 default:
398 dev_warn(denali->dev, 363 dev_warn(denali->dev,
@@ -454,17 +419,12 @@ static void find_valid_banks(struct denali_nand_info *denali)
454static void detect_max_banks(struct denali_nand_info *denali) 419static void detect_max_banks(struct denali_nand_info *denali)
455{ 420{
456 uint32_t features = ioread32(denali->flash_reg + FEATURES); 421 uint32_t features = ioread32(denali->flash_reg + FEATURES);
457 /*
458 * Read the revision register, so we can calculate the max_banks
459 * properly: the encoding changed from rev 5.0 to 5.1
460 */
461 u32 revision = MAKE_COMPARABLE_REVISION(
462 ioread32(denali->flash_reg + REVISION));
463 422
464 if (revision < REVISION_5_1) 423 denali->max_banks = 1 << (features & FEATURES__N_BANKS);
465 denali->max_banks = 2 << (features & FEATURES__N_BANKS); 424
466 else 425 /* the encoding changed from rev 5.0 to 5.1 */
467 denali->max_banks = 1 << (features & FEATURES__N_BANKS); 426 if (denali->revision < 0x0501)
427 denali->max_banks <<= 1;
468} 428}
469 429
470static uint16_t denali_nand_timing_set(struct denali_nand_info *denali) 430static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
@@ -653,7 +613,6 @@ static irqreturn_t denali_isr(int irq, void *dev_id)
653 spin_unlock(&denali->irq_lock); 613 spin_unlock(&denali->irq_lock);
654 return result; 614 return result;
655} 615}
656#define BANK(x) ((x) << 24)
657 616
658static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask) 617static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
659{ 618{
@@ -718,15 +677,7 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
718 int access_type, int op) 677 int access_type, int op)
719{ 678{
720 int status = PASS; 679 int status = PASS;
721 uint32_t page_count = 1; 680 uint32_t addr, cmd;
722 uint32_t addr, cmd, irq_status, irq_mask;
723
724 if (op == DENALI_READ)
725 irq_mask = INTR_STATUS__LOAD_COMP;
726 else if (op == DENALI_WRITE)
727 irq_mask = 0;
728 else
729 BUG();
730 681
731 setup_ecc_for_xfer(denali, ecc_en, transfer_spare); 682 setup_ecc_for_xfer(denali, ecc_en, transfer_spare);
732 683
@@ -749,35 +700,8 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
749 cmd = MODE_10 | addr; 700 cmd = MODE_10 | addr;
750 index_addr(denali, cmd, access_type); 701 index_addr(denali, cmd, access_type);
751 702
752 /* 703 cmd = MODE_01 | addr;
753 * page 33 of the NAND controller spec indicates we should not 704 iowrite32(cmd, denali->flash_mem);
754 * use the pipeline commands in Spare area only mode.
755 * So we don't.
756 */
757 if (access_type == SPARE_ACCESS) {
758 cmd = MODE_01 | addr;
759 iowrite32(cmd, denali->flash_mem);
760 } else {
761 index_addr(denali, cmd,
762 PIPELINE_ACCESS | op | page_count);
763
764 /*
765 * wait for command to be accepted
766 * can always use status0 bit as the
767 * mask is identical for each bank.
768 */
769 irq_status = wait_for_irq(denali, irq_mask);
770
771 if (irq_status == 0) {
772 dev_err(denali->dev,
773 "cmd, page, addr on timeout (0x%x, 0x%x, 0x%x)\n",
774 cmd, denali->page, addr);
775 status = FAIL;
776 } else {
777 cmd = MODE_01 | addr;
778 iowrite32(cmd, denali->flash_mem);
779 }
780 }
781 } 705 }
782 return status; 706 return status;
783} 707}
@@ -829,8 +753,7 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
829{ 753{
830 struct denali_nand_info *denali = mtd_to_denali(mtd); 754 struct denali_nand_info *denali = mtd_to_denali(mtd);
831 uint32_t irq_status; 755 uint32_t irq_status;
832 uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP | 756 uint32_t irq_mask = INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL;
833 INTR_STATUS__PROGRAM_FAIL;
834 int status = 0; 757 int status = 0;
835 758
836 denali->page = page; 759 denali->page = page;
@@ -857,7 +780,7 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
857static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) 780static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
858{ 781{
859 struct denali_nand_info *denali = mtd_to_denali(mtd); 782 struct denali_nand_info *denali = mtd_to_denali(mtd);
860 uint32_t irq_mask = INTR_STATUS__LOAD_COMP; 783 uint32_t irq_mask = INTR__LOAD_COMP;
861 uint32_t irq_status, addr, cmd; 784 uint32_t irq_status, addr, cmd;
862 785
863 denali->page = page; 786 denali->page = page;
@@ -890,98 +813,158 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
890 } 813 }
891} 814}
892 815
893/* 816static int denali_check_erased_page(struct mtd_info *mtd,
894 * this function examines buffers to see if they contain data that 817 struct nand_chip *chip, uint8_t *buf,
895 * indicate that the buffer is part of an erased region of flash. 818 unsigned long uncor_ecc_flags,
896 */ 819 unsigned int max_bitflips)
897static bool is_erased(uint8_t *buf, int len)
898{ 820{
899 int i; 821 uint8_t *ecc_code = chip->buffers->ecccode;
822 int ecc_steps = chip->ecc.steps;
823 int ecc_size = chip->ecc.size;
824 int ecc_bytes = chip->ecc.bytes;
825 int i, ret, stat;
826
827 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
828 chip->ecc.total);
829 if (ret)
830 return ret;
831
832 for (i = 0; i < ecc_steps; i++) {
833 if (!(uncor_ecc_flags & BIT(i)))
834 continue;
900 835
901 for (i = 0; i < len; i++) 836 stat = nand_check_erased_ecc_chunk(buf, ecc_size,
902 if (buf[i] != 0xFF) 837 ecc_code, ecc_bytes,
903 return false; 838 NULL, 0,
904 return true; 839 chip->ecc.strength);
840 if (stat < 0) {
841 mtd->ecc_stats.failed++;
842 } else {
843 mtd->ecc_stats.corrected += stat;
844 max_bitflips = max_t(unsigned int, max_bitflips, stat);
845 }
846
847 buf += ecc_size;
848 ecc_code += ecc_bytes;
849 }
850
851 return max_bitflips;
852}
853
854static int denali_hw_ecc_fixup(struct mtd_info *mtd,
855 struct denali_nand_info *denali,
856 unsigned long *uncor_ecc_flags)
857{
858 struct nand_chip *chip = mtd_to_nand(mtd);
859 int bank = denali->flash_bank;
860 uint32_t ecc_cor;
861 unsigned int max_bitflips;
862
863 ecc_cor = ioread32(denali->flash_reg + ECC_COR_INFO(bank));
864 ecc_cor >>= ECC_COR_INFO__SHIFT(bank);
865
866 if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
867 /*
868 * This flag is set when uncorrectable error occurs at least in
869 * one ECC sector. We can not know "how many sectors", or
870 * "which sector(s)". We need erase-page check for all sectors.
871 */
872 *uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
873 return 0;
874 }
875
876 max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS;
877
878 /*
879 * The register holds the maximum of per-sector corrected bitflips.
880 * This is suitable for the return value of the ->read_page() callback.
881 * Unfortunately, we can not know the total number of corrected bits in
882 * the page. Increase the stats by max_bitflips. (compromised solution)
883 */
884 mtd->ecc_stats.corrected += max_bitflips;
885
886 return max_bitflips;
905} 887}
888
906#define ECC_SECTOR_SIZE 512 889#define ECC_SECTOR_SIZE 512
907 890
908#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12) 891#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
909#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET)) 892#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
910#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK) 893#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
911#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO__ERROR_TYPE)) 894#define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE)
912#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8) 895#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
913#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO) 896#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
914 897
915static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf, 898static int denali_sw_ecc_fixup(struct mtd_info *mtd,
916 uint32_t irq_status, unsigned int *max_bitflips) 899 struct denali_nand_info *denali,
900 unsigned long *uncor_ecc_flags, uint8_t *buf)
917{ 901{
918 bool check_erased_page = false;
919 unsigned int bitflips = 0; 902 unsigned int bitflips = 0;
903 unsigned int max_bitflips = 0;
904 uint32_t err_addr, err_cor_info;
905 unsigned int err_byte, err_sector, err_device;
906 uint8_t err_cor_value;
907 unsigned int prev_sector = 0;
920 908
921 if (irq_status & INTR_STATUS__ECC_ERR) { 909 /* read the ECC errors. we'll ignore them for now */
922 /* read the ECC errors. we'll ignore them for now */ 910 denali_set_intr_modes(denali, false);
923 uint32_t err_address, err_correction_info, err_byte, 911
924 err_sector, err_device, err_correction_value; 912 do {
925 denali_set_intr_modes(denali, false); 913 err_addr = ioread32(denali->flash_reg + ECC_ERROR_ADDRESS);
926 914 err_sector = ECC_SECTOR(err_addr);
927 do { 915 err_byte = ECC_BYTE(err_addr);
928 err_address = ioread32(denali->flash_reg + 916
929 ECC_ERROR_ADDRESS); 917 err_cor_info = ioread32(denali->flash_reg + ERR_CORRECTION_INFO);
930 err_sector = ECC_SECTOR(err_address); 918 err_cor_value = ECC_CORRECTION_VALUE(err_cor_info);
931 err_byte = ECC_BYTE(err_address); 919 err_device = ECC_ERR_DEVICE(err_cor_info);
932 920
933 err_correction_info = ioread32(denali->flash_reg + 921 /* reset the bitflip counter when crossing ECC sector */
934 ERR_CORRECTION_INFO); 922 if (err_sector != prev_sector)
935 err_correction_value = 923 bitflips = 0;
936 ECC_CORRECTION_VALUE(err_correction_info); 924
937 err_device = ECC_ERR_DEVICE(err_correction_info); 925 if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) {
938 926 /*
939 if (ECC_ERROR_CORRECTABLE(err_correction_info)) { 927 * Check later if this is a real ECC error, or
940 /* 928 * an erased sector.
941 * If err_byte is larger than ECC_SECTOR_SIZE, 929 */
942 * means error happened in OOB, so we ignore 930 *uncor_ecc_flags |= BIT(err_sector);
943 * it. It's no need for us to correct it 931 } else if (err_byte < ECC_SECTOR_SIZE) {
944 * err_device is represented the NAND error 932 /*
945 * bits are happened in if there are more 933 * If err_byte is larger than ECC_SECTOR_SIZE, means error
946 * than one NAND connected. 934 * happened in OOB, so we ignore it. It's no need for
947 */ 935 * us to correct it err_device is represented the NAND
948 if (err_byte < ECC_SECTOR_SIZE) { 936 * error bits are happened in if there are more than
949 struct mtd_info *mtd = 937 * one NAND connected.
950 nand_to_mtd(&denali->nand); 938 */
951 int offset; 939 int offset;
952 940 unsigned int flips_in_byte;
953 offset = (err_sector * 941
954 ECC_SECTOR_SIZE + 942 offset = (err_sector * ECC_SECTOR_SIZE + err_byte) *
955 err_byte) * 943 denali->devnum + err_device;
956 denali->devnum + 944
957 err_device; 945 /* correct the ECC error */
958 /* correct the ECC error */ 946 flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
959 buf[offset] ^= err_correction_value; 947 buf[offset] ^= err_cor_value;
960 mtd->ecc_stats.corrected++; 948 mtd->ecc_stats.corrected += flips_in_byte;
961 bitflips++; 949 bitflips += flips_in_byte;
962 } 950
963 } else { 951 max_bitflips = max(max_bitflips, bitflips);
964 /* 952 }
965 * if the error is not correctable, need to 953
966 * look at the page to see if it is an erased 954 prev_sector = err_sector;
967 * page. if so, then it's not a real ECC error 955 } while (!ECC_LAST_ERR(err_cor_info));
968 */ 956
969 check_erased_page = true; 957 /*
970 } 958 * Once handle all ecc errors, controller will trigger a
971 } while (!ECC_LAST_ERR(err_correction_info)); 959 * ECC_TRANSACTION_DONE interrupt, so here just wait for
972 /* 960 * a while for this interrupt
973 * Once handle all ecc errors, controller will triger 961 */
974 * a ECC_TRANSACTION_DONE interrupt, so here just wait 962 while (!(read_interrupt_status(denali) & INTR__ECC_TRANSACTION_DONE))
975 * for a while for this interrupt 963 cpu_relax();
976 */ 964 clear_interrupts(denali);
977 while (!(read_interrupt_status(denali) & 965 denali_set_intr_modes(denali, true);
978 INTR_STATUS__ECC_TRANSACTION_DONE)) 966
979 cpu_relax(); 967 return max_bitflips;
980 clear_interrupts(denali);
981 denali_set_intr_modes(denali, true);
982 }
983 *max_bitflips = bitflips;
984 return check_erased_page;
985} 968}
986 969
987/* programs the controller to either enable/disable DMA transfers */ 970/* programs the controller to either enable/disable DMA transfers */
@@ -991,8 +974,30 @@ static void denali_enable_dma(struct denali_nand_info *denali, bool en)
991 ioread32(denali->flash_reg + DMA_ENABLE); 974 ioread32(denali->flash_reg + DMA_ENABLE);
992} 975}
993 976
994/* setups the HW to perform the data DMA */ 977static void denali_setup_dma64(struct denali_nand_info *denali, int op)
995static void denali_setup_dma(struct denali_nand_info *denali, int op) 978{
979 uint32_t mode;
980 const int page_count = 1;
981 uint64_t addr = denali->buf.dma_buf;
982
983 mode = MODE_10 | BANK(denali->flash_bank) | denali->page;
984
985 /* DMA is a three step process */
986
987 /*
988 * 1. setup transfer type, interrupt when complete,
989 * burst len = 64 bytes, the number of pages
990 */
991 index_addr(denali, mode, 0x01002000 | (64 << 16) | op | page_count);
992
993 /* 2. set memory low address */
994 index_addr(denali, mode, addr);
995
996 /* 3. set memory high address */
997 index_addr(denali, mode, addr >> 32);
998}
999
1000static void denali_setup_dma32(struct denali_nand_info *denali, int op)
996{ 1001{
997 uint32_t mode; 1002 uint32_t mode;
998 const int page_count = 1; 1003 const int page_count = 1;
@@ -1015,6 +1020,14 @@ static void denali_setup_dma(struct denali_nand_info *denali, int op)
1015 index_addr(denali, mode | 0x14000, 0x2400); 1020 index_addr(denali, mode | 0x14000, 0x2400);
1016} 1021}
1017 1022
1023static void denali_setup_dma(struct denali_nand_info *denali, int op)
1024{
1025 if (denali->caps & DENALI_CAP_DMA_64BIT)
1026 denali_setup_dma64(denali, op);
1027 else
1028 denali_setup_dma32(denali, op);
1029}
1030
1018/* 1031/*
1019 * writes a page. user specifies type, and this function handles the 1032 * writes a page. user specifies type, and this function handles the
1020 * configuration details. 1033 * configuration details.
@@ -1026,8 +1039,7 @@ static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
1026 dma_addr_t addr = denali->buf.dma_buf; 1039 dma_addr_t addr = denali->buf.dma_buf;
1027 size_t size = mtd->writesize + mtd->oobsize; 1040 size_t size = mtd->writesize + mtd->oobsize;
1028 uint32_t irq_status; 1041 uint32_t irq_status;
1029 uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP | 1042 uint32_t irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
1030 INTR_STATUS__PROGRAM_FAIL;
1031 1043
1032 /* 1044 /*
1033 * if it is a raw xfer, we want to disable ecc and send the spare area. 1045 * if it is a raw xfer, we want to disable ecc and send the spare area.
@@ -1118,16 +1130,15 @@ static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1118static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, 1130static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1119 uint8_t *buf, int oob_required, int page) 1131 uint8_t *buf, int oob_required, int page)
1120{ 1132{
1121 unsigned int max_bitflips;
1122 struct denali_nand_info *denali = mtd_to_denali(mtd); 1133 struct denali_nand_info *denali = mtd_to_denali(mtd);
1123
1124 dma_addr_t addr = denali->buf.dma_buf; 1134 dma_addr_t addr = denali->buf.dma_buf;
1125 size_t size = mtd->writesize + mtd->oobsize; 1135 size_t size = mtd->writesize + mtd->oobsize;
1126
1127 uint32_t irq_status; 1136 uint32_t irq_status;
1128 uint32_t irq_mask = INTR_STATUS__ECC_TRANSACTION_DONE | 1137 uint32_t irq_mask = denali->caps & DENALI_CAP_HW_ECC_FIXUP ?
1129 INTR_STATUS__ECC_ERR; 1138 INTR__DMA_CMD_COMP | INTR__ECC_UNCOR_ERR :
1130 bool check_erased_page = false; 1139 INTR__ECC_TRANSACTION_DONE | INTR__ECC_ERR;
1140 unsigned long uncor_ecc_flags = 0;
1141 int stat = 0;
1131 1142
1132 if (page != denali->page) { 1143 if (page != denali->page) {
1133 dev_err(denali->dev, 1144 dev_err(denali->dev,
@@ -1151,21 +1162,23 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1151 1162
1152 memcpy(buf, denali->buf.buf, mtd->writesize); 1163 memcpy(buf, denali->buf.buf, mtd->writesize);
1153 1164
1154 check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips); 1165 if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
1166 stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
1167 else if (irq_status & INTR__ECC_ERR)
1168 stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);
1155 denali_enable_dma(denali, false); 1169 denali_enable_dma(denali, false);
1156 1170
1157 if (check_erased_page) { 1171 if (stat < 0)
1172 return stat;
1173
1174 if (uncor_ecc_flags) {
1158 read_oob_data(mtd, chip->oob_poi, denali->page); 1175 read_oob_data(mtd, chip->oob_poi, denali->page);
1159 1176
1160 /* check ECC failures that may have occurred on erased pages */ 1177 stat = denali_check_erased_page(mtd, chip, buf,
1161 if (check_erased_page) { 1178 uncor_ecc_flags, stat);
1162 if (!is_erased(buf, mtd->writesize))
1163 mtd->ecc_stats.failed++;
1164 if (!is_erased(buf, mtd->oobsize))
1165 mtd->ecc_stats.failed++;
1166 }
1167 } 1179 }
1168 return max_bitflips; 1180
1181 return stat;
1169} 1182}
1170 1183
1171static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1184static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
@@ -1174,7 +1187,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1174 struct denali_nand_info *denali = mtd_to_denali(mtd); 1187 struct denali_nand_info *denali = mtd_to_denali(mtd);
1175 dma_addr_t addr = denali->buf.dma_buf; 1188 dma_addr_t addr = denali->buf.dma_buf;
1176 size_t size = mtd->writesize + mtd->oobsize; 1189 size_t size = mtd->writesize + mtd->oobsize;
1177 uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP; 1190 uint32_t irq_mask = INTR__DMA_CMD_COMP;
1178 1191
1179 if (page != denali->page) { 1192 if (page != denali->page) {
1180 dev_err(denali->dev, 1193 dev_err(denali->dev,
@@ -1247,10 +1260,9 @@ static int denali_erase(struct mtd_info *mtd, int page)
1247 index_addr(denali, cmd, 0x1); 1260 index_addr(denali, cmd, 0x1);
1248 1261
1249 /* wait for erase to complete or failure to occur */ 1262 /* wait for erase to complete or failure to occur */
1250 irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP | 1263 irq_status = wait_for_irq(denali, INTR__ERASE_COMP | INTR__ERASE_FAIL);
1251 INTR_STATUS__ERASE_FAIL);
1252 1264
1253 return irq_status & INTR_STATUS__ERASE_FAIL ? NAND_STATUS_FAIL : PASS; 1265 return irq_status & INTR__ERASE_FAIL ? NAND_STATUS_FAIL : PASS;
1254} 1266}
1255 1267
1256static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col, 1268static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
@@ -1303,6 +1315,14 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
1303static void denali_hw_init(struct denali_nand_info *denali) 1315static void denali_hw_init(struct denali_nand_info *denali)
1304{ 1316{
1305 /* 1317 /*
1318 * The REVISION register may not be reliable. Platforms are allowed to
1319 * override it.
1320 */
1321 if (!denali->revision)
1322 denali->revision =
1323 swab16(ioread32(denali->flash_reg + REVISION));
1324
1325 /*
1306 * tell driver how many bit controller will skip before 1326 * tell driver how many bit controller will skip before
1307 * writing ECC code in OOB, this register may be already 1327 * writing ECC code in OOB, this register may be already
1308 * set by firmware. So we read this value out. 1328 * set by firmware. So we read this value out.
@@ -1413,9 +1433,61 @@ static void denali_drv_init(struct denali_nand_info *denali)
1413 denali->irq_status = 0; 1433 denali->irq_status = 0;
1414} 1434}
1415 1435
1436static int denali_multidev_fixup(struct denali_nand_info *denali)
1437{
1438 struct nand_chip *chip = &denali->nand;
1439 struct mtd_info *mtd = nand_to_mtd(chip);
1440
1441 /*
1442 * Support for multi device:
1443 * When the IP configuration is x16 capable and two x8 chips are
1444 * connected in parallel, DEVICES_CONNECTED should be set to 2.
1445 * In this case, the core framework knows nothing about this fact,
1446 * so we should tell it the _logical_ pagesize and anything necessary.
1447 */
1448 denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
1449
1450 /*
1451 * On some SoCs, DEVICES_CONNECTED is not auto-detected.
1452 * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case.
1453 */
1454 if (denali->devnum == 0) {
1455 denali->devnum = 1;
1456 iowrite32(1, denali->flash_reg + DEVICES_CONNECTED);
1457 }
1458
1459 if (denali->devnum == 1)
1460 return 0;
1461
1462 if (denali->devnum != 2) {
1463 dev_err(denali->dev, "unsupported number of devices %d\n",
1464 denali->devnum);
1465 return -EINVAL;
1466 }
1467
1468 /* 2 chips in parallel */
1469 mtd->size <<= 1;
1470 mtd->erasesize <<= 1;
1471 mtd->writesize <<= 1;
1472 mtd->oobsize <<= 1;
1473 chip->chipsize <<= 1;
1474 chip->page_shift += 1;
1475 chip->phys_erase_shift += 1;
1476 chip->bbt_erase_shift += 1;
1477 chip->chip_shift += 1;
1478 chip->pagemask <<= 1;
1479 chip->ecc.size <<= 1;
1480 chip->ecc.bytes <<= 1;
1481 chip->ecc.strength <<= 1;
1482 denali->bbtskipbytes <<= 1;
1483
1484 return 0;
1485}
1486
1416int denali_init(struct denali_nand_info *denali) 1487int denali_init(struct denali_nand_info *denali)
1417{ 1488{
1418 struct mtd_info *mtd = nand_to_mtd(&denali->nand); 1489 struct nand_chip *chip = &denali->nand;
1490 struct mtd_info *mtd = nand_to_mtd(chip);
1419 int ret; 1491 int ret;
1420 1492
1421 if (denali->platform == INTEL_CE4100) { 1493 if (denali->platform == INTEL_CE4100) {
@@ -1449,13 +1521,16 @@ int denali_init(struct denali_nand_info *denali)
1449 1521
1450 /* now that our ISR is registered, we can enable interrupts */ 1522 /* now that our ISR is registered, we can enable interrupts */
1451 denali_set_intr_modes(denali, true); 1523 denali_set_intr_modes(denali, true);
1452 mtd->name = "denali-nand"; 1524 nand_set_flash_node(chip, denali->dev->of_node);
1525 /* Fallback to the default name if DT did not give "label" property */
1526 if (!mtd->name)
1527 mtd->name = "denali-nand";
1453 1528
1454 /* register the driver with the NAND core subsystem */ 1529 /* register the driver with the NAND core subsystem */
1455 denali->nand.select_chip = denali_select_chip; 1530 chip->select_chip = denali_select_chip;
1456 denali->nand.cmdfunc = denali_cmdfunc; 1531 chip->cmdfunc = denali_cmdfunc;
1457 denali->nand.read_byte = denali_read_byte; 1532 chip->read_byte = denali_read_byte;
1458 denali->nand.waitfunc = denali_waitfunc; 1533 chip->waitfunc = denali_waitfunc;
1459 1534
1460 /* 1535 /*
1461 * scan for NAND devices attached to the controller 1536 * scan for NAND devices attached to the controller
@@ -1476,8 +1551,9 @@ int denali_init(struct denali_nand_info *denali)
1476 goto failed_req_irq; 1551 goto failed_req_irq;
1477 } 1552 }
1478 1553
1479 /* Is 32-bit DMA supported? */ 1554 ret = dma_set_mask(denali->dev,
1480 ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32)); 1555 DMA_BIT_MASK(denali->caps & DENALI_CAP_DMA_64BIT ?
1556 64 : 32));
1481 if (ret) { 1557 if (ret) {
1482 dev_err(denali->dev, "No usable DMA configuration\n"); 1558 dev_err(denali->dev, "No usable DMA configuration\n");
1483 goto failed_req_irq; 1559 goto failed_req_irq;
@@ -1493,54 +1569,35 @@ int denali_init(struct denali_nand_info *denali)
1493 } 1569 }
1494 1570
1495 /* 1571 /*
1496 * support for multi nand
1497 * MTD known nothing about multi nand, so we should tell it
1498 * the real pagesize and anything necessery
1499 */
1500 denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
1501 denali->nand.chipsize <<= denali->devnum - 1;
1502 denali->nand.page_shift += denali->devnum - 1;
1503 denali->nand.pagemask = (denali->nand.chipsize >>
1504 denali->nand.page_shift) - 1;
1505 denali->nand.bbt_erase_shift += denali->devnum - 1;
1506 denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift;
1507 denali->nand.chip_shift += denali->devnum - 1;
1508 mtd->writesize <<= denali->devnum - 1;
1509 mtd->oobsize <<= denali->devnum - 1;
1510 mtd->erasesize <<= denali->devnum - 1;
1511 mtd->size = denali->nand.numchips * denali->nand.chipsize;
1512 denali->bbtskipbytes *= denali->devnum;
1513
1514 /*
1515 * second stage of the NAND scan 1572 * second stage of the NAND scan
1516 * this stage requires information regarding ECC and 1573 * this stage requires information regarding ECC and
1517 * bad block management. 1574 * bad block management.
1518 */ 1575 */
1519 1576
1520 /* Bad block management */ 1577 /* Bad block management */
1521 denali->nand.bbt_td = &bbt_main_descr; 1578 chip->bbt_td = &bbt_main_descr;
1522 denali->nand.bbt_md = &bbt_mirror_descr; 1579 chip->bbt_md = &bbt_mirror_descr;
1523 1580
1524 /* skip the scan for now until we have OOB read and write support */ 1581 /* skip the scan for now until we have OOB read and write support */
1525 denali->nand.bbt_options |= NAND_BBT_USE_FLASH; 1582 chip->bbt_options |= NAND_BBT_USE_FLASH;
1526 denali->nand.options |= NAND_SKIP_BBTSCAN; 1583 chip->options |= NAND_SKIP_BBTSCAN;
1527 denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME; 1584 chip->ecc.mode = NAND_ECC_HW_SYNDROME;
1528 1585
1529 /* no subpage writes on denali */ 1586 /* no subpage writes on denali */
1530 denali->nand.options |= NAND_NO_SUBPAGE_WRITE; 1587 chip->options |= NAND_NO_SUBPAGE_WRITE;
1531 1588
1532 /* 1589 /*
1533 * Denali Controller only support 15bit and 8bit ECC in MRST, 1590 * Denali Controller only support 15bit and 8bit ECC in MRST,
1534 * so just let controller do 15bit ECC for MLC and 8bit ECC for 1591 * so just let controller do 15bit ECC for MLC and 8bit ECC for
1535 * SLC if possible. 1592 * SLC if possible.
1536 * */ 1593 * */
1537 if (!nand_is_slc(&denali->nand) && 1594 if (!nand_is_slc(chip) &&
1538 (mtd->oobsize > (denali->bbtskipbytes + 1595 (mtd->oobsize > (denali->bbtskipbytes +
1539 ECC_15BITS * (mtd->writesize / 1596 ECC_15BITS * (mtd->writesize /
1540 ECC_SECTOR_SIZE)))) { 1597 ECC_SECTOR_SIZE)))) {
1541 /* if MLC OOB size is large enough, use 15bit ECC*/ 1598 /* if MLC OOB size is large enough, use 15bit ECC*/
1542 denali->nand.ecc.strength = 15; 1599 chip->ecc.strength = 15;
1543 denali->nand.ecc.bytes = ECC_15BITS; 1600 chip->ecc.bytes = ECC_15BITS;
1544 iowrite32(15, denali->flash_reg + ECC_CORRECTION); 1601 iowrite32(15, denali->flash_reg + ECC_CORRECTION);
1545 } else if (mtd->oobsize < (denali->bbtskipbytes + 1602 } else if (mtd->oobsize < (denali->bbtskipbytes +
1546 ECC_8BITS * (mtd->writesize / 1603 ECC_8BITS * (mtd->writesize /
@@ -1548,24 +1605,26 @@ int denali_init(struct denali_nand_info *denali)
1548 pr_err("Your NAND chip OOB is not large enough to contain 8bit ECC correction codes"); 1605 pr_err("Your NAND chip OOB is not large enough to contain 8bit ECC correction codes");
1549 goto failed_req_irq; 1606 goto failed_req_irq;
1550 } else { 1607 } else {
1551 denali->nand.ecc.strength = 8; 1608 chip->ecc.strength = 8;
1552 denali->nand.ecc.bytes = ECC_8BITS; 1609 chip->ecc.bytes = ECC_8BITS;
1553 iowrite32(8, denali->flash_reg + ECC_CORRECTION); 1610 iowrite32(8, denali->flash_reg + ECC_CORRECTION);
1554 } 1611 }
1555 1612
1556 mtd_set_ooblayout(mtd, &denali_ooblayout_ops); 1613 mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
1557 denali->nand.ecc.bytes *= denali->devnum;
1558 denali->nand.ecc.strength *= denali->devnum;
1559 1614
1560 /* override the default read operations */ 1615 /* override the default read operations */
1561 denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum; 1616 chip->ecc.size = ECC_SECTOR_SIZE;
1562 denali->nand.ecc.read_page = denali_read_page; 1617 chip->ecc.read_page = denali_read_page;
1563 denali->nand.ecc.read_page_raw = denali_read_page_raw; 1618 chip->ecc.read_page_raw = denali_read_page_raw;
1564 denali->nand.ecc.write_page = denali_write_page; 1619 chip->ecc.write_page = denali_write_page;
1565 denali->nand.ecc.write_page_raw = denali_write_page_raw; 1620 chip->ecc.write_page_raw = denali_write_page_raw;
1566 denali->nand.ecc.read_oob = denali_read_oob; 1621 chip->ecc.read_oob = denali_read_oob;
1567 denali->nand.ecc.write_oob = denali_write_oob; 1622 chip->ecc.write_oob = denali_write_oob;
1568 denali->nand.erase = denali_erase; 1623 chip->erase = denali_erase;
1624
1625 ret = denali_multidev_fixup(denali);
1626 if (ret)
1627 goto failed_req_irq;
1569 1628
1570 ret = nand_scan_tail(mtd); 1629 ret = nand_scan_tail(mtd);
1571 if (ret) 1630 if (ret)
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index ea22191e8515..ec004850652a 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -20,6 +20,7 @@
20#ifndef __DENALI_H__ 20#ifndef __DENALI_H__
21#define __DENALI_H__ 21#define __DENALI_H__
22 22
23#include <linux/bitops.h>
23#include <linux/mtd/nand.h> 24#include <linux/mtd/nand.h>
24 25
25#define DEVICE_RESET 0x0 26#define DEVICE_RESET 0x0
@@ -178,8 +179,6 @@
178 179
179#define REVISION 0x370 180#define REVISION 0x370
180#define REVISION__VALUE 0xffff 181#define REVISION__VALUE 0xffff
181#define MAKE_COMPARABLE_REVISION(x) swab16((x) & REVISION__VALUE)
182#define REVISION_5_1 0x00000501
183 182
184#define ONFI_DEVICE_FEATURES 0x380 183#define ONFI_DEVICE_FEATURES 0x380
185#define ONFI_DEVICE_FEATURES__VALUE 0x003f 184#define ONFI_DEVICE_FEATURES__VALUE 0x003f
@@ -218,65 +217,29 @@
218 217
219#define INTR_STATUS(__bank) (0x410 + ((__bank) * 0x50)) 218#define INTR_STATUS(__bank) (0x410 + ((__bank) * 0x50))
220#define INTR_EN(__bank) (0x420 + ((__bank) * 0x50)) 219#define INTR_EN(__bank) (0x420 + ((__bank) * 0x50))
221 220/* bit[1:0] is used differently depending on IP version */
222#define INTR_STATUS__ECC_TRANSACTION_DONE 0x0001 221#define INTR__ECC_UNCOR_ERR 0x0001 /* new IP */
223#define INTR_STATUS__ECC_ERR 0x0002 222#define INTR__ECC_TRANSACTION_DONE 0x0001 /* old IP */
224#define INTR_STATUS__DMA_CMD_COMP 0x0004 223#define INTR__ECC_ERR 0x0002 /* old IP */
225#define INTR_STATUS__TIME_OUT 0x0008 224#define INTR__DMA_CMD_COMP 0x0004
226#define INTR_STATUS__PROGRAM_FAIL 0x0010 225#define INTR__TIME_OUT 0x0008
227#define INTR_STATUS__ERASE_FAIL 0x0020 226#define INTR__PROGRAM_FAIL 0x0010
228#define INTR_STATUS__LOAD_COMP 0x0040 227#define INTR__ERASE_FAIL 0x0020
229#define INTR_STATUS__PROGRAM_COMP 0x0080 228#define INTR__LOAD_COMP 0x0040
230#define INTR_STATUS__ERASE_COMP 0x0100 229#define INTR__PROGRAM_COMP 0x0080
231#define INTR_STATUS__PIPE_CPYBCK_CMD_COMP 0x0200 230#define INTR__ERASE_COMP 0x0100
232#define INTR_STATUS__LOCKED_BLK 0x0400 231#define INTR__PIPE_CPYBCK_CMD_COMP 0x0200
233#define INTR_STATUS__UNSUP_CMD 0x0800 232#define INTR__LOCKED_BLK 0x0400
234#define INTR_STATUS__INT_ACT 0x1000 233#define INTR__UNSUP_CMD 0x0800
235#define INTR_STATUS__RST_COMP 0x2000 234#define INTR__INT_ACT 0x1000
236#define INTR_STATUS__PIPE_CMD_ERR 0x4000 235#define INTR__RST_COMP 0x2000
237#define INTR_STATUS__PAGE_XFER_INC 0x8000 236#define INTR__PIPE_CMD_ERR 0x4000
238 237#define INTR__PAGE_XFER_INC 0x8000
239#define INTR_EN__ECC_TRANSACTION_DONE 0x0001
240#define INTR_EN__ECC_ERR 0x0002
241#define INTR_EN__DMA_CMD_COMP 0x0004
242#define INTR_EN__TIME_OUT 0x0008
243#define INTR_EN__PROGRAM_FAIL 0x0010
244#define INTR_EN__ERASE_FAIL 0x0020
245#define INTR_EN__LOAD_COMP 0x0040
246#define INTR_EN__PROGRAM_COMP 0x0080
247#define INTR_EN__ERASE_COMP 0x0100
248#define INTR_EN__PIPE_CPYBCK_CMD_COMP 0x0200
249#define INTR_EN__LOCKED_BLK 0x0400
250#define INTR_EN__UNSUP_CMD 0x0800
251#define INTR_EN__INT_ACT 0x1000
252#define INTR_EN__RST_COMP 0x2000
253#define INTR_EN__PIPE_CMD_ERR 0x4000
254#define INTR_EN__PAGE_XFER_INC 0x8000
255 238
256#define PAGE_CNT(__bank) (0x430 + ((__bank) * 0x50)) 239#define PAGE_CNT(__bank) (0x430 + ((__bank) * 0x50))
257#define ERR_PAGE_ADDR(__bank) (0x440 + ((__bank) * 0x50)) 240#define ERR_PAGE_ADDR(__bank) (0x440 + ((__bank) * 0x50))
258#define ERR_BLOCK_ADDR(__bank) (0x450 + ((__bank) * 0x50)) 241#define ERR_BLOCK_ADDR(__bank) (0x450 + ((__bank) * 0x50))
259 242
260#define DATA_INTR 0x550
261#define DATA_INTR__WRITE_SPACE_AV 0x0001
262#define DATA_INTR__READ_DATA_AV 0x0002
263
264#define DATA_INTR_EN 0x560
265#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001
266#define DATA_INTR_EN__READ_DATA_AV 0x0002
267
268#define GPREG_0 0x570
269#define GPREG_0__VALUE 0xffff
270
271#define GPREG_1 0x580
272#define GPREG_1__VALUE 0xffff
273
274#define GPREG_2 0x590
275#define GPREG_2__VALUE 0xffff
276
277#define GPREG_3 0x5a0
278#define GPREG_3__VALUE 0xffff
279
280#define ECC_THRESHOLD 0x600 243#define ECC_THRESHOLD 0x600
281#define ECC_THRESHOLD__VALUE 0x03ff 244#define ECC_THRESHOLD__VALUE 0x03ff
282 245
@@ -297,6 +260,11 @@
297#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000 260#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000
298#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000 261#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000
299 262
263#define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10)
264#define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8)
265#define ECC_COR_INFO__MAX_ERRORS 0x007f
266#define ECC_COR_INFO__UNCOR_ERR 0x0080
267
300#define DMA_ENABLE 0x700 268#define DMA_ENABLE 0x700
301#define DMA_ENABLE__FLAG 0x0001 269#define DMA_ENABLE__FLAG 0x0001
302 270
@@ -304,20 +272,13 @@
304#define IGNORE_ECC_DONE__FLAG 0x0001 272#define IGNORE_ECC_DONE__FLAG 0x0001
305 273
306#define DMA_INTR 0x720 274#define DMA_INTR 0x720
275#define DMA_INTR_EN 0x730
307#define DMA_INTR__TARGET_ERROR 0x0001 276#define DMA_INTR__TARGET_ERROR 0x0001
308#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002 277#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002
309#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004 278#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004
310#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008 279#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008
311#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010 280#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010
312#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020 281#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020
313
314#define DMA_INTR_EN 0x730
315#define DMA_INTR_EN__TARGET_ERROR 0x0001
316#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002
317#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004
318#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008
319#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010
320#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020
321 282
322#define TARGET_ERR_ADDR_LO 0x740 283#define TARGET_ERR_ADDR_LO 0x740
323#define TARGET_ERR_ADDR_LO__VALUE 0xffff 284#define TARGET_ERR_ADDR_LO__VALUE 0xffff
@@ -331,69 +292,12 @@
331#define CHNL_ACTIVE__CHANNEL2 0x0004 292#define CHNL_ACTIVE__CHANNEL2 0x0004
332#define CHNL_ACTIVE__CHANNEL3 0x0008 293#define CHNL_ACTIVE__CHANNEL3 0x0008
333 294
334#define ACTIVE_SRC_ID 0x800
335#define ACTIVE_SRC_ID__VALUE 0x00ff
336
337#define PTN_INTR 0x810
338#define PTN_INTR__CONFIG_ERROR 0x0001
339#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002
340#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004
341#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008
342#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010
343#define PTN_INTR__REG_ACCESS_ERROR 0x0020
344
345#define PTN_INTR_EN 0x820
346#define PTN_INTR_EN__CONFIG_ERROR 0x0001
347#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002
348#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004
349#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008
350#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
351#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
352
353#define PERM_SRC_ID(__bank) (0x830 + ((__bank) * 0x40))
354#define PERM_SRC_ID__SRCID 0x00ff
355#define PERM_SRC_ID__DIRECT_ACCESS_ACTIVE 0x0800
356#define PERM_SRC_ID__WRITE_ACTIVE 0x2000
357#define PERM_SRC_ID__READ_ACTIVE 0x4000
358#define PERM_SRC_ID__PARTITION_VALID 0x8000
359
360#define MIN_BLK_ADDR(__bank) (0x840 + ((__bank) * 0x40))
361#define MIN_BLK_ADDR__VALUE 0xffff
362
363#define MAX_BLK_ADDR(__bank) (0x850 + ((__bank) * 0x40))
364#define MAX_BLK_ADDR__VALUE 0xffff
365
366#define MIN_MAX_BANK(__bank) (0x860 + ((__bank) * 0x40))
367#define MIN_MAX_BANK__MIN_VALUE 0x0003
368#define MIN_MAX_BANK__MAX_VALUE 0x000c
369
370
371/* ffsdefs.h */
372#define CLEAR 0 /*use this to clear a field instead of "fail"*/
373#define SET 1 /*use this to set a field instead of "pass"*/
374#define FAIL 1 /*failed flag*/ 295#define FAIL 1 /*failed flag*/
375#define PASS 0 /*success flag*/ 296#define PASS 0 /*success flag*/
376#define ERR -1 /*error flag*/
377
378/* lld.h */
379#define GOOD_BLOCK 0
380#define DEFECTIVE_BLOCK 1
381#define READ_ERROR 2
382 297
383#define CLK_X 5 298#define CLK_X 5
384#define CLK_MULTI 4 299#define CLK_MULTI 4
385 300
386/* KBV - Updated to LNW scratch register address */
387#define SCRATCH_REG_ADDR CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR
388#define SCRATCH_REG_SIZE 64
389
390#define GLOB_HWCTL_DEFAULT_BLKS 2048
391
392#define SUPPORT_15BITECC 1
393#define SUPPORT_8BITECC 1
394
395#define CUSTOM_CONF_PARAMS 0
396
397#define ONFI_BLOOM_TIME 1 301#define ONFI_BLOOM_TIME 1
398#define MODE5_WORKAROUND 0 302#define MODE5_WORKAROUND 0
399 303
@@ -403,31 +307,6 @@
403#define MODE_10 0x08000000 307#define MODE_10 0x08000000
404#define MODE_11 0x0C000000 308#define MODE_11 0x0C000000
405 309
406
407#define DATA_TRANSFER_MODE 0
408#define PROTECTION_PER_BLOCK 1
409#define LOAD_WAIT_COUNT 2
410#define PROGRAM_WAIT_COUNT 3
411#define ERASE_WAIT_COUNT 4
412#define INT_MONITOR_CYCLE_COUNT 5
413#define READ_BUSY_PIN_ENABLED 6
414#define MULTIPLANE_OPERATION_SUPPORT 7
415#define PRE_FETCH_MODE 8
416#define CE_DONT_CARE_SUPPORT 9
417#define COPYBACK_SUPPORT 10
418#define CACHE_WRITE_SUPPORT 11
419#define CACHE_READ_SUPPORT 12
420#define NUM_PAGES_IN_BLOCK 13
421#define ECC_ENABLE_SELECT 14
422#define WRITE_ENABLE_2_READ_ENABLE 15
423#define ADDRESS_2_DATA 16
424#define READ_ENABLE_2_WRITE_ENABLE 17
425#define TWO_ROW_ADDRESS_CYCLES 18
426#define MULTIPLANE_ADDRESS_RESTRICT 19
427#define ACC_CLOCKS 20
428#define READ_WRITE_ENABLE_LOW_COUNT 21
429#define READ_WRITE_ENABLE_HIGH_COUNT 22
430
431#define ECC_SECTOR_SIZE 512 310#define ECC_SECTOR_SIZE 512
432 311
433struct nand_buf { 312struct nand_buf {
@@ -449,23 +328,26 @@ struct denali_nand_info {
449 struct nand_buf buf; 328 struct nand_buf buf;
450 struct device *dev; 329 struct device *dev;
451 int total_used_banks; 330 int total_used_banks;
452 uint32_t block; /* stored for future use */ 331 int page;
453 uint16_t page; 332 void __iomem *flash_reg; /* Register Interface */
454 void __iomem *flash_reg; /* Mapped io reg base address */ 333 void __iomem *flash_mem; /* Host Data/Command Interface */
455 void __iomem *flash_mem; /* Mapped io reg base address */
456 334
457 /* elements used by ISR */ 335 /* elements used by ISR */
458 struct completion complete; 336 struct completion complete;
459 spinlock_t irq_lock; 337 spinlock_t irq_lock;
460 uint32_t irq_status; 338 uint32_t irq_status;
461 int irq_debug_array[32];
462 int irq; 339 int irq;
463 340
464 uint32_t devnum; /* represent how many nands connected */ 341 int devnum; /* represent how many nands connected */
465 uint32_t bbtskipbytes; 342 int bbtskipbytes;
466 uint32_t max_banks; 343 int max_banks;
344 unsigned int revision;
345 unsigned int caps;
467}; 346};
468 347
348#define DENALI_CAP_HW_ECC_FIXUP BIT(0)
349#define DENALI_CAP_DMA_64BIT BIT(1)
350
469extern int denali_init(struct denali_nand_info *denali); 351extern int denali_init(struct denali_nand_info *denali);
470extern void denali_remove(struct denali_nand_info *denali); 352extern void denali_remove(struct denali_nand_info *denali);
471 353
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
index 5607fcd3b8ed..df9ef36cc2ce 100644
--- a/drivers/mtd/nand/denali_dt.c
+++ b/drivers/mtd/nand/denali_dt.c
@@ -29,64 +29,66 @@ struct denali_dt {
29 struct clk *clk; 29 struct clk *clk;
30}; 30};
31 31
32static const struct of_device_id denali_nand_dt_ids[] = { 32struct denali_dt_data {
33 { .compatible = "denali,denali-nand-dt" }, 33 unsigned int revision;
34 { /* sentinel */ } 34 unsigned int caps;
35 }; 35};
36 36
37MODULE_DEVICE_TABLE(of, denali_nand_dt_ids); 37static const struct denali_dt_data denali_socfpga_data = {
38 .caps = DENALI_CAP_HW_ECC_FIXUP,
39};
38 40
39static u64 denali_dma_mask; 41static const struct of_device_id denali_nand_dt_ids[] = {
42 {
43 .compatible = "altr,socfpga-denali-nand",
44 .data = &denali_socfpga_data,
45 },
46 { /* sentinel */ }
47};
48MODULE_DEVICE_TABLE(of, denali_nand_dt_ids);
40 49
41static int denali_dt_probe(struct platform_device *ofdev) 50static int denali_dt_probe(struct platform_device *pdev)
42{ 51{
43 struct resource *denali_reg, *nand_data; 52 struct resource *denali_reg, *nand_data;
44 struct denali_dt *dt; 53 struct denali_dt *dt;
54 const struct denali_dt_data *data;
45 struct denali_nand_info *denali; 55 struct denali_nand_info *denali;
46 int ret; 56 int ret;
47 const struct of_device_id *of_id;
48 57
49 of_id = of_match_device(denali_nand_dt_ids, &ofdev->dev); 58 dt = devm_kzalloc(&pdev->dev, sizeof(*dt), GFP_KERNEL);
50 if (of_id) {
51 ofdev->id_entry = of_id->data;
52 } else {
53 pr_err("Failed to find the right device id.\n");
54 return -ENOMEM;
55 }
56
57 dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
58 if (!dt) 59 if (!dt)
59 return -ENOMEM; 60 return -ENOMEM;
60 denali = &dt->denali; 61 denali = &dt->denali;
61 62
63 data = of_device_get_match_data(&pdev->dev);
64 if (data) {
65 denali->revision = data->revision;
66 denali->caps = data->caps;
67 }
68
62 denali->platform = DT; 69 denali->platform = DT;
63 denali->dev = &ofdev->dev; 70 denali->dev = &pdev->dev;
64 denali->irq = platform_get_irq(ofdev, 0); 71 denali->irq = platform_get_irq(pdev, 0);
65 if (denali->irq < 0) { 72 if (denali->irq < 0) {
66 dev_err(&ofdev->dev, "no irq defined\n"); 73 dev_err(&pdev->dev, "no irq defined\n");
67 return denali->irq; 74 return denali->irq;
68 } 75 }
69 76
70 denali_reg = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "denali_reg"); 77 denali_reg = platform_get_resource_byname(pdev, IORESOURCE_MEM,
71 denali->flash_reg = devm_ioremap_resource(&ofdev->dev, denali_reg); 78 "denali_reg");
79 denali->flash_reg = devm_ioremap_resource(&pdev->dev, denali_reg);
72 if (IS_ERR(denali->flash_reg)) 80 if (IS_ERR(denali->flash_reg))
73 return PTR_ERR(denali->flash_reg); 81 return PTR_ERR(denali->flash_reg);
74 82
75 nand_data = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "nand_data"); 83 nand_data = platform_get_resource_byname(pdev, IORESOURCE_MEM,
76 denali->flash_mem = devm_ioremap_resource(&ofdev->dev, nand_data); 84 "nand_data");
85 denali->flash_mem = devm_ioremap_resource(&pdev->dev, nand_data);
77 if (IS_ERR(denali->flash_mem)) 86 if (IS_ERR(denali->flash_mem))
78 return PTR_ERR(denali->flash_mem); 87 return PTR_ERR(denali->flash_mem);
79 88
80 if (!of_property_read_u32(ofdev->dev.of_node, 89 dt->clk = devm_clk_get(&pdev->dev, NULL);
81 "dma-mask", (u32 *)&denali_dma_mask)) {
82 denali->dev->dma_mask = &denali_dma_mask;
83 } else {
84 denali->dev->dma_mask = NULL;
85 }
86
87 dt->clk = devm_clk_get(&ofdev->dev, NULL);
88 if (IS_ERR(dt->clk)) { 90 if (IS_ERR(dt->clk)) {
89 dev_err(&ofdev->dev, "no clk available\n"); 91 dev_err(&pdev->dev, "no clk available\n");
90 return PTR_ERR(dt->clk); 92 return PTR_ERR(dt->clk);
91 } 93 }
92 clk_prepare_enable(dt->clk); 94 clk_prepare_enable(dt->clk);
@@ -95,7 +97,7 @@ static int denali_dt_probe(struct platform_device *ofdev)
95 if (ret) 97 if (ret)
96 goto out_disable_clk; 98 goto out_disable_clk;
97 99
98 platform_set_drvdata(ofdev, dt); 100 platform_set_drvdata(pdev, dt);
99 return 0; 101 return 0;
100 102
101out_disable_clk: 103out_disable_clk:
@@ -104,9 +106,9 @@ out_disable_clk:
104 return ret; 106 return ret;
105} 107}
106 108
107static int denali_dt_remove(struct platform_device *ofdev) 109static int denali_dt_remove(struct platform_device *pdev)
108{ 110{
109 struct denali_dt *dt = platform_get_drvdata(ofdev); 111 struct denali_dt *dt = platform_get_drvdata(pdev);
110 112
111 denali_remove(&dt->denali); 113 denali_remove(&dt->denali);
112 clk_disable_unprepare(dt->clk); 114 clk_disable_unprepare(dt->clk);
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index bda1e4667138..cea50d2f218c 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -38,15 +38,6 @@
38#include <linux/amba/bus.h> 38#include <linux/amba/bus.h>
39#include <mtd/mtd-abi.h> 39#include <mtd/mtd-abi.h>
40 40
41#define FSMC_NAND_BW8 1
42#define FSMC_NAND_BW16 2
43
44#define FSMC_MAX_NOR_BANKS 4
45#define FSMC_MAX_NAND_BANKS 4
46
47#define FSMC_FLASH_WIDTH8 1
48#define FSMC_FLASH_WIDTH16 2
49
50/* fsmc controller registers for NOR flash */ 41/* fsmc controller registers for NOR flash */
51#define CTRL 0x0 42#define CTRL 0x0
52 /* ctrl register definitions */ 43 /* ctrl register definitions */
@@ -133,33 +124,48 @@ enum access_mode {
133}; 124};
134 125
135/** 126/**
136 * fsmc_nand_platform_data - platform specific NAND controller config 127 * struct fsmc_nand_data - structure for FSMC NAND device state
137 * @nand_timings: timing setup for the physical NAND interface 128 *
138 * @partitions: partition table for the platform, use a default fallback 129 * @pid: Part ID on the AMBA PrimeCell format
139 * if this is NULL 130 * @mtd: MTD info for a NAND flash.
140 * @nr_partitions: the number of partitions in the previous entry 131 * @nand: Chip related info for a NAND flash.
141 * @options: different options for the driver 132 * @partitions: Partition info for a NAND Flash.
142 * @width: bus width 133 * @nr_partitions: Total number of partition of a NAND flash.
143 * @bank: default bank 134 *
144 * @select_bank: callback to select a certain bank, this is 135 * @bank: Bank number for probed device.
145 * platform-specific. If the controller only supports one bank 136 * @clk: Clock structure for FSMC.
146 * this may be set to NULL 137 *
138 * @read_dma_chan: DMA channel for read access
139 * @write_dma_chan: DMA channel for write access to NAND
140 * @dma_access_complete: Completion structure
141 *
142 * @data_pa: NAND Physical port for Data.
143 * @data_va: NAND port for Data.
144 * @cmd_va: NAND port for Command.
145 * @addr_va: NAND port for Address.
146 * @regs_va: FSMC regs base address.
147 */ 147 */
148struct fsmc_nand_platform_data { 148struct fsmc_nand_data {
149 struct fsmc_nand_timings *nand_timings; 149 u32 pid;
150 struct mtd_partition *partitions; 150 struct nand_chip nand;
151 unsigned int nr_partitions;
152 unsigned int options;
153 unsigned int width;
154 unsigned int bank;
155 151
152 unsigned int bank;
153 struct device *dev;
156 enum access_mode mode; 154 enum access_mode mode;
155 struct clk *clk;
157 156
158 void (*select_bank)(uint32_t bank, uint32_t busw); 157 /* DMA related objects */
158 struct dma_chan *read_dma_chan;
159 struct dma_chan *write_dma_chan;
160 struct completion dma_access_complete;
159 161
160 /* priv structures for dma accesses */ 162 struct fsmc_nand_timings *dev_timings;
161 void *read_dma_priv; 163
162 void *write_dma_priv; 164 dma_addr_t data_pa;
165 void __iomem *data_va;
166 void __iomem *cmd_va;
167 void __iomem *addr_va;
168 void __iomem *regs_va;
163}; 169};
164 170
165static int fsmc_ecc1_ooblayout_ecc(struct mtd_info *mtd, int section, 171static int fsmc_ecc1_ooblayout_ecc(struct mtd_info *mtd, int section,
@@ -246,86 +252,11 @@ static const struct mtd_ooblayout_ops fsmc_ecc4_ooblayout_ops = {
246 .free = fsmc_ecc4_ooblayout_free, 252 .free = fsmc_ecc4_ooblayout_free,
247}; 253};
248 254
249/**
250 * struct fsmc_nand_data - structure for FSMC NAND device state
251 *
252 * @pid: Part ID on the AMBA PrimeCell format
253 * @mtd: MTD info for a NAND flash.
254 * @nand: Chip related info for a NAND flash.
255 * @partitions: Partition info for a NAND Flash.
256 * @nr_partitions: Total number of partition of a NAND flash.
257 *
258 * @bank: Bank number for probed device.
259 * @clk: Clock structure for FSMC.
260 *
261 * @read_dma_chan: DMA channel for read access
262 * @write_dma_chan: DMA channel for write access to NAND
263 * @dma_access_complete: Completion structure
264 *
265 * @data_pa: NAND Physical port for Data.
266 * @data_va: NAND port for Data.
267 * @cmd_va: NAND port for Command.
268 * @addr_va: NAND port for Address.
269 * @regs_va: FSMC regs base address.
270 */
271struct fsmc_nand_data {
272 u32 pid;
273 struct nand_chip nand;
274 struct mtd_partition *partitions;
275 unsigned int nr_partitions;
276
277 unsigned int bank;
278 struct device *dev;
279 enum access_mode mode;
280 struct clk *clk;
281
282 /* DMA related objects */
283 struct dma_chan *read_dma_chan;
284 struct dma_chan *write_dma_chan;
285 struct completion dma_access_complete;
286
287 struct fsmc_nand_timings *dev_timings;
288
289 dma_addr_t data_pa;
290 void __iomem *data_va;
291 void __iomem *cmd_va;
292 void __iomem *addr_va;
293 void __iomem *regs_va;
294
295 void (*select_chip)(uint32_t bank, uint32_t busw);
296};
297
298static inline struct fsmc_nand_data *mtd_to_fsmc(struct mtd_info *mtd) 255static inline struct fsmc_nand_data *mtd_to_fsmc(struct mtd_info *mtd)
299{ 256{
300 return container_of(mtd_to_nand(mtd), struct fsmc_nand_data, nand); 257 return container_of(mtd_to_nand(mtd), struct fsmc_nand_data, nand);
301} 258}
302 259
303/* Assert CS signal based on chipnr */
304static void fsmc_select_chip(struct mtd_info *mtd, int chipnr)
305{
306 struct nand_chip *chip = mtd_to_nand(mtd);
307 struct fsmc_nand_data *host;
308
309 host = mtd_to_fsmc(mtd);
310
311 switch (chipnr) {
312 case -1:
313 chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
314 break;
315 case 0:
316 case 1:
317 case 2:
318 case 3:
319 if (host->select_chip)
320 host->select_chip(chipnr,
321 chip->options & NAND_BUSWIDTH_16);
322 break;
323
324 default:
325 dev_err(host->dev, "unsupported chip-select %d\n", chipnr);
326 }
327}
328
329/* 260/*
330 * fsmc_cmd_ctrl - For facilitaing Hardware access 261 * fsmc_cmd_ctrl - For facilitaing Hardware access
331 * This routine allows hardware specific access to control-lines(ALE,CLE) 262 * This routine allows hardware specific access to control-lines(ALE,CLE)
@@ -838,44 +769,46 @@ static bool filter(struct dma_chan *chan, void *slave)
838} 769}
839 770
840static int fsmc_nand_probe_config_dt(struct platform_device *pdev, 771static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
841 struct device_node *np) 772 struct fsmc_nand_data *host,
773 struct nand_chip *nand)
842{ 774{
843 struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev); 775 struct device_node *np = pdev->dev.of_node;
844 u32 val; 776 u32 val;
845 int ret; 777 int ret;
846 778
847 /* Set default NAND width to 8 bits */ 779 nand->options = 0;
848 pdata->width = 8; 780
849 if (!of_property_read_u32(np, "bank-width", &val)) { 781 if (!of_property_read_u32(np, "bank-width", &val)) {
850 if (val == 2) { 782 if (val == 2) {
851 pdata->width = 16; 783 nand->options |= NAND_BUSWIDTH_16;
852 } else if (val != 1) { 784 } else if (val != 1) {
853 dev_err(&pdev->dev, "invalid bank-width %u\n", val); 785 dev_err(&pdev->dev, "invalid bank-width %u\n", val);
854 return -EINVAL; 786 return -EINVAL;
855 } 787 }
856 } 788 }
789
857 if (of_get_property(np, "nand-skip-bbtscan", NULL)) 790 if (of_get_property(np, "nand-skip-bbtscan", NULL))
858 pdata->options = NAND_SKIP_BBTSCAN; 791 nand->options |= NAND_SKIP_BBTSCAN;
859 792
860 pdata->nand_timings = devm_kzalloc(&pdev->dev, 793 host->dev_timings = devm_kzalloc(&pdev->dev,
861 sizeof(*pdata->nand_timings), GFP_KERNEL); 794 sizeof(*host->dev_timings), GFP_KERNEL);
862 if (!pdata->nand_timings) 795 if (!host->dev_timings)
863 return -ENOMEM; 796 return -ENOMEM;
864 ret = of_property_read_u8_array(np, "timings", (u8 *)pdata->nand_timings, 797 ret = of_property_read_u8_array(np, "timings", (u8 *)host->dev_timings,
865 sizeof(*pdata->nand_timings)); 798 sizeof(*host->dev_timings));
866 if (ret) { 799 if (ret) {
867 dev_info(&pdev->dev, "No timings in dts specified, using default timings!\n"); 800 dev_info(&pdev->dev, "No timings in dts specified, using default timings!\n");
868 pdata->nand_timings = NULL; 801 host->dev_timings = NULL;
869 } 802 }
870 803
871 /* Set default NAND bank to 0 */ 804 /* Set default NAND bank to 0 */
872 pdata->bank = 0; 805 host->bank = 0;
873 if (!of_property_read_u32(np, "bank", &val)) { 806 if (!of_property_read_u32(np, "bank", &val)) {
874 if (val > 3) { 807 if (val > 3) {
875 dev_err(&pdev->dev, "invalid bank %u\n", val); 808 dev_err(&pdev->dev, "invalid bank %u\n", val);
876 return -EINVAL; 809 return -EINVAL;
877 } 810 }
878 pdata->bank = val; 811 host->bank = val;
879 } 812 }
880 return 0; 813 return 0;
881} 814}
@@ -886,8 +819,6 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
886 */ 819 */
887static int __init fsmc_nand_probe(struct platform_device *pdev) 820static int __init fsmc_nand_probe(struct platform_device *pdev)
888{ 821{
889 struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
890 struct device_node __maybe_unused *np = pdev->dev.of_node;
891 struct fsmc_nand_data *host; 822 struct fsmc_nand_data *host;
892 struct mtd_info *mtd; 823 struct mtd_info *mtd;
893 struct nand_chip *nand; 824 struct nand_chip *nand;
@@ -897,22 +828,17 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
897 u32 pid; 828 u32 pid;
898 int i; 829 int i;
899 830
900 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
901 if (!pdata)
902 return -ENOMEM;
903
904 pdev->dev.platform_data = pdata;
905 ret = fsmc_nand_probe_config_dt(pdev, np);
906 if (ret) {
907 dev_err(&pdev->dev, "no platform data\n");
908 return -ENODEV;
909 }
910
911 /* Allocate memory for the device structure (and zero it) */ 831 /* Allocate memory for the device structure (and zero it) */
912 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); 832 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
913 if (!host) 833 if (!host)
914 return -ENOMEM; 834 return -ENOMEM;
915 835
836 nand = &host->nand;
837
838 ret = fsmc_nand_probe_config_dt(pdev, host, nand);
839 if (ret)
840 return ret;
841
916 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data"); 842 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
917 host->data_va = devm_ioremap_resource(&pdev->dev, res); 843 host->data_va = devm_ioremap_resource(&pdev->dev, res);
918 if (IS_ERR(host->data_va)) 844 if (IS_ERR(host->data_va))
@@ -935,7 +861,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
935 if (IS_ERR(host->regs_va)) 861 if (IS_ERR(host->regs_va))
936 return PTR_ERR(host->regs_va); 862 return PTR_ERR(host->regs_va);
937 863
938 host->clk = clk_get(&pdev->dev, NULL); 864 host->clk = devm_clk_get(&pdev->dev, NULL);
939 if (IS_ERR(host->clk)) { 865 if (IS_ERR(host->clk)) {
940 dev_err(&pdev->dev, "failed to fetch block clock\n"); 866 dev_err(&pdev->dev, "failed to fetch block clock\n");
941 return PTR_ERR(host->clk); 867 return PTR_ERR(host->clk);
@@ -943,7 +869,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
943 869
944 ret = clk_prepare_enable(host->clk); 870 ret = clk_prepare_enable(host->clk);
945 if (ret) 871 if (ret)
946 goto err_clk_prepare_enable; 872 return ret;
947 873
948 /* 874 /*
949 * This device ID is actually a common AMBA ID as used on the 875 * This device ID is actually a common AMBA ID as used on the
@@ -957,22 +883,15 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
957 AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid), 883 AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
958 AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid)); 884 AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));
959 885
960 host->bank = pdata->bank;
961 host->select_chip = pdata->select_bank;
962 host->partitions = pdata->partitions;
963 host->nr_partitions = pdata->nr_partitions;
964 host->dev = &pdev->dev; 886 host->dev = &pdev->dev;
965 host->dev_timings = pdata->nand_timings;
966 host->mode = pdata->mode;
967 887
968 if (host->mode == USE_DMA_ACCESS) 888 if (host->mode == USE_DMA_ACCESS)
969 init_completion(&host->dma_access_complete); 889 init_completion(&host->dma_access_complete);
970 890
971 /* Link all private pointers */ 891 /* Link all private pointers */
972 mtd = nand_to_mtd(&host->nand); 892 mtd = nand_to_mtd(&host->nand);
973 nand = &host->nand;
974 nand_set_controller_data(nand, host); 893 nand_set_controller_data(nand, host);
975 nand_set_flash_node(nand, np); 894 nand_set_flash_node(nand, pdev->dev.of_node);
976 895
977 mtd->dev.parent = &pdev->dev; 896 mtd->dev.parent = &pdev->dev;
978 nand->IO_ADDR_R = host->data_va; 897 nand->IO_ADDR_R = host->data_va;
@@ -987,26 +906,18 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
987 nand->ecc.mode = NAND_ECC_HW; 906 nand->ecc.mode = NAND_ECC_HW;
988 nand->ecc.hwctl = fsmc_enable_hwecc; 907 nand->ecc.hwctl = fsmc_enable_hwecc;
989 nand->ecc.size = 512; 908 nand->ecc.size = 512;
990 nand->options = pdata->options;
991 nand->select_chip = fsmc_select_chip;
992 nand->badblockbits = 7; 909 nand->badblockbits = 7;
993 nand_set_flash_node(nand, np);
994
995 if (pdata->width == FSMC_NAND_BW16)
996 nand->options |= NAND_BUSWIDTH_16;
997 910
998 switch (host->mode) { 911 switch (host->mode) {
999 case USE_DMA_ACCESS: 912 case USE_DMA_ACCESS:
1000 dma_cap_zero(mask); 913 dma_cap_zero(mask);
1001 dma_cap_set(DMA_MEMCPY, mask); 914 dma_cap_set(DMA_MEMCPY, mask);
1002 host->read_dma_chan = dma_request_channel(mask, filter, 915 host->read_dma_chan = dma_request_channel(mask, filter, NULL);
1003 pdata->read_dma_priv);
1004 if (!host->read_dma_chan) { 916 if (!host->read_dma_chan) {
1005 dev_err(&pdev->dev, "Unable to get read dma channel\n"); 917 dev_err(&pdev->dev, "Unable to get read dma channel\n");
1006 goto err_req_read_chnl; 918 goto err_req_read_chnl;
1007 } 919 }
1008 host->write_dma_chan = dma_request_channel(mask, filter, 920 host->write_dma_chan = dma_request_channel(mask, filter, NULL);
1009 pdata->write_dma_priv);
1010 if (!host->write_dma_chan) { 921 if (!host->write_dma_chan) {
1011 dev_err(&pdev->dev, "Unable to get write dma channel\n"); 922 dev_err(&pdev->dev, "Unable to get write dma channel\n");
1012 goto err_req_write_chnl; 923 goto err_req_write_chnl;
@@ -1107,18 +1018,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
1107 if (ret) 1018 if (ret)
1108 goto err_probe; 1019 goto err_probe;
1109 1020
1110 /*
1111 * The partition information can is accessed by (in the same precedence)
1112 *
1113 * command line through Bootloader,
1114 * platform data,
1115 * default partition information present in driver.
1116 */
1117 /*
1118 * Check for partition info passed
1119 */
1120 mtd->name = "nand"; 1021 mtd->name = "nand";
1121 ret = mtd_device_register(mtd, host->partitions, host->nr_partitions); 1022 ret = mtd_device_register(mtd, NULL, 0);
1122 if (ret) 1023 if (ret)
1123 goto err_probe; 1024 goto err_probe;
1124 1025
@@ -1135,8 +1036,6 @@ err_req_write_chnl:
1135 dma_release_channel(host->read_dma_chan); 1036 dma_release_channel(host->read_dma_chan);
1136err_req_read_chnl: 1037err_req_read_chnl:
1137 clk_disable_unprepare(host->clk); 1038 clk_disable_unprepare(host->clk);
1138err_clk_prepare_enable:
1139 clk_put(host->clk);
1140 return ret; 1039 return ret;
1141} 1040}
1142 1041
@@ -1155,7 +1054,6 @@ static int fsmc_nand_remove(struct platform_device *pdev)
1155 dma_release_channel(host->read_dma_chan); 1054 dma_release_channel(host->read_dma_chan);
1156 } 1055 }
1157 clk_disable_unprepare(host->clk); 1056 clk_disable_unprepare(host->clk);
1158 clk_put(host->clk);
1159 } 1057 }
1160 1058
1161 return 0; 1059 return 0;
@@ -1185,20 +1083,18 @@ static int fsmc_nand_resume(struct device *dev)
1185 1083
1186static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume); 1084static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
1187 1085
1188#ifdef CONFIG_OF
1189static const struct of_device_id fsmc_nand_id_table[] = { 1086static const struct of_device_id fsmc_nand_id_table[] = {
1190 { .compatible = "st,spear600-fsmc-nand" }, 1087 { .compatible = "st,spear600-fsmc-nand" },
1191 { .compatible = "stericsson,fsmc-nand" }, 1088 { .compatible = "stericsson,fsmc-nand" },
1192 {} 1089 {}
1193}; 1090};
1194MODULE_DEVICE_TABLE(of, fsmc_nand_id_table); 1091MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
1195#endif
1196 1092
1197static struct platform_driver fsmc_nand_driver = { 1093static struct platform_driver fsmc_nand_driver = {
1198 .remove = fsmc_nand_remove, 1094 .remove = fsmc_nand_remove,
1199 .driver = { 1095 .driver = {
1200 .name = "fsmc-nand", 1096 .name = "fsmc-nand",
1201 .of_match_table = of_match_ptr(fsmc_nand_id_table), 1097 .of_match_table = fsmc_nand_id_table,
1202 .pm = &fsmc_nand_pm_ops, 1098 .pm = &fsmc_nand_pm_ops,
1203 }, 1099 },
1204}; 1100};
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 0d24857469ab..85294f150f4f 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -78,7 +78,9 @@ static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
78 gpio_nand_dosync(gpiomtd); 78 gpio_nand_dosync(gpiomtd);
79 79
80 if (ctrl & NAND_CTRL_CHANGE) { 80 if (ctrl & NAND_CTRL_CHANGE) {
81 gpio_set_value(gpiomtd->plat.gpio_nce, !(ctrl & NAND_NCE)); 81 if (gpio_is_valid(gpiomtd->plat.gpio_nce))
82 gpio_set_value(gpiomtd->plat.gpio_nce,
83 !(ctrl & NAND_NCE));
82 gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE)); 84 gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE));
83 gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE)); 85 gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE));
84 gpio_nand_dosync(gpiomtd); 86 gpio_nand_dosync(gpiomtd);
@@ -201,7 +203,8 @@ static int gpio_nand_remove(struct platform_device *pdev)
201 203
202 if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) 204 if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
203 gpio_set_value(gpiomtd->plat.gpio_nwp, 0); 205 gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
204 gpio_set_value(gpiomtd->plat.gpio_nce, 1); 206 if (gpio_is_valid(gpiomtd->plat.gpio_nce))
207 gpio_set_value(gpiomtd->plat.gpio_nce, 1);
205 208
206 return 0; 209 return 0;
207} 210}
@@ -239,10 +242,13 @@ static int gpio_nand_probe(struct platform_device *pdev)
239 if (ret) 242 if (ret)
240 return ret; 243 return ret;
241 244
242 ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce, "NAND NCE"); 245 if (gpio_is_valid(gpiomtd->plat.gpio_nce)) {
243 if (ret) 246 ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce,
244 return ret; 247 "NAND NCE");
245 gpio_direction_output(gpiomtd->plat.gpio_nce, 1); 248 if (ret)
249 return ret;
250 gpio_direction_output(gpiomtd->plat.gpio_nce, 1);
251 }
246 252
247 if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) { 253 if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) {
248 ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp, 254 ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp,
diff --git a/drivers/mtd/nand/nand_amd.c b/drivers/mtd/nand/nand_amd.c
new file mode 100644
index 000000000000..170403a3bfa8
--- /dev/null
+++ b/drivers/mtd/nand/nand_amd.c
@@ -0,0 +1,51 @@
1/*
2 * Copyright (C) 2017 Free Electrons
3 * Copyright (C) 2017 NextThing Co
4 *
5 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/mtd/nand.h>
19
20static void amd_nand_decode_id(struct nand_chip *chip)
21{
22 struct mtd_info *mtd = nand_to_mtd(chip);
23
24 nand_decode_ext_id(chip);
25
26 /*
27 * Check for Spansion/AMD ID + repeating 5th, 6th byte since
28 * some Spansion chips have erasesize that conflicts with size
29 * listed in nand_ids table.
30 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
31 */
32 if (chip->id.data[4] != 0x00 && chip->id.data[5] == 0x00 &&
33 chip->id.data[6] == 0x00 && chip->id.data[7] == 0x00 &&
34 mtd->writesize == 512) {
35 mtd->erasesize = 128 * 1024;
36 mtd->erasesize <<= ((chip->id.data[3] & 0x03) << 1);
37 }
38}
39
40static int amd_nand_init(struct nand_chip *chip)
41{
42 if (nand_is_slc(chip))
43 chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
44
45 return 0;
46}
47
48const struct nand_manufacturer_ops amd_nand_manuf_ops = {
49 .detect = amd_nand_decode_id,
50 .init = amd_nand_init,
51};
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index b0524f8accb6..d474378ed810 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -139,6 +139,74 @@ const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
139}; 139};
140EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops); 140EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
141 141
142/*
143 * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
144 * are placed at a fixed offset.
145 */
146static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
147 struct mtd_oob_region *oobregion)
148{
149 struct nand_chip *chip = mtd_to_nand(mtd);
150 struct nand_ecc_ctrl *ecc = &chip->ecc;
151
152 if (section)
153 return -ERANGE;
154
155 switch (mtd->oobsize) {
156 case 64:
157 oobregion->offset = 40;
158 break;
159 case 128:
160 oobregion->offset = 80;
161 break;
162 default:
163 return -EINVAL;
164 }
165
166 oobregion->length = ecc->total;
167 if (oobregion->offset + oobregion->length > mtd->oobsize)
168 return -ERANGE;
169
170 return 0;
171}
172
173static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
174 struct mtd_oob_region *oobregion)
175{
176 struct nand_chip *chip = mtd_to_nand(mtd);
177 struct nand_ecc_ctrl *ecc = &chip->ecc;
178 int ecc_offset = 0;
179
180 if (section < 0 || section > 1)
181 return -ERANGE;
182
183 switch (mtd->oobsize) {
184 case 64:
185 ecc_offset = 40;
186 break;
187 case 128:
188 ecc_offset = 80;
189 break;
190 default:
191 return -EINVAL;
192 }
193
194 if (section == 0) {
195 oobregion->offset = 2;
196 oobregion->length = ecc_offset - 2;
197 } else {
198 oobregion->offset = ecc_offset + ecc->total;
199 oobregion->length = mtd->oobsize - oobregion->offset;
200 }
201
202 return 0;
203}
204
205const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
206 .ecc = nand_ooblayout_ecc_lp_hamming,
207 .free = nand_ooblayout_free_lp_hamming,
208};
209
142static int check_offs_len(struct mtd_info *mtd, 210static int check_offs_len(struct mtd_info *mtd,
143 loff_t ofs, uint64_t len) 211 loff_t ofs, uint64_t len)
144{ 212{
@@ -354,40 +422,32 @@ static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
354 */ 422 */
355static int nand_block_bad(struct mtd_info *mtd, loff_t ofs) 423static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
356{ 424{
357 int page, res = 0, i = 0; 425 int page, page_end, res;
358 struct nand_chip *chip = mtd_to_nand(mtd); 426 struct nand_chip *chip = mtd_to_nand(mtd);
359 u16 bad; 427 u8 bad;
360 428
361 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE) 429 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
362 ofs += mtd->erasesize - mtd->writesize; 430 ofs += mtd->erasesize - mtd->writesize;
363 431
364 page = (int)(ofs >> chip->page_shift) & chip->pagemask; 432 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
433 page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);
365 434
366 do { 435 for (; page < page_end; page++) {
367 if (chip->options & NAND_BUSWIDTH_16) { 436 res = chip->ecc.read_oob(mtd, chip, page);
368 chip->cmdfunc(mtd, NAND_CMD_READOOB, 437 if (res)
369 chip->badblockpos & 0xFE, page); 438 return res;
370 bad = cpu_to_le16(chip->read_word(mtd)); 439
371 if (chip->badblockpos & 0x1) 440 bad = chip->oob_poi[chip->badblockpos];
372 bad >>= 8;
373 else
374 bad &= 0xFF;
375 } else {
376 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
377 page);
378 bad = chip->read_byte(mtd);
379 }
380 441
381 if (likely(chip->badblockbits == 8)) 442 if (likely(chip->badblockbits == 8))
382 res = bad != 0xFF; 443 res = bad != 0xFF;
383 else 444 else
384 res = hweight8(bad) < chip->badblockbits; 445 res = hweight8(bad) < chip->badblockbits;
385 ofs += mtd->writesize; 446 if (res)
386 page = (int)(ofs >> chip->page_shift) & chip->pagemask; 447 return res;
387 i++; 448 }
388 } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
389 449
390 return res; 450 return 0;
391} 451}
392 452
393/** 453/**
@@ -676,6 +736,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
676 case NAND_CMD_ERASE2: 736 case NAND_CMD_ERASE2:
677 case NAND_CMD_SEQIN: 737 case NAND_CMD_SEQIN:
678 case NAND_CMD_STATUS: 738 case NAND_CMD_STATUS:
739 case NAND_CMD_READID:
740 case NAND_CMD_SET_FEATURES:
679 return; 741 return;
680 742
681 case NAND_CMD_RESET: 743 case NAND_CMD_RESET:
@@ -794,6 +856,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
794 case NAND_CMD_ERASE2: 856 case NAND_CMD_ERASE2:
795 case NAND_CMD_SEQIN: 857 case NAND_CMD_SEQIN:
796 case NAND_CMD_STATUS: 858 case NAND_CMD_STATUS:
859 case NAND_CMD_READID:
860 case NAND_CMD_SET_FEATURES:
797 return; 861 return;
798 862
799 case NAND_CMD_RNDIN: 863 case NAND_CMD_RNDIN:
@@ -1958,7 +2022,9 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1958 if (!aligned) 2022 if (!aligned)
1959 use_bufpoi = 1; 2023 use_bufpoi = 1;
1960 else if (chip->options & NAND_USE_BOUNCE_BUFFER) 2024 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
1961 use_bufpoi = !virt_addr_valid(buf); 2025 use_bufpoi = !virt_addr_valid(buf) ||
2026 !IS_ALIGNED((unsigned long)buf,
2027 chip->buf_align);
1962 else 2028 else
1963 use_bufpoi = 0; 2029 use_bufpoi = 0;
1964 2030
@@ -1997,8 +2063,6 @@ read_retry:
1997 break; 2063 break;
1998 } 2064 }
1999 2065
2000 max_bitflips = max_t(unsigned int, max_bitflips, ret);
2001
2002 /* Transfer not aligned data */ 2066 /* Transfer not aligned data */
2003 if (use_bufpoi) { 2067 if (use_bufpoi) {
2004 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob && 2068 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
@@ -2049,6 +2113,7 @@ read_retry:
2049 } 2113 }
2050 2114
2051 buf += bytes; 2115 buf += bytes;
2116 max_bitflips = max_t(unsigned int, max_bitflips, ret);
2052 } else { 2117 } else {
2053 memcpy(buf, chip->buffers->databuf + col, bytes); 2118 memcpy(buf, chip->buffers->databuf + col, bytes);
2054 buf += bytes; 2119 buf += bytes;
@@ -2637,7 +2702,7 @@ static int nand_write_page_syndrome(struct mtd_info *mtd,
2637} 2702}
2638 2703
2639/** 2704/**
2640 * nand_write_page - [REPLACEABLE] write one page 2705 * nand_write_page - write one page
2641 * @mtd: MTD device structure 2706 * @mtd: MTD device structure
2642 * @chip: NAND chip descriptor 2707 * @chip: NAND chip descriptor
2643 * @offset: address offset within the page 2708 * @offset: address offset within the page
@@ -2815,7 +2880,9 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2815 if (part_pagewr) 2880 if (part_pagewr)
2816 use_bufpoi = 1; 2881 use_bufpoi = 1;
2817 else if (chip->options & NAND_USE_BOUNCE_BUFFER) 2882 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
2818 use_bufpoi = !virt_addr_valid(buf); 2883 use_bufpoi = !virt_addr_valid(buf) ||
2884 !IS_ALIGNED((unsigned long)buf,
2885 chip->buf_align);
2819 else 2886 else
2820 use_bufpoi = 0; 2887 use_bufpoi = 0;
2821 2888
@@ -2840,9 +2907,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2840 /* We still need to erase leftover OOB data */ 2907 /* We still need to erase leftover OOB data */
2841 memset(chip->oob_poi, 0xff, mtd->oobsize); 2908 memset(chip->oob_poi, 0xff, mtd->oobsize);
2842 } 2909 }
2843 ret = chip->write_page(mtd, chip, column, bytes, wbuf, 2910
2844 oob_required, page, cached, 2911 ret = nand_write_page(mtd, chip, column, bytes, wbuf,
2845 (ops->mode == MTD_OPS_RAW)); 2912 oob_required, page, cached,
2913 (ops->mode == MTD_OPS_RAW));
2846 if (ret) 2914 if (ret)
2847 break; 2915 break;
2848 2916
@@ -3385,8 +3453,10 @@ static void nand_shutdown(struct mtd_info *mtd)
3385} 3453}
3386 3454
3387/* Set default functions */ 3455/* Set default functions */
3388static void nand_set_defaults(struct nand_chip *chip, int busw) 3456static void nand_set_defaults(struct nand_chip *chip)
3389{ 3457{
3458 unsigned int busw = chip->options & NAND_BUSWIDTH_16;
3459
3390 /* check for proper chip_delay setup, set 20us if not */ 3460 /* check for proper chip_delay setup, set 20us if not */
3391 if (!chip->chip_delay) 3461 if (!chip->chip_delay)
3392 chip->chip_delay = 20; 3462 chip->chip_delay = 20;
@@ -3431,6 +3501,8 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
3431 nand_hw_control_init(chip->controller); 3501 nand_hw_control_init(chip->controller);
3432 } 3502 }
3433 3503
3504 if (!chip->buf_align)
3505 chip->buf_align = 1;
3434} 3506}
3435 3507
3436/* Sanitize ONFI strings so we can safely print them */ 3508/* Sanitize ONFI strings so we can safely print them */
@@ -3464,9 +3536,10 @@ static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
3464} 3536}
3465 3537
3466/* Parse the Extended Parameter Page. */ 3538/* Parse the Extended Parameter Page. */
3467static int nand_flash_detect_ext_param_page(struct mtd_info *mtd, 3539static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
3468 struct nand_chip *chip, struct nand_onfi_params *p) 3540 struct nand_onfi_params *p)
3469{ 3541{
3542 struct mtd_info *mtd = nand_to_mtd(chip);
3470 struct onfi_ext_param_page *ep; 3543 struct onfi_ext_param_page *ep;
3471 struct onfi_ext_section *s; 3544 struct onfi_ext_section *s;
3472 struct onfi_ext_ecc_info *ecc; 3545 struct onfi_ext_ecc_info *ecc;
@@ -3534,36 +3607,12 @@ ext_out:
3534 return ret; 3607 return ret;
3535} 3608}
3536 3609
3537static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode)
3538{
3539 struct nand_chip *chip = mtd_to_nand(mtd);
3540 uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
3541
3542 return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
3543 feature);
3544}
3545
3546/*
3547 * Configure chip properties from Micron vendor-specific ONFI table
3548 */
3549static void nand_onfi_detect_micron(struct nand_chip *chip,
3550 struct nand_onfi_params *p)
3551{
3552 struct nand_onfi_vendor_micron *micron = (void *)p->vendor;
3553
3554 if (le16_to_cpu(p->vendor_revision) < 1)
3555 return;
3556
3557 chip->read_retries = micron->read_retry_options;
3558 chip->setup_read_retry = nand_setup_read_retry_micron;
3559}
3560
3561/* 3610/*
3562 * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise. 3611 * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
3563 */ 3612 */
3564static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, 3613static int nand_flash_detect_onfi(struct nand_chip *chip)
3565 int *busw)
3566{ 3614{
3615 struct mtd_info *mtd = nand_to_mtd(chip);
3567 struct nand_onfi_params *p = &chip->onfi_params; 3616 struct nand_onfi_params *p = &chip->onfi_params;
3568 int i, j; 3617 int i, j;
3569 int val; 3618 int val;
@@ -3633,9 +3682,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
3633 chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun); 3682 chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);
3634 3683
3635 if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS) 3684 if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
3636 *busw = NAND_BUSWIDTH_16; 3685 chip->options |= NAND_BUSWIDTH_16;
3637 else
3638 *busw = 0;
3639 3686
3640 if (p->ecc_bits != 0xff) { 3687 if (p->ecc_bits != 0xff) {
3641 chip->ecc_strength_ds = p->ecc_bits; 3688 chip->ecc_strength_ds = p->ecc_bits;
@@ -3653,24 +3700,21 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
3653 chip->cmdfunc = nand_command_lp; 3700 chip->cmdfunc = nand_command_lp;
3654 3701
3655 /* The Extended Parameter Page is supported since ONFI 2.1. */ 3702 /* The Extended Parameter Page is supported since ONFI 2.1. */
3656 if (nand_flash_detect_ext_param_page(mtd, chip, p)) 3703 if (nand_flash_detect_ext_param_page(chip, p))
3657 pr_warn("Failed to detect ONFI extended param page\n"); 3704 pr_warn("Failed to detect ONFI extended param page\n");
3658 } else { 3705 } else {
3659 pr_warn("Could not retrieve ONFI ECC requirements\n"); 3706 pr_warn("Could not retrieve ONFI ECC requirements\n");
3660 } 3707 }
3661 3708
3662 if (p->jedec_id == NAND_MFR_MICRON)
3663 nand_onfi_detect_micron(chip, p);
3664
3665 return 1; 3709 return 1;
3666} 3710}
3667 3711
3668/* 3712/*
3669 * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise. 3713 * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
3670 */ 3714 */
3671static int nand_flash_detect_jedec(struct mtd_info *mtd, struct nand_chip *chip, 3715static int nand_flash_detect_jedec(struct nand_chip *chip)
3672 int *busw)
3673{ 3716{
3717 struct mtd_info *mtd = nand_to_mtd(chip);
3674 struct nand_jedec_params *p = &chip->jedec_params; 3718 struct nand_jedec_params *p = &chip->jedec_params;
3675 struct jedec_ecc_info *ecc; 3719 struct jedec_ecc_info *ecc;
3676 int val; 3720 int val;
@@ -3729,9 +3773,7 @@ static int nand_flash_detect_jedec(struct mtd_info *mtd, struct nand_chip *chip,
3729 chip->bits_per_cell = p->bits_per_cell; 3773 chip->bits_per_cell = p->bits_per_cell;
3730 3774
3731 if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS) 3775 if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS)
3732 *busw = NAND_BUSWIDTH_16; 3776 chip->options |= NAND_BUSWIDTH_16;
3733 else
3734 *busw = 0;
3735 3777
3736 /* ECC info */ 3778 /* ECC info */
3737 ecc = &p->ecc_info[0]; 3779 ecc = &p->ecc_info[0];
@@ -3820,165 +3862,46 @@ static int nand_get_bits_per_cell(u8 cellinfo)
3820 * chip. The rest of the parameters must be decoded according to generic or 3862 * chip. The rest of the parameters must be decoded according to generic or
3821 * manufacturer-specific "extended ID" decoding patterns. 3863 * manufacturer-specific "extended ID" decoding patterns.
3822 */ 3864 */
3823static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip, 3865void nand_decode_ext_id(struct nand_chip *chip)
3824 u8 id_data[8], int *busw)
3825{ 3866{
3826 int extid, id_len; 3867 struct mtd_info *mtd = nand_to_mtd(chip);
3868 int extid;
3869 u8 *id_data = chip->id.data;
3827 /* The 3rd id byte holds MLC / multichip data */ 3870 /* The 3rd id byte holds MLC / multichip data */
3828 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]); 3871 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
3829 /* The 4th id byte is the important one */ 3872 /* The 4th id byte is the important one */
3830 extid = id_data[3]; 3873 extid = id_data[3];
3831 3874
3832 id_len = nand_id_len(id_data, 8); 3875 /* Calc pagesize */
3833 3876 mtd->writesize = 1024 << (extid & 0x03);
3834 /* 3877 extid >>= 2;
3835 * Field definitions are in the following datasheets: 3878 /* Calc oobsize */
3836 * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32) 3879 mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
3837 * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44) 3880 extid >>= 2;
3838 * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22) 3881 /* Calc blocksize. Blocksize is multiples of 64KiB */
3839 * 3882 mtd->erasesize = (64 * 1024) << (extid & 0x03);
3840 * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung 3883 extid >>= 2;
3841 * ID to decide what to do. 3884 /* Get buswidth information */
3842 */ 3885 if (extid & 0x1)
3843 if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG && 3886 chip->options |= NAND_BUSWIDTH_16;
3844 !nand_is_slc(chip) && id_data[5] != 0x00) {
3845 /* Calc pagesize */
3846 mtd->writesize = 2048 << (extid & 0x03);
3847 extid >>= 2;
3848 /* Calc oobsize */
3849 switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
3850 case 1:
3851 mtd->oobsize = 128;
3852 break;
3853 case 2:
3854 mtd->oobsize = 218;
3855 break;
3856 case 3:
3857 mtd->oobsize = 400;
3858 break;
3859 case 4:
3860 mtd->oobsize = 436;
3861 break;
3862 case 5:
3863 mtd->oobsize = 512;
3864 break;
3865 case 6:
3866 mtd->oobsize = 640;
3867 break;
3868 case 7:
3869 default: /* Other cases are "reserved" (unknown) */
3870 mtd->oobsize = 1024;
3871 break;
3872 }
3873 extid >>= 2;
3874 /* Calc blocksize */
3875 mtd->erasesize = (128 * 1024) <<
3876 (((extid >> 1) & 0x04) | (extid & 0x03));
3877 *busw = 0;
3878 } else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
3879 !nand_is_slc(chip)) {
3880 unsigned int tmp;
3881
3882 /* Calc pagesize */
3883 mtd->writesize = 2048 << (extid & 0x03);
3884 extid >>= 2;
3885 /* Calc oobsize */
3886 switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
3887 case 0:
3888 mtd->oobsize = 128;
3889 break;
3890 case 1:
3891 mtd->oobsize = 224;
3892 break;
3893 case 2:
3894 mtd->oobsize = 448;
3895 break;
3896 case 3:
3897 mtd->oobsize = 64;
3898 break;
3899 case 4:
3900 mtd->oobsize = 32;
3901 break;
3902 case 5:
3903 mtd->oobsize = 16;
3904 break;
3905 default:
3906 mtd->oobsize = 640;
3907 break;
3908 }
3909 extid >>= 2;
3910 /* Calc blocksize */
3911 tmp = ((extid >> 1) & 0x04) | (extid & 0x03);
3912 if (tmp < 0x03)
3913 mtd->erasesize = (128 * 1024) << tmp;
3914 else if (tmp == 0x03)
3915 mtd->erasesize = 768 * 1024;
3916 else
3917 mtd->erasesize = (64 * 1024) << tmp;
3918 *busw = 0;
3919 } else {
3920 /* Calc pagesize */
3921 mtd->writesize = 1024 << (extid & 0x03);
3922 extid >>= 2;
3923 /* Calc oobsize */
3924 mtd->oobsize = (8 << (extid & 0x01)) *
3925 (mtd->writesize >> 9);
3926 extid >>= 2;
3927 /* Calc blocksize. Blocksize is multiples of 64KiB */
3928 mtd->erasesize = (64 * 1024) << (extid & 0x03);
3929 extid >>= 2;
3930 /* Get buswidth information */
3931 *busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
3932
3933 /*
3934 * Toshiba 24nm raw SLC (i.e., not BENAND) have 32B OOB per
3935 * 512B page. For Toshiba SLC, we decode the 5th/6th byte as
3936 * follows:
3937 * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm,
3938 * 110b -> 24nm
3939 * - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC
3940 */
3941 if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA &&
3942 nand_is_slc(chip) &&
3943 (id_data[5] & 0x7) == 0x6 /* 24nm */ &&
3944 !(id_data[4] & 0x80) /* !BENAND */) {
3945 mtd->oobsize = 32 * mtd->writesize >> 9;
3946 }
3947
3948 }
3949} 3887}
3888EXPORT_SYMBOL_GPL(nand_decode_ext_id);
3950 3889
3951/* 3890/*
3952 * Old devices have chip data hardcoded in the device ID table. nand_decode_id 3891 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
3953 * decodes a matching ID table entry and assigns the MTD size parameters for 3892 * decodes a matching ID table entry and assigns the MTD size parameters for
3954 * the chip. 3893 * the chip.
3955 */ 3894 */
3956static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip, 3895static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
3957 struct nand_flash_dev *type, u8 id_data[8],
3958 int *busw)
3959{ 3896{
3960 int maf_id = id_data[0]; 3897 struct mtd_info *mtd = nand_to_mtd(chip);
3961 3898
3962 mtd->erasesize = type->erasesize; 3899 mtd->erasesize = type->erasesize;
3963 mtd->writesize = type->pagesize; 3900 mtd->writesize = type->pagesize;
3964 mtd->oobsize = mtd->writesize / 32; 3901 mtd->oobsize = mtd->writesize / 32;
3965 *busw = type->options & NAND_BUSWIDTH_16;
3966 3902
3967 /* All legacy ID NAND are small-page, SLC */ 3903 /* All legacy ID NAND are small-page, SLC */
3968 chip->bits_per_cell = 1; 3904 chip->bits_per_cell = 1;
3969
3970 /*
3971 * Check for Spansion/AMD ID + repeating 5th, 6th byte since
3972 * some Spansion chips have erasesize that conflicts with size
3973 * listed in nand_ids table.
3974 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
3975 */
3976 if (maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && id_data[5] == 0x00
3977 && id_data[6] == 0x00 && id_data[7] == 0x00
3978 && mtd->writesize == 512) {
3979 mtd->erasesize = 128 * 1024;
3980 mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
3981 }
3982} 3905}
3983 3906
3984/* 3907/*
@@ -3986,36 +3909,15 @@ static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,
3986 * heuristic patterns using various detected parameters (e.g., manufacturer, 3909 * heuristic patterns using various detected parameters (e.g., manufacturer,
3987 * page size, cell-type information). 3910 * page size, cell-type information).
3988 */ 3911 */
3989static void nand_decode_bbm_options(struct mtd_info *mtd, 3912static void nand_decode_bbm_options(struct nand_chip *chip)
3990 struct nand_chip *chip, u8 id_data[8])
3991{ 3913{
3992 int maf_id = id_data[0]; 3914 struct mtd_info *mtd = nand_to_mtd(chip);
3993 3915
3994 /* Set the bad block position */ 3916 /* Set the bad block position */
3995 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16)) 3917 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
3996 chip->badblockpos = NAND_LARGE_BADBLOCK_POS; 3918 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
3997 else 3919 else
3998 chip->badblockpos = NAND_SMALL_BADBLOCK_POS; 3920 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
3999
4000 /*
4001 * Bad block marker is stored in the last page of each block on Samsung
4002 * and Hynix MLC devices; stored in first two pages of each block on
4003 * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
4004 * AMD/Spansion, and Macronix. All others scan only the first page.
4005 */
4006 if (!nand_is_slc(chip) &&
4007 (maf_id == NAND_MFR_SAMSUNG ||
4008 maf_id == NAND_MFR_HYNIX))
4009 chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
4010 else if ((nand_is_slc(chip) &&
4011 (maf_id == NAND_MFR_SAMSUNG ||
4012 maf_id == NAND_MFR_HYNIX ||
4013 maf_id == NAND_MFR_TOSHIBA ||
4014 maf_id == NAND_MFR_AMD ||
4015 maf_id == NAND_MFR_MACRONIX)) ||
4016 (mtd->writesize == 2048 &&
4017 maf_id == NAND_MFR_MICRON))
4018 chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
4019} 3921}
4020 3922
4021static inline bool is_full_id_nand(struct nand_flash_dev *type) 3923static inline bool is_full_id_nand(struct nand_flash_dev *type)
@@ -4023,9 +3925,12 @@ static inline bool is_full_id_nand(struct nand_flash_dev *type)
4023 return type->id_len; 3925 return type->id_len;
4024} 3926}
4025 3927
4026static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip, 3928static bool find_full_id_nand(struct nand_chip *chip,
4027 struct nand_flash_dev *type, u8 *id_data, int *busw) 3929 struct nand_flash_dev *type)
4028{ 3930{
3931 struct mtd_info *mtd = nand_to_mtd(chip);
3932 u8 *id_data = chip->id.data;
3933
4029 if (!strncmp(type->id, id_data, type->id_len)) { 3934 if (!strncmp(type->id, id_data, type->id_len)) {
4030 mtd->writesize = type->pagesize; 3935 mtd->writesize = type->pagesize;
4031 mtd->erasesize = type->erasesize; 3936 mtd->erasesize = type->erasesize;
@@ -4039,8 +3944,6 @@ static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
4039 chip->onfi_timing_mode_default = 3944 chip->onfi_timing_mode_default =
4040 type->onfi_timing_mode_default; 3945 type->onfi_timing_mode_default;
4041 3946
4042 *busw = type->options & NAND_BUSWIDTH_16;
4043
4044 if (!mtd->name) 3947 if (!mtd->name)
4045 mtd->name = type->name; 3948 mtd->name = type->name;
4046 3949
@@ -4050,15 +3953,63 @@ static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
4050} 3953}
4051 3954
4052/* 3955/*
3956 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
3957 * compliant and does not have a full-id or legacy-id entry in the nand_ids
3958 * table.
3959 */
3960static void nand_manufacturer_detect(struct nand_chip *chip)
3961{
3962 /*
3963 * Try manufacturer detection if available and use
3964 * nand_decode_ext_id() otherwise.
3965 */
3966 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
3967 chip->manufacturer.desc->ops->detect)
3968 chip->manufacturer.desc->ops->detect(chip);
3969 else
3970 nand_decode_ext_id(chip);
3971}
3972
3973/*
3974 * Manufacturer initialization. This function is called for all NANDs including
3975 * ONFI and JEDEC compliant ones.
3976 * Manufacturer drivers should put all their specific initialization code in
3977 * their ->init() hook.
3978 */
3979static int nand_manufacturer_init(struct nand_chip *chip)
3980{
3981 if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
3982 !chip->manufacturer.desc->ops->init)
3983 return 0;
3984
3985 return chip->manufacturer.desc->ops->init(chip);
3986}
3987
3988/*
3989 * Manufacturer cleanup. This function is called for all NANDs including
3990 * ONFI and JEDEC compliant ones.
3991 * Manufacturer drivers should put all their specific cleanup code in their
3992 * ->cleanup() hook.
3993 */
3994static void nand_manufacturer_cleanup(struct nand_chip *chip)
3995{
3996 /* Release manufacturer private data */
3997 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
3998 chip->manufacturer.desc->ops->cleanup)
3999 chip->manufacturer.desc->ops->cleanup(chip);
4000}
4001
4002/*
4053 * Get the flash and manufacturer id and lookup if the type is supported. 4003 * Get the flash and manufacturer id and lookup if the type is supported.
4054 */ 4004 */
4055static int nand_get_flash_type(struct mtd_info *mtd, struct nand_chip *chip, 4005static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
4056 int *maf_id, int *dev_id,
4057 struct nand_flash_dev *type)
4058{ 4006{
4007 const struct nand_manufacturer *manufacturer;
4008 struct mtd_info *mtd = nand_to_mtd(chip);
4059 int busw; 4009 int busw;
4060 int i, maf_idx; 4010 int i, ret;
4061 u8 id_data[8]; 4011 u8 *id_data = chip->id.data;
4012 u8 maf_id, dev_id;
4062 4013
4063 /* 4014 /*
4064 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) 4015 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
@@ -4073,8 +4024,8 @@ static int nand_get_flash_type(struct mtd_info *mtd, struct nand_chip *chip,
4073 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 4024 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4074 4025
4075 /* Read manufacturer and device IDs */ 4026 /* Read manufacturer and device IDs */
4076 *maf_id = chip->read_byte(mtd); 4027 maf_id = chip->read_byte(mtd);
4077 *dev_id = chip->read_byte(mtd); 4028 dev_id = chip->read_byte(mtd);
4078 4029
4079 /* 4030 /*
4080 * Try again to make sure, as some systems the bus-hold or other 4031 * Try again to make sure, as some systems the bus-hold or other
@@ -4089,20 +4040,41 @@ static int nand_get_flash_type(struct mtd_info *mtd, struct nand_chip *chip,
4089 for (i = 0; i < 8; i++) 4040 for (i = 0; i < 8; i++)
4090 id_data[i] = chip->read_byte(mtd); 4041 id_data[i] = chip->read_byte(mtd);
4091 4042
4092 if (id_data[0] != *maf_id || id_data[1] != *dev_id) { 4043 if (id_data[0] != maf_id || id_data[1] != dev_id) {
4093 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n", 4044 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
4094 *maf_id, *dev_id, id_data[0], id_data[1]); 4045 maf_id, dev_id, id_data[0], id_data[1]);
4095 return -ENODEV; 4046 return -ENODEV;
4096 } 4047 }
4097 4048
4049 chip->id.len = nand_id_len(id_data, 8);
4050
4051 /* Try to identify manufacturer */
4052 manufacturer = nand_get_manufacturer(maf_id);
4053 chip->manufacturer.desc = manufacturer;
4054
4098 if (!type) 4055 if (!type)
4099 type = nand_flash_ids; 4056 type = nand_flash_ids;
4100 4057
4058 /*
4059 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
4060 * override it.
4061 * This is required to make sure initial NAND bus width set by the
4062 * NAND controller driver is coherent with the real NAND bus width
4063 * (extracted by auto-detection code).
4064 */
4065 busw = chip->options & NAND_BUSWIDTH_16;
4066
4067 /*
4068 * The flag is only set (never cleared), reset it to its default value
4069 * before starting auto-detection.
4070 */
4071 chip->options &= ~NAND_BUSWIDTH_16;
4072
4101 for (; type->name != NULL; type++) { 4073 for (; type->name != NULL; type++) {
4102 if (is_full_id_nand(type)) { 4074 if (is_full_id_nand(type)) {
4103 if (find_full_id_nand(mtd, chip, type, id_data, &busw)) 4075 if (find_full_id_nand(chip, type))
4104 goto ident_done; 4076 goto ident_done;
4105 } else if (*dev_id == type->dev_id) { 4077 } else if (dev_id == type->dev_id) {
4106 break; 4078 break;
4107 } 4079 }
4108 } 4080 }
@@ -4110,11 +4082,11 @@ static int nand_get_flash_type(struct mtd_info *mtd, struct nand_chip *chip,
4110 chip->onfi_version = 0; 4082 chip->onfi_version = 0;
4111 if (!type->name || !type->pagesize) { 4083 if (!type->name || !type->pagesize) {
4112 /* Check if the chip is ONFI compliant */ 4084 /* Check if the chip is ONFI compliant */
4113 if (nand_flash_detect_onfi(mtd, chip, &busw)) 4085 if (nand_flash_detect_onfi(chip))
4114 goto ident_done; 4086 goto ident_done;
4115 4087
4116 /* Check if the chip is JEDEC compliant */ 4088 /* Check if the chip is JEDEC compliant */
4117 if (nand_flash_detect_jedec(mtd, chip, &busw)) 4089 if (nand_flash_detect_jedec(chip))
4118 goto ident_done; 4090 goto ident_done;
4119 } 4091 }
4120 4092
@@ -4126,48 +4098,34 @@ static int nand_get_flash_type(struct mtd_info *mtd, struct nand_chip *chip,
4126 4098
4127 chip->chipsize = (uint64_t)type->chipsize << 20; 4099 chip->chipsize = (uint64_t)type->chipsize << 20;
4128 4100
4129 if (!type->pagesize) { 4101 if (!type->pagesize)
4130 /* Decode parameters from extended ID */ 4102 nand_manufacturer_detect(chip);
4131 nand_decode_ext_id(mtd, chip, id_data, &busw); 4103 else
4132 } else { 4104 nand_decode_id(chip, type);
4133 nand_decode_id(mtd, chip, type, id_data, &busw); 4105
4134 }
4135 /* Get chip options */ 4106 /* Get chip options */
4136 chip->options |= type->options; 4107 chip->options |= type->options;
4137 4108
4138 /*
4139 * Check if chip is not a Samsung device. Do not clear the
4140 * options for chips which do not have an extended id.
4141 */
4142 if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
4143 chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
4144ident_done: 4109ident_done:
4145 4110
4146 /* Try to identify manufacturer */
4147 for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
4148 if (nand_manuf_ids[maf_idx].id == *maf_id)
4149 break;
4150 }
4151
4152 if (chip->options & NAND_BUSWIDTH_AUTO) { 4111 if (chip->options & NAND_BUSWIDTH_AUTO) {
4153 WARN_ON(chip->options & NAND_BUSWIDTH_16); 4112 WARN_ON(busw & NAND_BUSWIDTH_16);
4154 chip->options |= busw; 4113 nand_set_defaults(chip);
4155 nand_set_defaults(chip, busw);
4156 } else if (busw != (chip->options & NAND_BUSWIDTH_16)) { 4114 } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
4157 /* 4115 /*
4158 * Check, if buswidth is correct. Hardware drivers should set 4116 * Check, if buswidth is correct. Hardware drivers should set
4159 * chip correct! 4117 * chip correct!
4160 */ 4118 */
4161 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", 4119 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4162 *maf_id, *dev_id); 4120 maf_id, dev_id);
4163 pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, mtd->name); 4121 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4164 pr_warn("bus width %d instead %d bit\n", 4122 mtd->name);
4165 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8, 4123 pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
4166 busw ? 16 : 8); 4124 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
4167 return -EINVAL; 4125 return -EINVAL;
4168 } 4126 }
4169 4127
4170 nand_decode_bbm_options(mtd, chip, id_data); 4128 nand_decode_bbm_options(chip);
4171 4129
4172 /* Calculate the address shift from the page size */ 4130 /* Calculate the address shift from the page size */
4173 chip->page_shift = ffs(mtd->writesize) - 1; 4131 chip->page_shift = ffs(mtd->writesize) - 1;
@@ -4190,18 +4148,22 @@ ident_done:
4190 if (mtd->writesize > 512 && chip->cmdfunc == nand_command) 4148 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
4191 chip->cmdfunc = nand_command_lp; 4149 chip->cmdfunc = nand_command_lp;
4192 4150
4151 ret = nand_manufacturer_init(chip);
4152 if (ret)
4153 return ret;
4154
4193 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", 4155 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4194 *maf_id, *dev_id); 4156 maf_id, dev_id);
4195 4157
4196 if (chip->onfi_version) 4158 if (chip->onfi_version)
4197 pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, 4159 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4198 chip->onfi_params.model); 4160 chip->onfi_params.model);
4199 else if (chip->jedec_version) 4161 else if (chip->jedec_version)
4200 pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, 4162 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4201 chip->jedec_params.model); 4163 chip->jedec_params.model);
4202 else 4164 else
4203 pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, 4165 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4204 type->name); 4166 type->name);
4205 4167
4206 pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n", 4168 pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
4207 (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC", 4169 (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
@@ -4333,12 +4295,6 @@ static int nand_dt_init(struct nand_chip *chip)
4333 ecc_strength = of_get_nand_ecc_strength(dn); 4295 ecc_strength = of_get_nand_ecc_strength(dn);
4334 ecc_step = of_get_nand_ecc_step_size(dn); 4296 ecc_step = of_get_nand_ecc_step_size(dn);
4335 4297
4336 if ((ecc_step >= 0 && !(ecc_strength >= 0)) ||
4337 (!(ecc_step >= 0) && ecc_strength >= 0)) {
4338 pr_err("must set both strength and step size in DT\n");
4339 return -EINVAL;
4340 }
4341
4342 if (ecc_mode >= 0) 4298 if (ecc_mode >= 0)
4343 chip->ecc.mode = ecc_mode; 4299 chip->ecc.mode = ecc_mode;
4344 4300
@@ -4391,10 +4347,10 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4391 return -EINVAL; 4347 return -EINVAL;
4392 } 4348 }
4393 /* Set the default functions */ 4349 /* Set the default functions */
4394 nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16); 4350 nand_set_defaults(chip);
4395 4351
4396 /* Read the flash type */ 4352 /* Read the flash type */
4397 ret = nand_get_flash_type(mtd, chip, &nand_maf_id, &nand_dev_id, table); 4353 ret = nand_detect(chip, table);
4398 if (ret) { 4354 if (ret) {
4399 if (!(chip->options & NAND_SCAN_SILENT_NODEV)) 4355 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
4400 pr_warn("No NAND device found\n"); 4356 pr_warn("No NAND device found\n");
@@ -4419,6 +4375,9 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4419 if (ret) 4375 if (ret)
4420 return ret; 4376 return ret;
4421 4377
4378 nand_maf_id = chip->id.data[0];
4379 nand_dev_id = chip->id.data[1];
4380
4422 chip->select_chip(mtd, -1); 4381 chip->select_chip(mtd, -1);
4423 4382
4424 /* Check for a chip array */ 4383 /* Check for a chip array */
@@ -4610,7 +4569,7 @@ int nand_scan_tail(struct mtd_info *mtd)
4610{ 4569{
4611 struct nand_chip *chip = mtd_to_nand(mtd); 4570 struct nand_chip *chip = mtd_to_nand(mtd);
4612 struct nand_ecc_ctrl *ecc = &chip->ecc; 4571 struct nand_ecc_ctrl *ecc = &chip->ecc;
4613 struct nand_buffers *nbuf; 4572 struct nand_buffers *nbuf = NULL;
4614 int ret; 4573 int ret;
4615 4574
4616 /* New bad blocks should be marked in OOB, flash-based BBT, or both */ 4575 /* New bad blocks should be marked in OOB, flash-based BBT, or both */
@@ -4624,13 +4583,28 @@ int nand_scan_tail(struct mtd_info *mtd)
4624 } 4583 }
4625 4584
4626 if (!(chip->options & NAND_OWN_BUFFERS)) { 4585 if (!(chip->options & NAND_OWN_BUFFERS)) {
4627 nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize 4586 nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
4628 + mtd->oobsize * 3, GFP_KERNEL);
4629 if (!nbuf) 4587 if (!nbuf)
4630 return -ENOMEM; 4588 return -ENOMEM;
4631 nbuf->ecccalc = (uint8_t *)(nbuf + 1); 4589
4632 nbuf->ecccode = nbuf->ecccalc + mtd->oobsize; 4590 nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
4633 nbuf->databuf = nbuf->ecccode + mtd->oobsize; 4591 if (!nbuf->ecccalc) {
4592 ret = -ENOMEM;
4593 goto err_free;
4594 }
4595
4596 nbuf->ecccode = kmalloc(mtd->oobsize, GFP_KERNEL);
4597 if (!nbuf->ecccode) {
4598 ret = -ENOMEM;
4599 goto err_free;
4600 }
4601
4602 nbuf->databuf = kmalloc(mtd->writesize + mtd->oobsize,
4603 GFP_KERNEL);
4604 if (!nbuf->databuf) {
4605 ret = -ENOMEM;
4606 goto err_free;
4607 }
4634 4608
4635 chip->buffers = nbuf; 4609 chip->buffers = nbuf;
4636 } else { 4610 } else {
@@ -4653,7 +4627,7 @@ int nand_scan_tail(struct mtd_info *mtd)
4653 break; 4627 break;
4654 case 64: 4628 case 64:
4655 case 128: 4629 case 128:
4656 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); 4630 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
4657 break; 4631 break;
4658 default: 4632 default:
4659 WARN(1, "No oob scheme defined for oobsize %d\n", 4633 WARN(1, "No oob scheme defined for oobsize %d\n",
@@ -4663,9 +4637,6 @@ int nand_scan_tail(struct mtd_info *mtd)
4663 } 4637 }
4664 } 4638 }
4665 4639
4666 if (!chip->write_page)
4667 chip->write_page = nand_write_page;
4668
4669 /* 4640 /*
4670 * Check ECC mode, default to software if 3byte/512byte hardware ECC is 4641 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
4671 * selected and we have 256 byte pagesize fallback to software ECC 4642 * selected and we have 256 byte pagesize fallback to software ECC
@@ -4873,8 +4844,12 @@ int nand_scan_tail(struct mtd_info *mtd)
4873 /* Build bad block table */ 4844 /* Build bad block table */
4874 return chip->scan_bbt(mtd); 4845 return chip->scan_bbt(mtd);
4875err_free: 4846err_free:
4876 if (!(chip->options & NAND_OWN_BUFFERS)) 4847 if (nbuf) {
4877 kfree(chip->buffers); 4848 kfree(nbuf->databuf);
4849 kfree(nbuf->ecccode);
4850 kfree(nbuf->ecccalc);
4851 kfree(nbuf);
4852 }
4878 return ret; 4853 return ret;
4879} 4854}
4880EXPORT_SYMBOL(nand_scan_tail); 4855EXPORT_SYMBOL(nand_scan_tail);
@@ -4925,13 +4900,20 @@ void nand_cleanup(struct nand_chip *chip)
4925 4900
4926 /* Free bad block table memory */ 4901 /* Free bad block table memory */
4927 kfree(chip->bbt); 4902 kfree(chip->bbt);
4928 if (!(chip->options & NAND_OWN_BUFFERS)) 4903 if (!(chip->options & NAND_OWN_BUFFERS) && chip->buffers) {
4904 kfree(chip->buffers->databuf);
4905 kfree(chip->buffers->ecccode);
4906 kfree(chip->buffers->ecccalc);
4929 kfree(chip->buffers); 4907 kfree(chip->buffers);
4908 }
4930 4909
4931 /* Free bad block descriptor memory */ 4910 /* Free bad block descriptor memory */
4932 if (chip->badblock_pattern && chip->badblock_pattern->options 4911 if (chip->badblock_pattern && chip->badblock_pattern->options
4933 & NAND_BBT_DYNAMICSTRUCT) 4912 & NAND_BBT_DYNAMICSTRUCT)
4934 kfree(chip->badblock_pattern); 4913 kfree(chip->badblock_pattern);
4914
4915 /* Free manufacturer priv data. */
4916 nand_manufacturer_cleanup(chip);
4935} 4917}
4936EXPORT_SYMBOL_GPL(nand_cleanup); 4918EXPORT_SYMBOL_GPL(nand_cleanup);
4937 4919
diff --git a/drivers/mtd/nand/nand_hynix.c b/drivers/mtd/nand/nand_hynix.c
new file mode 100644
index 000000000000..b12dc7325378
--- /dev/null
+++ b/drivers/mtd/nand/nand_hynix.c
@@ -0,0 +1,631 @@
1/*
2 * Copyright (C) 2017 Free Electrons
3 * Copyright (C) 2017 NextThing Co
4 *
5 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/mtd/nand.h>
19#include <linux/sizes.h>
20#include <linux/slab.h>
21
/* Hynix vendor-specific commands to enter/leave the parameter-setting mode */
#define NAND_HYNIX_CMD_SET_PARAMS 0x36
#define NAND_HYNIX_CMD_APPLY_PARAMS 0x16

/* Each value in the read-retry OTP area is repeated this many times */
#define NAND_HYNIX_1XNM_RR_REPEAT 8
26
/**
 * struct hynix_read_retry - read-retry data
 * @nregs: number of registers to set when applying a new read-retry mode
 * @regs: register offsets (NAND chip dependent)
 * @values: array of values to set in registers. The array size is equal to
 *	    (nregs * nmodes)
 */
struct hynix_read_retry {
	int nregs;
	const u8 *regs;
	u8 values[0];
};
39
/**
 * struct hynix_nand - private Hynix NAND struct
 * @read_retry: read-retry information extracted from the NAND, or NULL when
 *		read-retry is not supported/initialized for this chip
 */
struct hynix_nand {
	const struct hynix_read_retry *read_retry;
};
48
/**
 * struct hynix_read_retry_otp - structure describing the read-retry OTP
 *				 area
 * @nregs: number of hynix private registers to set before reading the OTP
 *	   area
 * @regs: registers that should be configured
 * @values: values that should be set in regs
 * @page: the address to pass to the READ_PAGE command. Depends on the NAND
 *	  chip
 * @size: size of the read-retry OTP section
 */
struct hynix_read_retry_otp {
	int nregs;
	const u8 *regs;
	const u8 *values;
	int page;
	int size;
};
67
68static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
69{
70 struct mtd_info *mtd = nand_to_mtd(chip);
71 u8 jedecid[6] = { };
72 int i = 0;
73
74 chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
75 for (i = 0; i < 5; i++)
76 jedecid[i] = chip->read_byte(mtd);
77
78 return !strcmp("JEDEC", jedecid);
79}
80
/*
 * ->setup_read_retry() hook: program the chip-specific register set with the
 * values attached to @retry_mode, then apply them.
 * Returns 0 on success, -EIO when the NAND reports a failure.
 */
static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
	const u8 *values;
	int status;
	int i;

	/* Values for mode N start at values[N * nregs] */
	values = hynix->read_retry->values +
		 (retry_mode * hynix->read_retry->nregs);

	/* Enter 'Set Hynix Parameters' mode */
	chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, -1, -1);

	/*
	 * Configure the NAND in the requested read-retry mode.
	 * This is done by setting pre-defined values in internal NAND
	 * registers.
	 *
	 * The set of registers is NAND specific, and the values are either
	 * predefined or extracted from an OTP area on the NAND (values are
	 * probably tweaked at production in this case).
	 */
	for (i = 0; i < hynix->read_retry->nregs; i++) {
		int column = hynix->read_retry->regs[i];

		/* The register offset is duplicated into both address bytes */
		column |= column << 8;
		chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
		chip->write_byte(mtd, values[i]);
	}

	/* Apply the new settings. */
	chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);

	status = chip->waitfunc(mtd, chip);
	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
121
122/**
123 * hynix_get_majority - get the value that is occurring the most in a given
124 * set of values
125 * @in: the array of values to test
126 * @repeat: the size of the in array
127 * @out: pointer used to store the output value
128 *
129 * This function implements the 'majority check' logic that is supposed to
130 * overcome the unreliability of MLC NANDs when reading the OTP area storing
131 * the read-retry parameters.
132 *
133 * It's based on a pretty simple assumption: if we repeat the same value
134 * several times and then take the one that is occurring the most, we should
135 * find the correct value.
136 * Let's hope this dummy algorithm prevents us from losing the read-retry
137 * parameters.
138 */
139static int hynix_get_majority(const u8 *in, int repeat, u8 *out)
140{
141 int i, j, half = repeat / 2;
142
143 /*
144 * We only test the first half of the in array because we must ensure
145 * that the value is at least occurring repeat / 2 times.
146 *
147 * This loop is suboptimal since we may count the occurrences of the
148 * same value several time, but we are doing that on small sets, which
149 * makes it acceptable.
150 */
151 for (i = 0; i < half; i++) {
152 int cnt = 0;
153 u8 val = in[i];
154
155 /* Count all values that are matching the one at index i. */
156 for (j = i + 1; j < repeat; j++) {
157 if (in[j] == val)
158 cnt++;
159 }
160
161 /* We found a value occurring more than repeat / 2. */
162 if (cnt > half) {
163 *out = val;
164 return 0;
165 }
166 }
167
168 return -EIO;
169}
170
/*
 * Dump the read-retry OTP area described by @info into @buf.
 * Sequence: reset the chip, program the registers listed in @info through the
 * Hynix parameter-setting mode, enter OTP mode, read @info->size bytes from
 * @info->page, then restore normal operation.
 * Always returns 0.
 */
static int hynix_read_rr_otp(struct nand_chip *chip,
			     const struct hynix_read_retry_otp *info,
			     void *buf)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i;

	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);

	/* Enter 'Set Hynix Parameters' mode */
	chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, -1, -1);

	for (i = 0; i < info->nregs; i++) {
		int column = info->regs[i];

		/* The register offset is duplicated into both address bytes */
		column |= column << 8;
		chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
		chip->write_byte(mtd, info->values[i]);
	}

	chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);

	/* Sequence to enter OTP mode? (NOTE(review): undocumented opcodes) */
	chip->cmdfunc(mtd, 0x17, -1, -1);
	chip->cmdfunc(mtd, 0x04, -1, -1);
	chip->cmdfunc(mtd, 0x19, -1, -1);

	/* Now read the page */
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, info->page);
	chip->read_buf(mtd, buf, info->size);

	/* Put everything back to normal: reset, then write 0 to register 0x38 */
	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
	chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, 0x38, -1);
	chip->write_byte(mtd, 0x0);
	chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, -1);

	return 0;
}
210
/*
 * Layout of the 1xnm read-retry OTP dump: 8 repeated copies of the mode
 * count, 8 repeated copies of the register count, then alternating plain and
 * complemented copies of the full (nmodes * nregs) value set.
 */
#define NAND_HYNIX_1XNM_RR_COUNT_OFFS 0
#define NAND_HYNIX_1XNM_RR_REG_COUNT_OFFS 8
#define NAND_HYNIX_1XNM_RR_SET_OFFS(x, setsize, inv) \
	(16 + ((((x) * 2) + ((inv) ? 1 : 0)) * (setsize)))

/*
 * Extract the value for register @reg of mode @mode from the OTP dump in
 * @buf, using the plain (@inv == false) or complemented (@inv == true)
 * copies. Stores the result in @val and returns 0, or -EIO when the repeated
 * copies do not agree on a majority value.
 */
static int hynix_mlc_1xnm_rr_value(const u8 *buf, int nmodes, int nregs,
				   int mode, int reg, bool inv, u8 *val)
{
	u8 tmp[NAND_HYNIX_1XNM_RR_REPEAT];
	int val_offs = (mode * nregs) + reg;
	int set_size = nmodes * nregs;
	int i, ret;

	/* Gather the NAND_HYNIX_1XNM_RR_REPEAT copies of this value */
	for (i = 0; i < NAND_HYNIX_1XNM_RR_REPEAT; i++) {
		int set_offs = NAND_HYNIX_1XNM_RR_SET_OFFS(i, set_size, inv);

		tmp[i] = buf[val_offs + set_offs];
	}

	/* Majority-vote between the copies to filter out bitflips */
	ret = hynix_get_majority(tmp, NAND_HYNIX_1XNM_RR_REPEAT, val);
	if (ret)
		return ret;

	/* The 'inv' sets store the complemented value */
	if (inv)
		*val = ~*val;

	return 0;
}
239
240static u8 hynix_1xnm_mlc_read_retry_regs[] = {
241 0xcc, 0xbf, 0xaa, 0xab, 0xcd, 0xad, 0xae, 0xaf
242};
243
244static int hynix_mlc_1xnm_rr_init(struct nand_chip *chip,
245 const struct hynix_read_retry_otp *info)
246{
247 struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
248 struct hynix_read_retry *rr = NULL;
249 int ret, i, j;
250 u8 nregs, nmodes;
251 u8 *buf;
252
253 buf = kmalloc(info->size, GFP_KERNEL);
254 if (!buf)
255 return -ENOMEM;
256
257 ret = hynix_read_rr_otp(chip, info, buf);
258 if (ret)
259 goto out;
260
261 ret = hynix_get_majority(buf, NAND_HYNIX_1XNM_RR_REPEAT,
262 &nmodes);
263 if (ret)
264 goto out;
265
266 ret = hynix_get_majority(buf + NAND_HYNIX_1XNM_RR_REPEAT,
267 NAND_HYNIX_1XNM_RR_REPEAT,
268 &nregs);
269 if (ret)
270 goto out;
271
272 rr = kzalloc(sizeof(*rr) + (nregs * nmodes), GFP_KERNEL);
273 if (!rr) {
274 ret = -ENOMEM;
275 goto out;
276 }
277
278 for (i = 0; i < nmodes; i++) {
279 for (j = 0; j < nregs; j++) {
280 u8 *val = rr->values + (i * nregs);
281
282 ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
283 false, val);
284 if (!ret)
285 continue;
286
287 ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
288 true, val);
289 if (ret)
290 goto out;
291 }
292 }
293
294 rr->nregs = nregs;
295 rr->regs = hynix_1xnm_mlc_read_retry_regs;
296 hynix->read_retry = rr;
297 chip->setup_read_retry = hynix_nand_setup_read_retry;
298 chip->read_retries = nmodes;
299
300out:
301 kfree(buf);
302
303 if (ret)
304 kfree(rr);
305
306 return ret;
307}
308
static const u8 hynix_mlc_1xnm_rr_otp_regs[] = { 0x38 };
static const u8 hynix_mlc_1xnm_rr_otp_values[] = { 0x52 };

/* Known read-retry OTP layouts for 1xnm MLC chips, tried in order. */
static const struct hynix_read_retry_otp hynix_mlc_1xnm_rr_otps[] = {
	{
		.nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
		.regs = hynix_mlc_1xnm_rr_otp_regs,
		.values = hynix_mlc_1xnm_rr_otp_values,
		.page = 0x21f,
		.size = 784
	},
	{
		.nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
		.regs = hynix_mlc_1xnm_rr_otp_regs,
		.values = hynix_mlc_1xnm_rr_otp_values,
		.page = 0x200,
		.size = 528,
	},
};
328
329static int hynix_nand_rr_init(struct nand_chip *chip)
330{
331 int i, ret = 0;
332 bool valid_jedecid;
333
334 valid_jedecid = hynix_nand_has_valid_jedecid(chip);
335
336 /*
337 * We only support read-retry for 1xnm NANDs, and those NANDs all
338 * expose a valid JEDEC ID.
339 */
340 if (valid_jedecid) {
341 u8 nand_tech = chip->id.data[5] >> 4;
342
343 /* 1xnm technology */
344 if (nand_tech == 4) {
345 for (i = 0; i < ARRAY_SIZE(hynix_mlc_1xnm_rr_otps);
346 i++) {
347 /*
348 * FIXME: Hynix recommend to copy the
349 * read-retry OTP area into a normal page.
350 */
351 ret = hynix_mlc_1xnm_rr_init(chip,
352 hynix_mlc_1xnm_rr_otps);
353 if (!ret)
354 break;
355 }
356 }
357 }
358
359 if (ret)
360 pr_warn("failed to initialize read-retry infrastructure");
361
362 return 0;
363}
364
365static void hynix_nand_extract_oobsize(struct nand_chip *chip,
366 bool valid_jedecid)
367{
368 struct mtd_info *mtd = nand_to_mtd(chip);
369 u8 oobsize;
370
371 oobsize = ((chip->id.data[3] >> 2) & 0x3) |
372 ((chip->id.data[3] >> 4) & 0x4);
373
374 if (valid_jedecid) {
375 switch (oobsize) {
376 case 0:
377 mtd->oobsize = 2048;
378 break;
379 case 1:
380 mtd->oobsize = 1664;
381 break;
382 case 2:
383 mtd->oobsize = 1024;
384 break;
385 case 3:
386 mtd->oobsize = 640;
387 break;
388 default:
389 /*
390 * We should never reach this case, but if that
391 * happens, this probably means Hynix decided to use
392 * a different extended ID format, and we should find
393 * a way to support it.
394 */
395 WARN(1, "Invalid OOB size");
396 break;
397 }
398 } else {
399 switch (oobsize) {
400 case 0:
401 mtd->oobsize = 128;
402 break;
403 case 1:
404 mtd->oobsize = 224;
405 break;
406 case 2:
407 mtd->oobsize = 448;
408 break;
409 case 3:
410 mtd->oobsize = 64;
411 break;
412 case 4:
413 mtd->oobsize = 32;
414 break;
415 case 5:
416 mtd->oobsize = 16;
417 break;
418 case 6:
419 mtd->oobsize = 640;
420 break;
421 default:
422 /*
423 * We should never reach this case, but if that
424 * happens, this probably means Hynix decided to use
425 * a different extended ID format, and we should find
426 * a way to support it.
427 */
428 WARN(1, "Invalid OOB size");
429 break;
430 }
431 }
432}
433
/*
 * Fill chip->ecc_step_ds/ecc_strength_ds from the 3-bit ECC level field in
 * ID byte 4 (bits [6:4]). The encoding differs between chips exposing a
 * valid JEDEC ID and older ones; for the latter it also depends on the
 * process technology encoded in ID byte 5.
 */
static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
						bool valid_jedecid)
{
	u8 ecc_level = (chip->id.data[4] >> 4) & 0x7;

	if (valid_jedecid) {
		/* Reference: H27UCG8T2E datasheet */
		chip->ecc_step_ds = 1024;

		switch (ecc_level) {
		case 0:
			/* No ECC requirement advertised */
			chip->ecc_step_ds = 0;
			chip->ecc_strength_ds = 0;
			break;
		case 1:
			chip->ecc_strength_ds = 4;
			break;
		case 2:
			chip->ecc_strength_ds = 24;
			break;
		case 3:
			chip->ecc_strength_ds = 32;
			break;
		case 4:
			chip->ecc_strength_ds = 40;
			break;
		case 5:
			chip->ecc_strength_ds = 50;
			break;
		case 6:
			chip->ecc_strength_ds = 60;
			break;
		default:
			/*
			 * We should never reach this case, but if that
			 * happens, this probably means Hynix decided to use
			 * a different extended ID format, and we should find
			 * a way to support it.
			 */
			WARN(1, "Invalid ECC requirements");
		}
	} else {
		/*
		 * The ECC requirements field meaning depends on the
		 * NAND technology.
		 */
		u8 nand_tech = chip->id.data[5] & 0x3;

		if (nand_tech < 3) {
			/* > 26nm, reference: H27UBG8T2A datasheet */
			if (ecc_level < 5) {
				chip->ecc_step_ds = 512;
				chip->ecc_strength_ds = 1 << ecc_level;
			} else if (ecc_level < 7) {
				if (ecc_level == 5)
					chip->ecc_step_ds = 2048;
				else
					chip->ecc_step_ds = 1024;
				chip->ecc_strength_ds = 24;
			} else {
				/*
				 * We should never reach this case, but if that
				 * happens, this probably means Hynix decided
				 * to use a different extended ID format, and
				 * we should find a way to support it.
				 */
				WARN(1, "Invalid ECC requirements");
			}
		} else {
			/* <= 26nm, reference: H27UBG8T2B datasheet */
			if (!ecc_level) {
				chip->ecc_step_ds = 0;
				chip->ecc_strength_ds = 0;
			} else if (ecc_level < 5) {
				chip->ecc_step_ds = 512;
				chip->ecc_strength_ds = 1 << (ecc_level - 1);
			} else {
				chip->ecc_step_ds = 1024;
				chip->ecc_strength_ds = 24 +
							(8 * (ecc_level - 5));
			}
		}
	}
}
518
519static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
520 bool valid_jedecid)
521{
522 u8 nand_tech;
523
524 /* We need scrambling on all TLC NANDs*/
525 if (chip->bits_per_cell > 2)
526 chip->options |= NAND_NEED_SCRAMBLING;
527
528 /* And on MLC NANDs with sub-3xnm process */
529 if (valid_jedecid) {
530 nand_tech = chip->id.data[5] >> 4;
531
532 /* < 3xnm */
533 if (nand_tech > 0)
534 chip->options |= NAND_NEED_SCRAMBLING;
535 } else {
536 nand_tech = chip->id.data[5] & 0x3;
537
538 /* < 32nm */
539 if (nand_tech > 2)
540 chip->options |= NAND_NEED_SCRAMBLING;
541 }
542}
543
/*
 * ->detect() hook: decode the extended ID bytes of non-SLC Hynix NANDs.
 * Falls back to the generic nand_decode_ext_id() for SLC parts and short IDs.
 */
static void hynix_nand_decode_id(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	bool valid_jedecid;
	u8 tmp;

	/*
	 * Exclude all SLC NANDs from this advanced detection scheme.
	 * According to the ranges defined in several datasheets, it might
	 * appear that even SLC NANDs could fall in this extended ID scheme.
	 * If that the case rework the test to let SLC NANDs go through the
	 * detection process.
	 */
	if (chip->id.len < 6 || nand_is_slc(chip)) {
		nand_decode_ext_id(chip);
		return;
	}

	/* Extract pagesize */
	mtd->writesize = 2048 << (chip->id.data[3] & 0x03);

	tmp = (chip->id.data[3] >> 4) & 0x3;
	/*
	 * When ID[3] bit7 is set the erase size base is 1MiB, otherwise
	 * 128KiB; either base is then shifted left by ID[3][4:5] (held in
	 * tmp). The only exception is ID[3][4:5] == 3 with bit7 cleared, in
	 * which case the erasesize is set to 768KiB.
	 */
	if (chip->id.data[3] & 0x80)
		mtd->erasesize = SZ_1M << tmp;
	else if (tmp == 3)
		mtd->erasesize = SZ_512K + SZ_256K;
	else
		mtd->erasesize = SZ_128K << tmp;

	/*
	 * Modern Toggle DDR NANDs have a valid JEDECID even though they are
	 * not exposing a valid JEDEC parameter table.
	 * These NANDs use a different NAND ID scheme.
	 */
	valid_jedecid = hynix_nand_has_valid_jedecid(chip);

	hynix_nand_extract_oobsize(chip, valid_jedecid);
	hynix_nand_extract_ecc_requirements(chip, valid_jedecid);
	hynix_nand_extract_scrambling_requirements(chip, valid_jedecid);
}
591
592static void hynix_nand_cleanup(struct nand_chip *chip)
593{
594 struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
595
596 if (!hynix)
597 return;
598
599 kfree(hynix->read_retry);
600 kfree(hynix);
601 nand_set_manufacturer_data(chip, NULL);
602}
603
604static int hynix_nand_init(struct nand_chip *chip)
605{
606 struct hynix_nand *hynix;
607 int ret;
608
609 if (!nand_is_slc(chip))
610 chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
611 else
612 chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
613
614 hynix = kzalloc(sizeof(*hynix), GFP_KERNEL);
615 if (!hynix)
616 return -ENOMEM;
617
618 nand_set_manufacturer_data(chip, hynix);
619
620 ret = hynix_nand_rr_init(chip);
621 if (ret)
622 hynix_nand_cleanup(chip);
623
624 return ret;
625}
626
/* Hooks wired into the core's manufacturer detect/init/cleanup paths. */
const struct nand_manufacturer_ops hynix_nand_manuf_ops = {
	.detect = hynix_nand_decode_id,
	.init = hynix_nand_init,
	.cleanup = hynix_nand_cleanup,
};
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 4a2f75b0c200..9d5ca0e540b5 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -10,7 +10,7 @@
10#include <linux/mtd/nand.h> 10#include <linux/mtd/nand.h>
11#include <linux/sizes.h> 11#include <linux/sizes.h>
12 12
13#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS 13#define LP_OPTIONS 0
14#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) 14#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
15 15
16#define SP_OPTIONS NAND_NEED_READRDY 16#define SP_OPTIONS NAND_NEED_READRDY
@@ -169,29 +169,40 @@ struct nand_flash_dev nand_flash_ids[] = {
169}; 169};
170 170
171/* Manufacturer IDs */ 171/* Manufacturer IDs */
172struct nand_manufacturers nand_manuf_ids[] = { 172static const struct nand_manufacturer nand_manufacturers[] = {
173 {NAND_MFR_TOSHIBA, "Toshiba"}, 173 {NAND_MFR_TOSHIBA, "Toshiba", &toshiba_nand_manuf_ops},
174 {NAND_MFR_ESMT, "ESMT"}, 174 {NAND_MFR_ESMT, "ESMT"},
175 {NAND_MFR_SAMSUNG, "Samsung"}, 175 {NAND_MFR_SAMSUNG, "Samsung", &samsung_nand_manuf_ops},
176 {NAND_MFR_FUJITSU, "Fujitsu"}, 176 {NAND_MFR_FUJITSU, "Fujitsu"},
177 {NAND_MFR_NATIONAL, "National"}, 177 {NAND_MFR_NATIONAL, "National"},
178 {NAND_MFR_RENESAS, "Renesas"}, 178 {NAND_MFR_RENESAS, "Renesas"},
179 {NAND_MFR_STMICRO, "ST Micro"}, 179 {NAND_MFR_STMICRO, "ST Micro"},
180 {NAND_MFR_HYNIX, "Hynix"}, 180 {NAND_MFR_HYNIX, "Hynix", &hynix_nand_manuf_ops},
181 {NAND_MFR_MICRON, "Micron"}, 181 {NAND_MFR_MICRON, "Micron", &micron_nand_manuf_ops},
182 {NAND_MFR_AMD, "AMD/Spansion"}, 182 {NAND_MFR_AMD, "AMD/Spansion", &amd_nand_manuf_ops},
183 {NAND_MFR_MACRONIX, "Macronix"}, 183 {NAND_MFR_MACRONIX, "Macronix", &macronix_nand_manuf_ops},
184 {NAND_MFR_EON, "Eon"}, 184 {NAND_MFR_EON, "Eon"},
185 {NAND_MFR_SANDISK, "SanDisk"}, 185 {NAND_MFR_SANDISK, "SanDisk"},
186 {NAND_MFR_INTEL, "Intel"}, 186 {NAND_MFR_INTEL, "Intel"},
187 {NAND_MFR_ATO, "ATO"}, 187 {NAND_MFR_ATO, "ATO"},
188 {NAND_MFR_WINBOND, "Winbond"}, 188 {NAND_MFR_WINBOND, "Winbond"},
189 {0x0, "Unknown"}
190}; 189};
191 190
192EXPORT_SYMBOL(nand_manuf_ids); 191/**
193EXPORT_SYMBOL(nand_flash_ids); 192 * nand_get_manufacturer - Get manufacturer information from the manufacturer
193 * ID
194 * @id: manufacturer ID
195 *
196 * Returns a pointer a nand_manufacturer object if the manufacturer is defined
197 * in the NAND manufacturers database, NULL otherwise.
198 */
199const struct nand_manufacturer *nand_get_manufacturer(u8 id)
200{
201 int i;
202
203 for (i = 0; i < ARRAY_SIZE(nand_manufacturers); i++)
204 if (nand_manufacturers[i].id == id)
205 return &nand_manufacturers[i];
194 206
195MODULE_LICENSE("GPL"); 207 return NULL;
196MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>"); 208}
197MODULE_DESCRIPTION("Nand device & manufacturer IDs");
diff --git a/drivers/mtd/nand/nand_macronix.c b/drivers/mtd/nand/nand_macronix.c
new file mode 100644
index 000000000000..84855c3e1a02
--- /dev/null
+++ b/drivers/mtd/nand/nand_macronix.c
@@ -0,0 +1,30 @@
1/*
2 * Copyright (C) 2017 Free Electrons
3 * Copyright (C) 2017 NextThing Co
4 *
5 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/mtd/nand.h>
19
20static int macronix_nand_init(struct nand_chip *chip)
21{
22 if (nand_is_slc(chip))
23 chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
24
25 return 0;
26}
27
28const struct nand_manufacturer_ops macronix_nand_manuf_ops = {
29 .init = macronix_nand_init,
30};
diff --git a/drivers/mtd/nand/nand_micron.c b/drivers/mtd/nand/nand_micron.c
new file mode 100644
index 000000000000..877011069251
--- /dev/null
+++ b/drivers/mtd/nand/nand_micron.c
@@ -0,0 +1,86 @@
1/*
2 * Copyright (C) 2017 Free Electrons
3 * Copyright (C) 2017 NextThing Co
4 *
5 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/mtd/nand.h>
19
/*
 * Layout of the Micron vendor-specific block of the ONFI parameter page.
 * The struct is __packed because it is overlaid directly on the raw
 * vendor bytes (see micron_nand_onfi_init()); only read_retry_options is
 * consumed by the code visible here — the remaining fields mirror the
 * published layout so the offsets line up.
 */
struct nand_onfi_vendor_micron {
	u8 two_plane_read;
	u8 read_cache;
	u8 read_unique_id;
	u8 dq_imped;
	u8 dq_imped_num_settings;
	u8 dq_imped_feat_addr;
	u8 rb_pulldown_strength;
	u8 rb_pulldown_strength_feat_addr;
	u8 rb_pulldown_strength_num_settings;
	u8 otp_mode;
	u8 otp_page_start;
	u8 otp_data_prot_addr;
	u8 otp_num_pages;
	u8 otp_feat_addr;
	u8 read_retry_options;
	u8 reserved[72];	/* padding up to the revision byte */
	u8 param_revision;
} __packed;
39
40static int micron_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
41{
42 struct nand_chip *chip = mtd_to_nand(mtd);
43 u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
44
45 return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
46 feature);
47}
48
49/*
50 * Configure chip properties from Micron vendor-specific ONFI table
51 */
52static int micron_nand_onfi_init(struct nand_chip *chip)
53{
54 struct nand_onfi_params *p = &chip->onfi_params;
55 struct nand_onfi_vendor_micron *micron = (void *)p->vendor;
56
57 if (!chip->onfi_version)
58 return 0;
59
60 if (le16_to_cpu(p->vendor_revision) < 1)
61 return 0;
62
63 chip->read_retries = micron->read_retry_options;
64 chip->setup_read_retry = micron_nand_setup_read_retry;
65
66 return 0;
67}
68
69static int micron_nand_init(struct nand_chip *chip)
70{
71 struct mtd_info *mtd = nand_to_mtd(chip);
72 int ret;
73
74 ret = micron_nand_onfi_init(chip);
75 if (ret)
76 return ret;
77
78 if (mtd->writesize == 2048)
79 chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
80
81 return 0;
82}
83
84const struct nand_manufacturer_ops micron_nand_manuf_ops = {
85 .init = micron_nand_init,
86};
diff --git a/drivers/mtd/nand/nand_samsung.c b/drivers/mtd/nand/nand_samsung.c
new file mode 100644
index 000000000000..9cfc4035a420
--- /dev/null
+++ b/drivers/mtd/nand/nand_samsung.c
@@ -0,0 +1,112 @@
1/*
2 * Copyright (C) 2017 Free Electrons
3 * Copyright (C) 2017 NextThing Co
4 *
5 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/mtd/nand.h>
19
20static void samsung_nand_decode_id(struct nand_chip *chip)
21{
22 struct mtd_info *mtd = nand_to_mtd(chip);
23
24 /* New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44) */
25 if (chip->id.len == 6 && !nand_is_slc(chip) &&
26 chip->id.data[5] != 0x00) {
27 u8 extid = chip->id.data[3];
28
29 /* Get pagesize */
30 mtd->writesize = 2048 << (extid & 0x03);
31
32 extid >>= 2;
33
34 /* Get oobsize */
35 switch (((extid >> 2) & 0x4) | (extid & 0x3)) {
36 case 1:
37 mtd->oobsize = 128;
38 break;
39 case 2:
40 mtd->oobsize = 218;
41 break;
42 case 3:
43 mtd->oobsize = 400;
44 break;
45 case 4:
46 mtd->oobsize = 436;
47 break;
48 case 5:
49 mtd->oobsize = 512;
50 break;
51 case 6:
52 mtd->oobsize = 640;
53 break;
54 default:
55 /*
56 * We should never reach this case, but if that
57 * happens, this probably means Samsung decided to use
58 * a different extended ID format, and we should find
59 * a way to support it.
60 */
61 WARN(1, "Invalid OOB size value");
62 break;
63 }
64
65 /* Get blocksize */
66 extid >>= 2;
67 mtd->erasesize = (128 * 1024) <<
68 (((extid >> 1) & 0x04) | (extid & 0x03));
69
70 /* Extract ECC requirements from 5th id byte*/
71 extid = (chip->id.data[4] >> 4) & 0x07;
72 if (extid < 5) {
73 chip->ecc_step_ds = 512;
74 chip->ecc_strength_ds = 1 << extid;
75 } else {
76 chip->ecc_step_ds = 1024;
77 switch (extid) {
78 case 5:
79 chip->ecc_strength_ds = 24;
80 break;
81 case 6:
82 chip->ecc_strength_ds = 40;
83 break;
84 case 7:
85 chip->ecc_strength_ds = 60;
86 break;
87 }
88 }
89 } else {
90 nand_decode_ext_id(chip);
91 }
92}
93
94static int samsung_nand_init(struct nand_chip *chip)
95{
96 struct mtd_info *mtd = nand_to_mtd(chip);
97
98 if (mtd->writesize > 512)
99 chip->options |= NAND_SAMSUNG_LP_OPTIONS;
100
101 if (!nand_is_slc(chip))
102 chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
103 else
104 chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
105
106 return 0;
107}
108
109const struct nand_manufacturer_ops samsung_nand_manuf_ops = {
110 .detect = samsung_nand_decode_id,
111 .init = samsung_nand_init,
112};
diff --git a/drivers/mtd/nand/nand_toshiba.c b/drivers/mtd/nand/nand_toshiba.c
new file mode 100644
index 000000000000..fa787ba38dcd
--- /dev/null
+++ b/drivers/mtd/nand/nand_toshiba.c
@@ -0,0 +1,51 @@
1/*
2 * Copyright (C) 2017 Free Electrons
3 * Copyright (C) 2017 NextThing Co
4 *
5 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/mtd/nand.h>
19
20static void toshiba_nand_decode_id(struct nand_chip *chip)
21{
22 struct mtd_info *mtd = nand_to_mtd(chip);
23
24 nand_decode_ext_id(chip);
25
26 /*
27 * Toshiba 24nm raw SLC (i.e., not BENAND) have 32B OOB per
28 * 512B page. For Toshiba SLC, we decode the 5th/6th byte as
29 * follows:
30 * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm,
31 * 110b -> 24nm
32 * - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC
33 */
34 if (chip->id.len >= 6 && nand_is_slc(chip) &&
35 (chip->id.data[5] & 0x7) == 0x6 /* 24nm */ &&
36 !(chip->id.data[4] & 0x80) /* !BENAND */)
37 mtd->oobsize = 32 * mtd->writesize >> 9;
38}
39
40static int toshiba_nand_init(struct nand_chip *chip)
41{
42 if (nand_is_slc(chip))
43 chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
44
45 return 0;
46}
47
48const struct nand_manufacturer_ops toshiba_nand_manuf_ops = {
49 .detect = toshiba_nand_decode_id,
50 .init = toshiba_nand_init,
51};
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 092c9bd225be..03a0d057bf2f 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -902,7 +902,7 @@ static int parse_weakpages(void)
902 zero_ok = (*w == '0' ? 1 : 0); 902 zero_ok = (*w == '0' ? 1 : 0);
903 page_no = simple_strtoul(w, &w, 0); 903 page_no = simple_strtoul(w, &w, 0);
904 if (!zero_ok && !page_no) { 904 if (!zero_ok && !page_no) {
905 NS_ERR("invalid weakpagess.\n"); 905 NS_ERR("invalid weakpages.\n");
906 return -EINVAL; 906 return -EINVAL;
907 } 907 }
908 max_writes = 3; 908 max_writes = 3;
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 2a52101120d4..084934a9f19c 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1856,6 +1856,15 @@ static int omap_nand_probe(struct platform_device *pdev)
1856 nand_chip->ecc.priv = NULL; 1856 nand_chip->ecc.priv = NULL;
1857 nand_set_flash_node(nand_chip, dev->of_node); 1857 nand_set_flash_node(nand_chip, dev->of_node);
1858 1858
1859 if (!mtd->name) {
1860 mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
1861 "omap2-nand.%d", info->gpmc_cs);
1862 if (!mtd->name) {
1863 dev_err(&pdev->dev, "Failed to set MTD name\n");
1864 return -ENOMEM;
1865 }
1866 }
1867
1859 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1868 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1860 nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res); 1869 nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
1861 if (IS_ERR(nand_chip->IO_ADDR_R)) 1870 if (IS_ERR(nand_chip->IO_ADDR_R))
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 4a91c5d000be..f8e463a97b9e 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -23,6 +23,11 @@
23#include <asm/sizes.h> 23#include <asm/sizes.h>
24#include <linux/platform_data/mtd-orion_nand.h> 24#include <linux/platform_data/mtd-orion_nand.h>
25 25
26struct orion_nand_info {
27 struct nand_chip chip;
28 struct clk *clk;
29};
30
26static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) 31static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
27{ 32{
28 struct nand_chip *nc = mtd_to_nand(mtd); 33 struct nand_chip *nc = mtd_to_nand(mtd);
@@ -75,20 +80,21 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
75 80
76static int __init orion_nand_probe(struct platform_device *pdev) 81static int __init orion_nand_probe(struct platform_device *pdev)
77{ 82{
83 struct orion_nand_info *info;
78 struct mtd_info *mtd; 84 struct mtd_info *mtd;
79 struct nand_chip *nc; 85 struct nand_chip *nc;
80 struct orion_nand_data *board; 86 struct orion_nand_data *board;
81 struct resource *res; 87 struct resource *res;
82 struct clk *clk;
83 void __iomem *io_base; 88 void __iomem *io_base;
84 int ret = 0; 89 int ret = 0;
85 u32 val = 0; 90 u32 val = 0;
86 91
87 nc = devm_kzalloc(&pdev->dev, 92 info = devm_kzalloc(&pdev->dev,
88 sizeof(struct nand_chip), 93 sizeof(struct orion_nand_info),
89 GFP_KERNEL); 94 GFP_KERNEL);
90 if (!nc) 95 if (!info)
91 return -ENOMEM; 96 return -ENOMEM;
97 nc = &info->chip;
92 mtd = nand_to_mtd(nc); 98 mtd = nand_to_mtd(nc);
93 99
94 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 100 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -145,16 +151,23 @@ static int __init orion_nand_probe(struct platform_device *pdev)
145 if (board->dev_ready) 151 if (board->dev_ready)
146 nc->dev_ready = board->dev_ready; 152 nc->dev_ready = board->dev_ready;
147 153
148 platform_set_drvdata(pdev, mtd); 154 platform_set_drvdata(pdev, info);
149 155
150 /* Not all platforms can gate the clock, so it is not 156 /* Not all platforms can gate the clock, so it is not
151 an error if the clock does not exists. */ 157 an error if the clock does not exists. */
152 clk = clk_get(&pdev->dev, NULL); 158 info->clk = devm_clk_get(&pdev->dev, NULL);
153 if (!IS_ERR(clk)) { 159 if (IS_ERR(info->clk)) {
154 clk_prepare_enable(clk); 160 ret = PTR_ERR(info->clk);
155 clk_put(clk); 161 if (ret == -ENOENT) {
162 info->clk = NULL;
163 } else {
164 dev_err(&pdev->dev, "failed to get clock!\n");
165 return ret;
166 }
156 } 167 }
157 168
169 clk_prepare_enable(info->clk);
170
158 ret = nand_scan(mtd, 1); 171 ret = nand_scan(mtd, 1);
159 if (ret) 172 if (ret)
160 goto no_dev; 173 goto no_dev;
@@ -169,26 +182,19 @@ static int __init orion_nand_probe(struct platform_device *pdev)
169 return 0; 182 return 0;
170 183
171no_dev: 184no_dev:
172 if (!IS_ERR(clk)) { 185 clk_disable_unprepare(info->clk);
173 clk_disable_unprepare(clk);
174 clk_put(clk);
175 }
176
177 return ret; 186 return ret;
178} 187}
179 188
180static int orion_nand_remove(struct platform_device *pdev) 189static int orion_nand_remove(struct platform_device *pdev)
181{ 190{
182 struct mtd_info *mtd = platform_get_drvdata(pdev); 191 struct orion_nand_info *info = platform_get_drvdata(pdev);
183 struct clk *clk; 192 struct nand_chip *chip = &info->chip;
193 struct mtd_info *mtd = nand_to_mtd(chip);
184 194
185 nand_release(mtd); 195 nand_release(mtd);
186 196
187 clk = clk_get(&pdev->dev, NULL); 197 clk_disable_unprepare(info->clk);
188 if (!IS_ERR(clk)) {
189 clk_disable_unprepare(clk);
190 clk_put(clk);
191 }
192 198
193 return 0; 199 return 0;
194} 200}
diff --git a/drivers/mtd/nand/oxnas_nand.c b/drivers/mtd/nand/oxnas_nand.c
index 3e3bf3b364d2..1b207aac840c 100644
--- a/drivers/mtd/nand/oxnas_nand.c
+++ b/drivers/mtd/nand/oxnas_nand.c
@@ -91,7 +91,7 @@ static int oxnas_nand_probe(struct platform_device *pdev)
91 int err = 0; 91 int err = 0;
92 92
93 /* Allocate memory for the device structure (and zero it) */ 93 /* Allocate memory for the device structure (and zero it) */
94 oxnas = devm_kzalloc(&pdev->dev, sizeof(struct nand_chip), 94 oxnas = devm_kzalloc(&pdev->dev, sizeof(*oxnas),
95 GFP_KERNEL); 95 GFP_KERNEL);
96 if (!oxnas) 96 if (!oxnas)
97 return -ENOMEM; 97 return -ENOMEM;
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 0eeeb8b889ea..118a26fff368 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -2212,17 +2212,17 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
2212 goto out_ahb_clk_unprepare; 2212 goto out_ahb_clk_unprepare;
2213 2213
2214 nfc->reset = devm_reset_control_get_optional(dev, "ahb"); 2214 nfc->reset = devm_reset_control_get_optional(dev, "ahb");
2215 if (!IS_ERR(nfc->reset)) { 2215 if (IS_ERR(nfc->reset)) {
2216 ret = reset_control_deassert(nfc->reset);
2217 if (ret) {
2218 dev_err(dev, "reset err %d\n", ret);
2219 goto out_mod_clk_unprepare;
2220 }
2221 } else if (PTR_ERR(nfc->reset) != -ENOENT) {
2222 ret = PTR_ERR(nfc->reset); 2216 ret = PTR_ERR(nfc->reset);
2223 goto out_mod_clk_unprepare; 2217 goto out_mod_clk_unprepare;
2224 } 2218 }
2225 2219
2220 ret = reset_control_deassert(nfc->reset);
2221 if (ret) {
2222 dev_err(dev, "reset err %d\n", ret);
2223 goto out_mod_clk_unprepare;
2224 }
2225
2226 ret = sunxi_nfc_rst(nfc); 2226 ret = sunxi_nfc_rst(nfc);
2227 if (ret) 2227 if (ret)
2228 goto out_ahb_reset_reassert; 2228 goto out_ahb_reset_reassert;
@@ -2262,8 +2262,7 @@ out_release_dmac:
2262 if (nfc->dmac) 2262 if (nfc->dmac)
2263 dma_release_channel(nfc->dmac); 2263 dma_release_channel(nfc->dmac);
2264out_ahb_reset_reassert: 2264out_ahb_reset_reassert:
2265 if (!IS_ERR(nfc->reset)) 2265 reset_control_assert(nfc->reset);
2266 reset_control_assert(nfc->reset);
2267out_mod_clk_unprepare: 2266out_mod_clk_unprepare:
2268 clk_disable_unprepare(nfc->mod_clk); 2267 clk_disable_unprepare(nfc->mod_clk);
2269out_ahb_clk_unprepare: 2268out_ahb_clk_unprepare:
@@ -2278,8 +2277,7 @@ static int sunxi_nfc_remove(struct platform_device *pdev)
2278 2277
2279 sunxi_nand_chips_cleanup(nfc); 2278 sunxi_nand_chips_cleanup(nfc);
2280 2279
2281 if (!IS_ERR(nfc->reset)) 2280 reset_control_assert(nfc->reset);
2282 reset_control_assert(nfc->reset);
2283 2281
2284 if (nfc->dmac) 2282 if (nfc->dmac)
2285 dma_release_channel(nfc->dmac); 2283 dma_release_channel(nfc->dmac);
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
index 4a5e948c62df..05b6e1065203 100644
--- a/drivers/mtd/nand/tango_nand.c
+++ b/drivers/mtd/nand/tango_nand.c
@@ -223,12 +223,13 @@ static void tango_dma_callback(void *arg)
223 complete(arg); 223 complete(arg);
224} 224}
225 225
226static int do_dma(struct tango_nfc *nfc, int dir, int cmd, const void *buf, 226static int do_dma(struct tango_nfc *nfc, enum dma_data_direction dir, int cmd,
227 int len, int page) 227 const void *buf, int len, int page)
228{ 228{
229 void __iomem *addr = nfc->reg_base + NFC_STATUS; 229 void __iomem *addr = nfc->reg_base + NFC_STATUS;
230 struct dma_chan *chan = nfc->chan; 230 struct dma_chan *chan = nfc->chan;
231 struct dma_async_tx_descriptor *desc; 231 struct dma_async_tx_descriptor *desc;
232 enum dma_transfer_direction tdir;
232 struct scatterlist sg; 233 struct scatterlist sg;
233 struct completion tx_done; 234 struct completion tx_done;
234 int err = -EIO; 235 int err = -EIO;
@@ -238,7 +239,8 @@ static int do_dma(struct tango_nfc *nfc, int dir, int cmd, const void *buf,
238 if (dma_map_sg(chan->device->dev, &sg, 1, dir) != 1) 239 if (dma_map_sg(chan->device->dev, &sg, 1, dir) != 1)
239 return -EIO; 240 return -EIO;
240 241
241 desc = dmaengine_prep_slave_sg(chan, &sg, 1, dir, DMA_PREP_INTERRUPT); 242 tdir = dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
243 desc = dmaengine_prep_slave_sg(chan, &sg, 1, tdir, DMA_PREP_INTERRUPT);
242 if (!desc) 244 if (!desc)
243 goto dma_unmap; 245 goto dma_unmap;
244 246
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index 464470122493..2861c7079d7b 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -166,8 +166,8 @@ static int parse_ofoldpart_partitions(struct mtd_info *master,
166 if (!part) 166 if (!part)
167 return 0; /* No partitions found */ 167 return 0; /* No partitions found */
168 168
169 pr_warning("Device tree uses obsolete partition map binding: %s\n", 169 pr_warn("Device tree uses obsolete partition map binding: %s\n",
170 dp->full_name); 170 dp->full_name);
171 171
172 nr_parts = plen / sizeof(part[0]); 172 nr_parts = plen / sizeof(part[0]);
173 173
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 7252087ef407..bfdfb1e72b38 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -106,4 +106,11 @@ config SPI_INTEL_SPI_PLATFORM
106 To compile this driver as a module, choose M here: the module 106 To compile this driver as a module, choose M here: the module
107 will be called intel-spi-platform. 107 will be called intel-spi-platform.
108 108
109config SPI_STM32_QUADSPI
110 tristate "STM32 Quad SPI controller"
111 depends on ARCH_STM32
112 help
113 This enables support for the STM32 Quad SPI controller.
114 We only connect the NOR to this controller.
115
109endif # MTD_SPI_NOR 116endif # MTD_SPI_NOR
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
index 72238a793198..285aab86c7ca 100644
--- a/drivers/mtd/spi-nor/Makefile
+++ b/drivers/mtd/spi-nor/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o
8obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o 8obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
9obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o 9obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o
10obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o 10obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o
11obj-$(CONFIG_SPI_STM32_QUADSPI) += stm32-quadspi.o \ No newline at end of file
diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c
index 20378b0d55e9..a286350627a6 100644
--- a/drivers/mtd/spi-nor/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/hisi-sfc.c
@@ -448,8 +448,11 @@ static int hisi_spi_nor_probe(struct platform_device *pdev)
448 if (!host->buffer) 448 if (!host->buffer)
449 return -ENOMEM; 449 return -ENOMEM;
450 450
451 ret = clk_prepare_enable(host->clk);
452 if (ret)
453 return ret;
454
451 mutex_init(&host->lock); 455 mutex_init(&host->lock);
452 clk_prepare_enable(host->clk);
453 hisi_spi_nor_init(host); 456 hisi_spi_nor_init(host);
454 ret = hisi_spi_nor_register_all(host); 457 ret = hisi_spi_nor_register_all(host);
455 if (ret) 458 if (ret)
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index a10f6027b386..986a3d020a3a 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -704,7 +704,7 @@ static void intel_spi_fill_partition(struct intel_spi *ispi,
704 * whole partition read-only to be on the safe side. 704 * whole partition read-only to be on the safe side.
705 */ 705 */
706 if (intel_spi_is_protected(ispi, base, limit)) 706 if (intel_spi_is_protected(ispi, base, limit))
707 ispi->writeable = 0; 707 ispi->writeable = false;
708 708
709 end = (limit << 12) + 4096; 709 end = (limit << 12) + 4096;
710 if (end > part->size) 710 if (end > part->size)
@@ -728,7 +728,7 @@ struct intel_spi *intel_spi_probe(struct device *dev,
728 728
729 ispi->base = devm_ioremap_resource(dev, mem); 729 ispi->base = devm_ioremap_resource(dev, mem);
730 if (IS_ERR(ispi->base)) 730 if (IS_ERR(ispi->base))
731 return ispi->base; 731 return ERR_CAST(ispi->base);
732 732
733 ispi->dev = dev; 733 ispi->dev = dev;
734 ispi->info = info; 734 ispi->info = info;
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
index e661877c23de..b6377707ce32 100644
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -104,6 +104,8 @@
104#define MTK_NOR_MAX_RX_TX_SHIFT 6 104#define MTK_NOR_MAX_RX_TX_SHIFT 6
105/* can shift up to 56 bits (7 bytes) transfer by MTK_NOR_PRG_CMD */ 105/* can shift up to 56 bits (7 bytes) transfer by MTK_NOR_PRG_CMD */
106#define MTK_NOR_MAX_SHIFT 7 106#define MTK_NOR_MAX_SHIFT 7
107/* nor controller 4-byte address mode enable bit */
108#define MTK_NOR_4B_ADDR_EN BIT(4)
107 109
108/* Helpers for accessing the program data / shift data registers */ 110/* Helpers for accessing the program data / shift data registers */
109#define MTK_NOR_PRG_REG(n) (MTK_NOR_PRGDATA0_REG + 4 * (n)) 111#define MTK_NOR_PRG_REG(n) (MTK_NOR_PRGDATA0_REG + 4 * (n))
@@ -230,10 +232,35 @@ static int mt8173_nor_write_buffer_disable(struct mt8173_nor *mt8173_nor)
230 10000); 232 10000);
231} 233}
232 234
235static void mt8173_nor_set_addr_width(struct mt8173_nor *mt8173_nor)
236{
237 u8 val;
238 struct spi_nor *nor = &mt8173_nor->nor;
239
240 val = readb(mt8173_nor->base + MTK_NOR_DUAL_REG);
241
242 switch (nor->addr_width) {
243 case 3:
244 val &= ~MTK_NOR_4B_ADDR_EN;
245 break;
246 case 4:
247 val |= MTK_NOR_4B_ADDR_EN;
248 break;
249 default:
250 dev_warn(mt8173_nor->dev, "Unexpected address width %u.\n",
251 nor->addr_width);
252 break;
253 }
254
255 writeb(val, mt8173_nor->base + MTK_NOR_DUAL_REG);
256}
257
233static void mt8173_nor_set_addr(struct mt8173_nor *mt8173_nor, u32 addr) 258static void mt8173_nor_set_addr(struct mt8173_nor *mt8173_nor, u32 addr)
234{ 259{
235 int i; 260 int i;
236 261
262 mt8173_nor_set_addr_width(mt8173_nor);
263
237 for (i = 0; i < 3; i++) { 264 for (i = 0; i < 3; i++) {
238 writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR0_REG + i * 4); 265 writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR0_REG + i * 4);
239 addr >>= 8; 266 addr >>= 8;
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 747645c74134..dea8c9cbadf0 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -85,6 +85,7 @@ struct flash_info {
85 * Use dedicated 4byte address op codes 85 * Use dedicated 4byte address op codes
86 * to support memory size above 128Mib. 86 * to support memory size above 128Mib.
87 */ 87 */
88#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
88}; 89};
89 90
90#define JEDEC_MFR(info) ((info)->id[0]) 91#define JEDEC_MFR(info) ((info)->id[0])
@@ -960,6 +961,8 @@ static const struct flash_info spi_nor_ids[] = {
960 961
961 /* ESMT */ 962 /* ESMT */
962 { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) }, 963 { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
964 { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
965 { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },
963 966
964 /* Everspin */ 967 /* Everspin */
965 { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, 968 { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
@@ -1013,11 +1016,14 @@ static const struct flash_info spi_nor_ids[] = {
1013 { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) }, 1016 { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
1014 { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) }, 1017 { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
1015 { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) }, 1018 { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
1019 { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
1020 { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
1021 { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
1016 { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) }, 1022 { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
1017 { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, 1023 { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
1018 { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, 1024 { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
1019 { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) }, 1025 { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
1020 { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K) }, 1026 { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
1021 { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, 1027 { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
1022 { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) }, 1028 { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) },
1023 { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) }, 1029 { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
@@ -1031,10 +1037,11 @@ static const struct flash_info spi_nor_ids[] = {
1031 { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) }, 1037 { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
1032 { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) }, 1038 { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
1033 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) }, 1039 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
1040 { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
1034 { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, 1041 { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
1035 { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, 1042 { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
1036 { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, 1043 { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
1037 { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, 1044 { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
1038 1045
1039 /* PMC */ 1046 /* PMC */
1040 { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) }, 1047 { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
@@ -1128,6 +1135,9 @@ static const struct flash_info spi_nor_ids[] = {
1128 { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) }, 1135 { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
1129 { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, 1136 { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
1130 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, 1137 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
1138 { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
1139 { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
1140 { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) },
1131 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) }, 1141 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
1132 { 1142 {
1133 "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, 1143 "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
@@ -1629,6 +1639,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
1629 nor->flags |= SNOR_F_USE_FSR; 1639 nor->flags |= SNOR_F_USE_FSR;
1630 if (info->flags & SPI_NOR_HAS_TB) 1640 if (info->flags & SPI_NOR_HAS_TB)
1631 nor->flags |= SNOR_F_HAS_SR_TB; 1641 nor->flags |= SNOR_F_HAS_SR_TB;
1642 if (info->flags & NO_CHIP_ERASE)
1643 nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
1632 1644
1633#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS 1645#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
1634 /* prefer "small sector" erase if possible */ 1646 /* prefer "small sector" erase if possible */
diff --git a/drivers/mtd/spi-nor/stm32-quadspi.c b/drivers/mtd/spi-nor/stm32-quadspi.c
new file mode 100644
index 000000000000..ae45f81b8cd3
--- /dev/null
+++ b/drivers/mtd/spi-nor/stm32-quadspi.c
@@ -0,0 +1,693 @@
1/*
2 * stm32_quadspi.c
3 *
4 * Copyright (C) 2017, Ludovic Barre
5 *
6 * License terms: GNU General Public License (GPL), version 2
7 */
8#include <linux/clk.h>
9#include <linux/errno.h>
10#include <linux/io.h>
11#include <linux/iopoll.h>
12#include <linux/interrupt.h>
13#include <linux/module.h>
14#include <linux/mtd/mtd.h>
15#include <linux/mtd/partitions.h>
16#include <linux/mtd/spi-nor.h>
17#include <linux/mutex.h>
18#include <linux/of.h>
19#include <linux/of_device.h>
20#include <linux/platform_device.h>
21#include <linux/reset.h>
22
23#define QUADSPI_CR 0x00
24#define CR_EN BIT(0)
25#define CR_ABORT BIT(1)
26#define CR_DMAEN BIT(2)
27#define CR_TCEN BIT(3)
28#define CR_SSHIFT BIT(4)
29#define CR_DFM BIT(6)
30#define CR_FSEL BIT(7)
31#define CR_FTHRES_SHIFT 8
32#define CR_FTHRES_MASK GENMASK(12, 8)
33#define CR_FTHRES(n) (((n) << CR_FTHRES_SHIFT) & CR_FTHRES_MASK)
34#define CR_TEIE BIT(16)
35#define CR_TCIE BIT(17)
36#define CR_FTIE BIT(18)
37#define CR_SMIE BIT(19)
38#define CR_TOIE BIT(20)
39#define CR_PRESC_SHIFT 24
40#define CR_PRESC_MASK GENMASK(31, 24)
41#define CR_PRESC(n) (((n) << CR_PRESC_SHIFT) & CR_PRESC_MASK)
42
43#define QUADSPI_DCR 0x04
44#define DCR_CSHT_SHIFT 8
45#define DCR_CSHT_MASK GENMASK(10, 8)
46#define DCR_CSHT(n) (((n) << DCR_CSHT_SHIFT) & DCR_CSHT_MASK)
47#define DCR_FSIZE_SHIFT 16
48#define DCR_FSIZE_MASK GENMASK(20, 16)
49#define DCR_FSIZE(n) (((n) << DCR_FSIZE_SHIFT) & DCR_FSIZE_MASK)
50
51#define QUADSPI_SR 0x08
52#define SR_TEF BIT(0)
53#define SR_TCF BIT(1)
54#define SR_FTF BIT(2)
55#define SR_SMF BIT(3)
56#define SR_TOF BIT(4)
57#define SR_BUSY BIT(5)
58#define SR_FLEVEL_SHIFT 8
59#define SR_FLEVEL_MASK GENMASK(13, 8)
60
61#define QUADSPI_FCR 0x0c
62#define FCR_CTCF BIT(1)
63
64#define QUADSPI_DLR 0x10
65
66#define QUADSPI_CCR 0x14
67#define CCR_INST_SHIFT 0
68#define CCR_INST_MASK GENMASK(7, 0)
69#define CCR_INST(n) (((n) << CCR_INST_SHIFT) & CCR_INST_MASK)
70#define CCR_IMODE_NONE (0U << 8)
71#define CCR_IMODE_1 (1U << 8)
72#define CCR_IMODE_2 (2U << 8)
73#define CCR_IMODE_4 (3U << 8)
74#define CCR_ADMODE_NONE (0U << 10)
75#define CCR_ADMODE_1 (1U << 10)
76#define CCR_ADMODE_2 (2U << 10)
77#define CCR_ADMODE_4 (3U << 10)
78#define CCR_ADSIZE_SHIFT 12
79#define CCR_ADSIZE_MASK GENMASK(13, 12)
80#define CCR_ADSIZE(n) (((n) << CCR_ADSIZE_SHIFT) & CCR_ADSIZE_MASK)
81#define CCR_ABMODE_NONE (0U << 14)
82#define CCR_ABMODE_1 (1U << 14)
83#define CCR_ABMODE_2 (2U << 14)
84#define CCR_ABMODE_4 (3U << 14)
85#define CCR_ABSIZE_8 (0U << 16)
86#define CCR_ABSIZE_16 (1U << 16)
87#define CCR_ABSIZE_24 (2U << 16)
88#define CCR_ABSIZE_32 (3U << 16)
89#define CCR_DCYC_SHIFT 18
90#define CCR_DCYC_MASK GENMASK(22, 18)
91#define CCR_DCYC(n) (((n) << CCR_DCYC_SHIFT) & CCR_DCYC_MASK)
92#define CCR_DMODE_NONE (0U << 24)
93#define CCR_DMODE_1 (1U << 24)
94#define CCR_DMODE_2 (2U << 24)
95#define CCR_DMODE_4 (3U << 24)
96#define CCR_FMODE_INDW (0U << 26)
97#define CCR_FMODE_INDR (1U << 26)
98#define CCR_FMODE_APM (2U << 26)
99#define CCR_FMODE_MM (3U << 26)
100
101#define QUADSPI_AR 0x18
102#define QUADSPI_ABR 0x1c
103#define QUADSPI_DR 0x20
104#define QUADSPI_PSMKR 0x24
105#define QUADSPI_PSMAR 0x28
106#define QUADSPI_PIR 0x2c
107#define QUADSPI_LPTR 0x30
108#define LPTR_DFT_TIMEOUT 0x10
109
110#define FSIZE_VAL(size) (__fls(size) - 1)
111
112#define STM32_MAX_MMAP_SZ SZ_256M
113#define STM32_MAX_NORCHIP 2
114
115#define STM32_QSPI_FIFO_TIMEOUT_US 30000
116#define STM32_QSPI_BUSY_TIMEOUT_US 100000
117
/* Per-chip-select state for one NOR flash wired to the controller. */
struct stm32_qspi_flash {
	struct spi_nor nor;
	struct stm32_qspi *qspi;	/* back-pointer to owning controller */
	u32 cs;				/* chip-select index, selects CR_FSEL */
	u32 fsize;			/* encoded flash size for DCR_FSIZE() */
	u32 presc;			/* clock prescaler for this flash */
	u32 read_mode;			/* CCR_FMODE_MM or CCR_FMODE_INDR */
	bool registered;		/* set once mtd_device_register() succeeded */
};
127
/* Controller state: register bank, memory-mapped window, clock, flashes. */
struct stm32_qspi {
	struct device *dev;
	void __iomem *io_base;		/* QUADSPI register bank */
	void __iomem *mm_base;		/* memory-mapped read window */
	resource_size_t mm_size;	/* size of the "qspi_mm" resource */
	u32 nor_num;			/* number of flash children found in DT */
	struct clk *clk;
	u32 clk_rate;			/* cached clk_get_rate() result, in Hz */
	struct stm32_qspi_flash flash[STM32_MAX_NORCHIP];
	struct completion cmd_completion;	/* signalled by IRQ on SR_TCF */

	/*
	 * to protect device configuration, could be different between
	 * 2 flash access (bk1, bk2)
	 */
	struct mutex lock;
};
145
/* One command sequence to be programmed into the controller. */
struct stm32_qspi_cmd {
	u8 addr_width;		/* address bytes; 0 means no address phase */
	u8 dummy;		/* dummy cycles between address and data */
	bool tx_data;		/* true when a data phase is present */
	u8 opcode;		/* instruction byte, goes into CCR_INST */
	u32 framemode;		/* CCR line-width bits, set by set_framemode() */
	u32 qspimode;		/* CCR_FMODE_* functional mode */
	u32 addr;		/* flash address for the address phase */
	size_t len;		/* data phase length in bytes */
	void *buf;		/* data buffer (source or destination) */
};
157
158static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi)
159{
160 u32 cr;
161 int err = 0;
162
163 if (readl_relaxed(qspi->io_base + QUADSPI_SR) & SR_TCF)
164 return 0;
165
166 reinit_completion(&qspi->cmd_completion);
167 cr = readl_relaxed(qspi->io_base + QUADSPI_CR);
168 writel_relaxed(cr | CR_TCIE, qspi->io_base + QUADSPI_CR);
169
170 if (!wait_for_completion_interruptible_timeout(&qspi->cmd_completion,
171 msecs_to_jiffies(1000)))
172 err = -ETIMEDOUT;
173
174 writel_relaxed(cr, qspi->io_base + QUADSPI_CR);
175 return err;
176}
177
178static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi)
179{
180 u32 sr;
181
182 return readl_relaxed_poll_timeout(qspi->io_base + QUADSPI_SR, sr,
183 !(sr & SR_BUSY), 10,
184 STM32_QSPI_BUSY_TIMEOUT_US);
185}
186
187static void stm32_qspi_set_framemode(struct spi_nor *nor,
188 struct stm32_qspi_cmd *cmd, bool read)
189{
190 u32 dmode = CCR_DMODE_1;
191
192 cmd->framemode = CCR_IMODE_1;
193
194 if (read) {
195 switch (nor->flash_read) {
196 case SPI_NOR_NORMAL:
197 case SPI_NOR_FAST:
198 dmode = CCR_DMODE_1;
199 break;
200 case SPI_NOR_DUAL:
201 dmode = CCR_DMODE_2;
202 break;
203 case SPI_NOR_QUAD:
204 dmode = CCR_DMODE_4;
205 break;
206 }
207 }
208
209 cmd->framemode |= cmd->tx_data ? dmode : 0;
210 cmd->framemode |= cmd->addr_width ? CCR_ADMODE_1 : 0;
211}
212
/* Pop one byte from the controller data FIFO into *val. */
static void stm32_qspi_read_fifo(u8 *val, void __iomem *addr)
{
	*val = readb_relaxed(addr);
}
217
/* Push one byte from *val into the controller data FIFO. */
static void stm32_qspi_write_fifo(u8 *val, void __iomem *addr)
{
	writeb_relaxed(*val, addr);
}
222
223static int stm32_qspi_tx_poll(struct stm32_qspi *qspi,
224 const struct stm32_qspi_cmd *cmd)
225{
226 void (*tx_fifo)(u8 *, void __iomem *);
227 u32 len = cmd->len, sr;
228 u8 *buf = cmd->buf;
229 int ret;
230
231 if (cmd->qspimode == CCR_FMODE_INDW)
232 tx_fifo = stm32_qspi_write_fifo;
233 else
234 tx_fifo = stm32_qspi_read_fifo;
235
236 while (len--) {
237 ret = readl_relaxed_poll_timeout(qspi->io_base + QUADSPI_SR,
238 sr, (sr & SR_FTF), 10,
239 STM32_QSPI_FIFO_TIMEOUT_US);
240 if (ret) {
241 dev_err(qspi->dev, "fifo timeout (stat:%#x)\n", sr);
242 break;
243 }
244 tx_fifo(buf++, qspi->io_base + QUADSPI_DR);
245 }
246
247 return ret;
248}
249
/*
 * Fetch data through the memory-mapped window: bus reads in the mm
 * region are translated by the controller into flash read transactions.
 */
static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
			    const struct stm32_qspi_cmd *cmd)
{
	memcpy_fromio(cmd->buf, qspi->mm_base + cmd->addr, cmd->len);
	return 0;
}
256
257static int stm32_qspi_tx(struct stm32_qspi *qspi,
258 const struct stm32_qspi_cmd *cmd)
259{
260 if (!cmd->tx_data)
261 return 0;
262
263 if (cmd->qspimode == CCR_FMODE_MM)
264 return stm32_qspi_tx_mm(qspi, cmd);
265
266 return stm32_qspi_tx_poll(qspi, cmd);
267}
268
/*
 * Program and run one command sequence on the controller.
 *
 * The register-write order matters: wait for non-busy, set flash size
 * (DCR), prescaler and chip-select (CR), data length (DLR), then the
 * CCR write (with opcode/mode bits) starts the command; in indirect
 * modes the AR write triggers the address phase.  Any failure aborts
 * the transfer via CR_ABORT.
 *
 * Returns 0 on success or a negative errno.
 */
static int stm32_qspi_send(struct stm32_qspi_flash *flash,
			   const struct stm32_qspi_cmd *cmd)
{
	struct stm32_qspi *qspi = flash->qspi;
	u32 ccr, dcr, cr;
	int err;

	err = stm32_qspi_wait_nobusy(qspi);
	if (err)
		goto abort;

	/* tell the controller the size of the addressed flash */
	dcr = readl_relaxed(qspi->io_base + QUADSPI_DCR) & ~DCR_FSIZE_MASK;
	dcr |= DCR_FSIZE(flash->fsize);
	writel_relaxed(dcr, qspi->io_base + QUADSPI_DCR);

	/* per-flash prescaler and bank (chip-select) selection */
	cr = readl_relaxed(qspi->io_base + QUADSPI_CR);
	cr &= ~CR_PRESC_MASK & ~CR_FSEL;
	cr |= CR_PRESC(flash->presc);
	cr |= flash->cs ? CR_FSEL : 0;
	writel_relaxed(cr, qspi->io_base + QUADSPI_CR);

	/* DLR holds length - 1 */
	if (cmd->tx_data)
		writel_relaxed(cmd->len - 1, qspi->io_base + QUADSPI_DLR);

	ccr = cmd->framemode | cmd->qspimode;

	if (cmd->dummy)
		ccr |= CCR_DCYC(cmd->dummy);

	/* ADSIZE encodes (bytes - 1) */
	if (cmd->addr_width)
		ccr |= CCR_ADSIZE(cmd->addr_width - 1);

	ccr |= CCR_INST(cmd->opcode);
	writel_relaxed(ccr, qspi->io_base + QUADSPI_CCR);

	/* in memory-mapped mode the address comes from the bus access */
	if (cmd->addr_width && cmd->qspimode != CCR_FMODE_MM)
		writel_relaxed(cmd->addr, qspi->io_base + QUADSPI_AR);

	err = stm32_qspi_tx(qspi, cmd);
	if (err)
		goto abort;

	if (cmd->qspimode != CCR_FMODE_MM) {
		err = stm32_qspi_wait_cmd(qspi);
		if (err)
			goto abort;
		/* acknowledge transfer-complete */
		writel_relaxed(FCR_CTCF, qspi->io_base + QUADSPI_FCR);
	}

	return err;

abort:
	cr = readl_relaxed(qspi->io_base + QUADSPI_CR) | CR_ABORT;
	writel_relaxed(cr, qspi->io_base + QUADSPI_CR);

	dev_err(qspi->dev, "%s abort err:%d\n", __func__, err);
	return err;
}
327
328static int stm32_qspi_read_reg(struct spi_nor *nor,
329 u8 opcode, u8 *buf, int len)
330{
331 struct stm32_qspi_flash *flash = nor->priv;
332 struct device *dev = flash->qspi->dev;
333 struct stm32_qspi_cmd cmd;
334
335 dev_dbg(dev, "read_reg: cmd:%#.2x buf:%p len:%#x\n", opcode, buf, len);
336
337 memset(&cmd, 0, sizeof(cmd));
338 cmd.opcode = opcode;
339 cmd.tx_data = true;
340 cmd.len = len;
341 cmd.buf = buf;
342 cmd.qspimode = CCR_FMODE_INDR;
343
344 stm32_qspi_set_framemode(nor, &cmd, false);
345
346 return stm32_qspi_send(flash, &cmd);
347}
348
349static int stm32_qspi_write_reg(struct spi_nor *nor, u8 opcode,
350 u8 *buf, int len)
351{
352 struct stm32_qspi_flash *flash = nor->priv;
353 struct device *dev = flash->qspi->dev;
354 struct stm32_qspi_cmd cmd;
355
356 dev_dbg(dev, "write_reg: cmd:%#.2x buf:%p len:%#x\n", opcode, buf, len);
357
358 memset(&cmd, 0, sizeof(cmd));
359 cmd.opcode = opcode;
360 cmd.tx_data = !!(buf && len > 0);
361 cmd.len = len;
362 cmd.buf = buf;
363 cmd.qspimode = CCR_FMODE_INDW;
364
365 stm32_qspi_set_framemode(nor, &cmd, false);
366
367 return stm32_qspi_send(flash, &cmd);
368}
369
370static ssize_t stm32_qspi_read(struct spi_nor *nor, loff_t from, size_t len,
371 u_char *buf)
372{
373 struct stm32_qspi_flash *flash = nor->priv;
374 struct stm32_qspi *qspi = flash->qspi;
375 struct stm32_qspi_cmd cmd;
376 int err;
377
378 dev_dbg(qspi->dev, "read(%#.2x): buf:%p from:%#.8x len:%#x\n",
379 nor->read_opcode, buf, (u32)from, len);
380
381 memset(&cmd, 0, sizeof(cmd));
382 cmd.opcode = nor->read_opcode;
383 cmd.addr_width = nor->addr_width;
384 cmd.addr = (u32)from;
385 cmd.tx_data = true;
386 cmd.dummy = nor->read_dummy;
387 cmd.len = len;
388 cmd.buf = buf;
389 cmd.qspimode = flash->read_mode;
390
391 stm32_qspi_set_framemode(nor, &cmd, true);
392 err = stm32_qspi_send(flash, &cmd);
393
394 return err ? err : len;
395}
396
397static ssize_t stm32_qspi_write(struct spi_nor *nor, loff_t to, size_t len,
398 const u_char *buf)
399{
400 struct stm32_qspi_flash *flash = nor->priv;
401 struct device *dev = flash->qspi->dev;
402 struct stm32_qspi_cmd cmd;
403 int err;
404
405 dev_dbg(dev, "write(%#.2x): buf:%p to:%#.8x len:%#x\n",
406 nor->program_opcode, buf, (u32)to, len);
407
408 memset(&cmd, 0, sizeof(cmd));
409 cmd.opcode = nor->program_opcode;
410 cmd.addr_width = nor->addr_width;
411 cmd.addr = (u32)to;
412 cmd.tx_data = true;
413 cmd.len = len;
414 cmd.buf = (void *)buf;
415 cmd.qspimode = CCR_FMODE_INDW;
416
417 stm32_qspi_set_framemode(nor, &cmd, false);
418 err = stm32_qspi_send(flash, &cmd);
419
420 return err ? err : len;
421}
422
423static int stm32_qspi_erase(struct spi_nor *nor, loff_t offs)
424{
425 struct stm32_qspi_flash *flash = nor->priv;
426 struct device *dev = flash->qspi->dev;
427 struct stm32_qspi_cmd cmd;
428
429 dev_dbg(dev, "erase(%#.2x):offs:%#x\n", nor->erase_opcode, (u32)offs);
430
431 memset(&cmd, 0, sizeof(cmd));
432 cmd.opcode = nor->erase_opcode;
433 cmd.addr_width = nor->addr_width;
434 cmd.addr = (u32)offs;
435 cmd.qspimode = CCR_FMODE_INDW;
436
437 stm32_qspi_set_framemode(nor, &cmd, false);
438
439 return stm32_qspi_send(flash, &cmd);
440}
441
442static irqreturn_t stm32_qspi_irq(int irq, void *dev_id)
443{
444 struct stm32_qspi *qspi = (struct stm32_qspi *)dev_id;
445 u32 cr, sr, fcr = 0;
446
447 cr = readl_relaxed(qspi->io_base + QUADSPI_CR);
448 sr = readl_relaxed(qspi->io_base + QUADSPI_SR);
449
450 if ((cr & CR_TCIE) && (sr & SR_TCF)) {
451 /* tx complete */
452 fcr |= FCR_CTCF;
453 complete(&qspi->cmd_completion);
454 } else {
455 dev_info_ratelimited(qspi->dev, "spurious interrupt\n");
456 }
457
458 writel_relaxed(fcr, qspi->io_base + QUADSPI_FCR);
459
460 return IRQ_HANDLED;
461}
462
/*
 * spi_nor prepare hook: serialize controller access between the two
 * possible flashes (bk1/bk2).  Released in stm32_qspi_unprep().
 * Always succeeds.
 */
static int stm32_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
{
	struct stm32_qspi_flash *flash = nor->priv;
	struct stm32_qspi *qspi = flash->qspi;

	mutex_lock(&qspi->lock);
	return 0;
}
471
/* spi_nor unprepare hook: drop the lock taken in stm32_qspi_prep(). */
static void stm32_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
{
	struct stm32_qspi_flash *flash = nor->priv;
	struct stm32_qspi *qspi = flash->qspi;

	mutex_unlock(&qspi->lock);
}
479
480static int stm32_qspi_flash_setup(struct stm32_qspi *qspi,
481 struct device_node *np)
482{
483 u32 width, flash_read, presc, cs_num, max_rate = 0;
484 struct stm32_qspi_flash *flash;
485 struct mtd_info *mtd;
486 int ret;
487
488 of_property_read_u32(np, "reg", &cs_num);
489 if (cs_num >= STM32_MAX_NORCHIP)
490 return -EINVAL;
491
492 of_property_read_u32(np, "spi-max-frequency", &max_rate);
493 if (!max_rate)
494 return -EINVAL;
495
496 presc = DIV_ROUND_UP(qspi->clk_rate, max_rate) - 1;
497
498 if (of_property_read_u32(np, "spi-rx-bus-width", &width))
499 width = 1;
500
501 if (width == 4)
502 flash_read = SPI_NOR_QUAD;
503 else if (width == 2)
504 flash_read = SPI_NOR_DUAL;
505 else if (width == 1)
506 flash_read = SPI_NOR_NORMAL;
507 else
508 return -EINVAL;
509
510 flash = &qspi->flash[cs_num];
511 flash->qspi = qspi;
512 flash->cs = cs_num;
513 flash->presc = presc;
514
515 flash->nor.dev = qspi->dev;
516 spi_nor_set_flash_node(&flash->nor, np);
517 flash->nor.priv = flash;
518 mtd = &flash->nor.mtd;
519
520 flash->nor.read = stm32_qspi_read;
521 flash->nor.write = stm32_qspi_write;
522 flash->nor.erase = stm32_qspi_erase;
523 flash->nor.read_reg = stm32_qspi_read_reg;
524 flash->nor.write_reg = stm32_qspi_write_reg;
525 flash->nor.prepare = stm32_qspi_prep;
526 flash->nor.unprepare = stm32_qspi_unprep;
527
528 writel_relaxed(LPTR_DFT_TIMEOUT, qspi->io_base + QUADSPI_LPTR);
529
530 writel_relaxed(CR_PRESC(presc) | CR_FTHRES(3) | CR_TCEN | CR_SSHIFT
531 | CR_EN, qspi->io_base + QUADSPI_CR);
532
533 /*
534 * in stm32 qspi controller, QUADSPI_DCR register has a fsize field
535 * which define the size of nor flash.
536 * if fsize is NULL, the controller can't sent spi-nor command.
537 * set a temporary value just to discover the nor flash with
538 * "spi_nor_scan". After, the right value (mtd->size) can be set.
539 */
540 flash->fsize = FSIZE_VAL(SZ_1K);
541
542 ret = spi_nor_scan(&flash->nor, NULL, flash_read);
543 if (ret) {
544 dev_err(qspi->dev, "device scan failed\n");
545 return ret;
546 }
547
548 flash->fsize = FSIZE_VAL(mtd->size);
549
550 flash->read_mode = CCR_FMODE_MM;
551 if (mtd->size > qspi->mm_size)
552 flash->read_mode = CCR_FMODE_INDR;
553
554 writel_relaxed(DCR_CSHT(1), qspi->io_base + QUADSPI_DCR);
555
556 ret = mtd_device_register(mtd, NULL, 0);
557 if (ret) {
558 dev_err(qspi->dev, "mtd device parse failed\n");
559 return ret;
560 }
561
562 flash->registered = true;
563
564 dev_dbg(qspi->dev, "read mm:%s cs:%d bus:%d\n",
565 flash->read_mode == CCR_FMODE_MM ? "yes" : "no", cs_num, width);
566
567 return 0;
568}
569
570static void stm32_qspi_mtd_free(struct stm32_qspi *qspi)
571{
572 int i;
573
574 for (i = 0; i < STM32_MAX_NORCHIP; i++)
575 if (qspi->flash[i].registered)
576 mtd_device_unregister(&qspi->flash[i].nor.mtd);
577}
578
579static int stm32_qspi_probe(struct platform_device *pdev)
580{
581 struct device *dev = &pdev->dev;
582 struct device_node *flash_np;
583 struct reset_control *rstc;
584 struct stm32_qspi *qspi;
585 struct resource *res;
586 int ret, irq;
587
588 qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL);
589 if (!qspi)
590 return -ENOMEM;
591
592 qspi->nor_num = of_get_child_count(dev->of_node);
593 if (!qspi->nor_num || qspi->nor_num > STM32_MAX_NORCHIP)
594 return -ENODEV;
595
596 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
597 qspi->io_base = devm_ioremap_resource(dev, res);
598 if (IS_ERR(qspi->io_base))
599 return PTR_ERR(qspi->io_base);
600
601 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
602 qspi->mm_base = devm_ioremap_resource(dev, res);
603 if (IS_ERR(qspi->mm_base))
604 return PTR_ERR(qspi->mm_base);
605
606 qspi->mm_size = resource_size(res);
607
608 irq = platform_get_irq(pdev, 0);
609 ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
610 dev_name(dev), qspi);
611 if (ret) {
612 dev_err(dev, "failed to request irq\n");
613 return ret;
614 }
615
616 init_completion(&qspi->cmd_completion);
617
618 qspi->clk = devm_clk_get(dev, NULL);
619 if (IS_ERR(qspi->clk))
620 return PTR_ERR(qspi->clk);
621
622 qspi->clk_rate = clk_get_rate(qspi->clk);
623 if (!qspi->clk_rate)
624 return -EINVAL;
625
626 ret = clk_prepare_enable(qspi->clk);
627 if (ret) {
628 dev_err(dev, "can not enable the clock\n");
629 return ret;
630 }
631
632 rstc = devm_reset_control_get(dev, NULL);
633 if (!IS_ERR(rstc)) {
634 reset_control_assert(rstc);
635 udelay(2);
636 reset_control_deassert(rstc);
637 }
638
639 qspi->dev = dev;
640 platform_set_drvdata(pdev, qspi);
641 mutex_init(&qspi->lock);
642
643 for_each_available_child_of_node(dev->of_node, flash_np) {
644 ret = stm32_qspi_flash_setup(qspi, flash_np);
645 if (ret) {
646 dev_err(dev, "unable to setup flash chip\n");
647 goto err_flash;
648 }
649 }
650
651 return 0;
652
653err_flash:
654 mutex_destroy(&qspi->lock);
655 stm32_qspi_mtd_free(qspi);
656
657 clk_disable_unprepare(qspi->clk);
658 return ret;
659}
660
/*
 * Platform remove: disable the controller, unregister the MTD devices
 * and release the clock.  Always returns 0.
 */
static int stm32_qspi_remove(struct platform_device *pdev)
{
	struct stm32_qspi *qspi = platform_get_drvdata(pdev);

	/* disable qspi */
	writel_relaxed(0, qspi->io_base + QUADSPI_CR);

	stm32_qspi_mtd_free(qspi);
	mutex_destroy(&qspi->lock);

	clk_disable_unprepare(qspi->clk);
	return 0;
}
674
/* DT match table: the STM32F469 QUADSPI block. */
static const struct of_device_id stm32_qspi_match[] = {
	{.compatible = "st,stm32f469-qspi"},
	{}
};
MODULE_DEVICE_TABLE(of, stm32_qspi_match);

static struct platform_driver stm32_qspi_driver = {
	.probe	= stm32_qspi_probe,
	.remove	= stm32_qspi_remove,
	.driver	= {
		.name = "stm32-quadspi",
		.of_match_table = stm32_qspi_match,
	},
};
module_platform_driver(stm32_qspi_driver);

MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
MODULE_DESCRIPTION("STMicroelectronics STM32 quad spi driver");
MODULE_LICENSE("GPL v2");