aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-03-24 22:57:15 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-03-24 22:57:15 -0400
commit8f40842e4260f73792c156aded004197a19135ee (patch)
treeb192ed354a34839a8c1607883d1ebc09b06a76c0
parent88875667ebbcb95da3f93a4cf657d5dad7db9673 (diff)
parent6871c1b96de88d3576d935b528fd1b0ec70e81f5 (diff)
Merge tag 'for-linus-20160324' of git://git.infradead.org/linux-mtd
Pull MTD updates from Brian Norris: "NAND: - Add sunxi_nand randomizer support - begin refactoring NAND ecclayout structs - fix pxa3xx_nand dmaengine usage - brcmnand: fix support for v7.1 controller - add Qualcomm NAND controller driver SPI NOR: - add new ls1021a, ls2080a support to Freescale QuadSPI - add new flash ID entries - support bottom-block protection for Winbond flash - support Status Register Write Protect - remove broken QPI support for Micron SPI flash JFFS2: - improve post-mount CRC scan efficiency General: - refactor bcm63xxpart parser, to later extend for NAND - add writebuf size parameter to mtdram Other minor code quality improvements" * tag 'for-linus-20160324' of git://git.infradead.org/linux-mtd: (72 commits) mtd: nand: remove kerneldoc for removed function parameter mtd: nand: Qualcomm NAND controller driver dt/bindings: qcom_nandc: Add DT bindings mtd: nand: don't select chip in nand_chip's block_bad op mtd: spi-nor: support lock/unlock for a few Winbond chips mtd: spi-nor: add TB (Top/Bottom) protect support mtd: spi-nor: add SPI_NOR_HAS_LOCK flag mtd: spi-nor: use BIT() for flash_info flags mtd: spi-nor: disallow further writes to SR if WP# is low mtd: spi-nor: make lock/unlock bounds checks more obvious and robust mtd: spi-nor: silently drop lock/unlock for already locked/unlocked region mtd: spi-nor: wait for SR_WIP to clear on initial unlock mtd: nand: simplify nand_bch_init() usage mtd: mtdswap: remove useless if (!mtd->ecclayout) test mtd: create an mtd_oobavail() helper and make use of it mtd: kill the ecclayout->oobavail field mtd: nand: check status before reporting timeout mtd: bcm63xxpart: give width specifier an 'int', not 'size_t' mtd: mtdram: Add parameter for setting writebuf size mtd: nand: pxa3xx_nand: kill unused field 'drcmr_cmd' ...
-rw-r--r--Documentation/devicetree/bindings/mtd/atmel-nand.txt31
-rw-r--r--Documentation/devicetree/bindings/mtd/fsl-quadspi.txt5
-rw-r--r--Documentation/devicetree/bindings/mtd/qcom_nandc.txt86
-rw-r--r--arch/arm/plat-samsung/devs.c9
-rw-r--r--arch/mips/include/asm/mach-jz4740/jz4740_nand.h2
-rw-r--r--drivers/memory/fsl_ifc.c2
-rw-r--r--drivers/mtd/Kconfig2
-rw-r--r--drivers/mtd/bcm47xxpart.c42
-rw-r--r--drivers/mtd/bcm63xxpart.c182
-rw-r--r--drivers/mtd/devices/docg3.c5
-rw-r--r--drivers/mtd/devices/mtdram.c5
-rw-r--r--drivers/mtd/mtdpart.c5
-rw-r--r--drivers/mtd/mtdswap.c24
-rw-r--r--drivers/mtd/nand/Kconfig10
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/atmel_nand.c89
-rw-r--r--drivers/mtd/nand/atmel_nand_ecc.h9
-rw-r--r--drivers/mtd/nand/atmel_nand_nfc.h3
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c42
-rw-r--r--drivers/mtd/nand/cafe_nand.c2
-rw-r--r--drivers/mtd/nand/diskonchip.c2
-rw-r--r--drivers/mtd/nand/docg4.c3
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c73
-rw-r--r--drivers/mtd/nand/hisi504_nand.c1
-rw-r--r--drivers/mtd/nand/jz4740_nand.c3
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c2
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c7
-rw-r--r--drivers/mtd/nand/nand_base.c78
-rw-r--r--drivers/mtd/nand/nand_bbt.c2
-rw-r--r--drivers/mtd/nand/nand_bch.c27
-rw-r--r--drivers/mtd/nand/nand_ids.c4
-rw-r--r--drivers/mtd/nand/nuc900_nand.c2
-rw-r--r--drivers/mtd/nand/omap2.c28
-rw-r--r--drivers/mtd/nand/plat_nand.c1
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c191
-rw-r--r--drivers/mtd/nand/qcom_nandc.c2223
-rw-r--r--drivers/mtd/nand/s3c2410.c3
-rw-r--r--drivers/mtd/nand/sunxi_nand.c287
-rw-r--r--drivers/mtd/nand/vf610_nfc.c2
-rw-r--r--drivers/mtd/onenand/onenand_base.c32
-rw-r--r--drivers/mtd/onenand/onenand_bbt.c5
-rw-r--r--drivers/mtd/spi-nor/Kconfig3
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c167
-rw-r--r--drivers/mtd/spi-nor/mtk-quadspi.c4
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c246
-rw-r--r--drivers/mtd/tests/oobtest.c49
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c1
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.h1
-rw-r--r--fs/jffs2/gc.c64
-rw-r--r--fs/jffs2/jffs2_fs_sb.h2
-rw-r--r--fs/jffs2/nodemgmt.c4
-rw-r--r--fs/jffs2/wbuf.c6
-rw-r--r--include/linux/mtd/bbm.h1
-rw-r--r--include/linux/mtd/inftl.h1
-rw-r--r--include/linux/mtd/map.h7
-rw-r--r--include/linux/mtd/mtd.h6
-rw-r--r--include/linux/mtd/nand.h10
-rw-r--r--include/linux/mtd/nand_bch.h8
-rw-r--r--include/linux/mtd/nftl.h1
-rw-r--r--include/linux/mtd/spi-nor.h2
-rw-r--r--include/linux/platform_data/mtd-nand-s3c2410.h1
61 files changed, 3524 insertions, 592 deletions
diff --git a/Documentation/devicetree/bindings/mtd/atmel-nand.txt b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
index 7d4c8eb775a5..d53aba98fbc9 100644
--- a/Documentation/devicetree/bindings/mtd/atmel-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
@@ -1,7 +1,10 @@
1Atmel NAND flash 1Atmel NAND flash
2 2
3Required properties: 3Required properties:
4- compatible : should be "atmel,at91rm9200-nand" or "atmel,sama5d4-nand". 4- compatible: The possible values are:
5 "atmel,at91rm9200-nand"
6 "atmel,sama5d2-nand"
7 "atmel,sama5d4-nand"
5- reg : should specify localbus address and size used for the chip, 8- reg : should specify localbus address and size used for the chip,
6 and hardware ECC controller if available. 9 and hardware ECC controller if available.
7 If the hardware ECC is PMECC, it should contain address and size for 10 If the hardware ECC is PMECC, it should contain address and size for
@@ -21,10 +24,11 @@ Optional properties:
21- nand-ecc-mode : String, operation mode of the NAND ecc mode, soft by default. 24- nand-ecc-mode : String, operation mode of the NAND ecc mode, soft by default.
22 Supported values are: "none", "soft", "hw", "hw_syndrome", "hw_oob_first", 25 Supported values are: "none", "soft", "hw", "hw_syndrome", "hw_oob_first",
23 "soft_bch". 26 "soft_bch".
24- atmel,has-pmecc : boolean to enable Programmable Multibit ECC hardware. 27- atmel,has-pmecc : boolean to enable Programmable Multibit ECC hardware,
25 Only supported by at91sam9x5 or later sam9 product. 28 capable of BCH encoding and decoding, on devices where it is present.
26- atmel,pmecc-cap : error correct capability for Programmable Multibit ECC 29- atmel,pmecc-cap : error correct capability for Programmable Multibit ECC
27 Controller. Supported values are: 2, 4, 8, 12, 24. 30 Controller. Supported values are: 2, 4, 8, 12, 24. If the compatible string
31 is "atmel,sama5d2-nand", 32 is also valid.
28- atmel,pmecc-sector-size : sector size for ECC computation. Supported values 32- atmel,pmecc-sector-size : sector size for ECC computation. Supported values
29 are: 512, 1024. 33 are: 512, 1024.
30- atmel,pmecc-lookup-table-offset : includes two offsets of lookup table in ROM 34- atmel,pmecc-lookup-table-offset : includes two offsets of lookup table in ROM
@@ -32,15 +36,16 @@ Optional properties:
32 sector size 1024. If not specified, driver will build the table in runtime. 36 sector size 1024. If not specified, driver will build the table in runtime.
33- nand-bus-width : 8 or 16 bus width if not present 8 37- nand-bus-width : 8 or 16 bus width if not present 8
34- nand-on-flash-bbt: boolean to enable on flash bbt option if not present false 38- nand-on-flash-bbt: boolean to enable on flash bbt option if not present false
35- Nand Flash Controller(NFC) is a slave driver under Atmel nand flash 39
36 - Required properties: 40Nand Flash Controller(NFC) is an optional sub-node
37 - compatible : "atmel,sama5d3-nfc". 41Required properties:
38 - reg : should specify the address and size used for NFC command registers, 42- compatible : "atmel,sama5d3-nfc" or "atmel,sama5d4-nfc".
39 NFC registers and NFC Sram. NFC Sram address and size can be absent 43- reg : should specify the address and size used for NFC command registers,
40 if don't want to use it. 44 NFC registers and NFC SRAM. NFC SRAM address and size can be absent
41 - clocks: phandle to the peripheral clock 45 if don't want to use it.
42 - Optional properties: 46- clocks: phandle to the peripheral clock
43 - atmel,write-by-sram: boolean to enable NFC write by sram. 47Optional properties:
48- atmel,write-by-sram: boolean to enable NFC write by SRAM.
44 49
45Examples: 50Examples:
46nand0: nand@40000000,0 { 51nand0: nand@40000000,0 {
diff --git a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt b/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
index 00c587b3d3ae..0333ec87dc49 100644
--- a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
+++ b/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
@@ -3,7 +3,9 @@
3Required properties: 3Required properties:
4 - compatible : Should be "fsl,vf610-qspi", "fsl,imx6sx-qspi", 4 - compatible : Should be "fsl,vf610-qspi", "fsl,imx6sx-qspi",
5 "fsl,imx7d-qspi", "fsl,imx6ul-qspi", 5 "fsl,imx7d-qspi", "fsl,imx6ul-qspi",
6 "fsl,ls1021-qspi" 6 "fsl,ls1021a-qspi"
7 or
8 "fsl,ls2080a-qspi" followed by "fsl,ls1021a-qspi"
7 - reg : the first contains the register location and length, 9 - reg : the first contains the register location and length,
8 the second contains the memory mapping address and length 10 the second contains the memory mapping address and length
9 - reg-names: Should contain the reg names "QuadSPI" and "QuadSPI-memory" 11 - reg-names: Should contain the reg names "QuadSPI" and "QuadSPI-memory"
@@ -19,6 +21,7 @@ Optional properties:
19 But if there are two NOR flashes connected to the 21 But if there are two NOR flashes connected to the
20 bus, you should enable this property. 22 bus, you should enable this property.
21 (Please check the board's schematic.) 23 (Please check the board's schematic.)
24 - big-endian : That means the IP register is big endian
22 25
23Example: 26Example:
24 27
diff --git a/Documentation/devicetree/bindings/mtd/qcom_nandc.txt b/Documentation/devicetree/bindings/mtd/qcom_nandc.txt
new file mode 100644
index 000000000000..70dd5118a324
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/qcom_nandc.txt
@@ -0,0 +1,86 @@
1* Qualcomm NAND controller
2
3Required properties:
4- compatible: should be "qcom,ipq806x-nand"
5- reg: MMIO address range
6- clocks: must contain core clock and always on clock
7- clock-names: must contain "core" for the core clock and "aon" for the
8 always on clock
9- dmas: DMA specifier, consisting of a phandle to the ADM DMA
10 controller node and the channel number to be used for
11 NAND. Refer to dma.txt and qcom_adm.txt for more details
12- dma-names: must be "rxtx"
13- qcom,cmd-crci: must contain the ADM command type CRCI block instance
14 number specified for the NAND controller on the given
15 platform
16- qcom,data-crci: must contain the ADM data type CRCI block instance
17 number specified for the NAND controller on the given
18 platform
19- #address-cells: <1> - subnodes give the chip-select number
20- #size-cells: <0>
21
22* NAND chip-select
23
24Each controller may contain one or more subnodes to represent enabled
25chip-selects which (may) contain NAND flash chips. Their properties are as
26follows.
27
28Required properties:
29- compatible: should contain "qcom,nandcs"
30- reg: a single integer representing the chip-select
31 number (e.g., 0, 1, 2, etc.)
32- #address-cells: see partition.txt
33- #size-cells: see partition.txt
34- nand-ecc-strength: see nand.txt
35- nand-ecc-step-size: must be 512. see nand.txt for more details.
36
37Optional properties:
38- nand-bus-width: see nand.txt
39
40Each nandcs device node may optionally contain a 'partitions' sub-node, which
41further contains sub-nodes describing the flash partition mapping. See
42partition.txt for more detail.
43
44Example:
45
46nand@1ac00000 {
47 compatible = "qcom,ebi2-nandc";
48 reg = <0x1ac00000 0x800>;
49
50 clocks = <&gcc EBI2_CLK>,
51 <&gcc EBI2_AON_CLK>;
52 clock-names = "core", "aon";
53
54 dmas = <&adm_dma 3>;
55 dma-names = "rxtx";
56 qcom,cmd-crci = <15>;
57 qcom,data-crci = <3>;
58
59 #address-cells = <1>;
60 #size-cells = <0>;
61
62 nandcs@0 {
63 compatible = "qcom,nandcs";
64 reg = <0>;
65
66 nand-ecc-strength = <4>;
67 nand-ecc-step-size = <512>;
68 nand-bus-width = <8>;
69
70 partitions {
71 compatible = "fixed-partitions";
72 #address-cells = <1>;
73 #size-cells = <1>;
74
75 partition@0 {
76 label = "boot-nand";
77 reg = <0 0x58a0000>;
78 };
79
80 partition@58a0000 {
81 label = "fs-nand";
82 reg = <0x58a0000 0x4000000>;
83 };
84 };
85 };
86};
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index b53d4ff3befb..84baa16f4c0b 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -727,15 +727,6 @@ static int __init s3c_nand_copy_set(struct s3c2410_nand_set *set)
727 return -ENOMEM; 727 return -ENOMEM;
728 } 728 }
729 729
730 if (set->ecc_layout) {
731 ptr = kmemdup(set->ecc_layout,
732 sizeof(struct nand_ecclayout), GFP_KERNEL);
733 set->ecc_layout = ptr;
734
735 if (!ptr)
736 return -ENOMEM;
737 }
738
739 return 0; 730 return 0;
740} 731}
741 732
diff --git a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h b/arch/mips/include/asm/mach-jz4740/jz4740_nand.h
index 79cff26d8b36..398733e3e2cf 100644
--- a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h
+++ b/arch/mips/include/asm/mach-jz4740/jz4740_nand.h
@@ -25,8 +25,6 @@ struct jz_nand_platform_data {
25 int num_partitions; 25 int num_partitions;
26 struct mtd_partition *partitions; 26 struct mtd_partition *partitions;
27 27
28 struct nand_ecclayout *ecc_layout;
29
30 unsigned char banks[JZ_NAND_NUM_BANKS]; 28 unsigned char banks[JZ_NAND_NUM_BANKS];
31 29
32 void (*ident_callback)(struct platform_device *, struct nand_chip *, 30 void (*ident_callback)(struct platform_device *, struct nand_chip *,
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index acd1460cf787..2a691da8c1c7 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -260,7 +260,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
260 260
261 /* get the Controller level irq */ 261 /* get the Controller level irq */
262 fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0); 262 fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
263 if (fsl_ifc_ctrl_dev->irq == NO_IRQ) { 263 if (fsl_ifc_ctrl_dev->irq == 0) {
264 dev_err(&dev->dev, "failed to get irq resource " 264 dev_err(&dev->dev, "failed to get irq resource "
265 "for IFC\n"); 265 "for IFC\n");
266 ret = -ENODEV; 266 ret = -ENODEV;
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 42cc953309f1..e83a279f1217 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -142,7 +142,7 @@ config MTD_AR7_PARTS
142 142
143config MTD_BCM63XX_PARTS 143config MTD_BCM63XX_PARTS
144 tristate "BCM63XX CFE partitioning support" 144 tristate "BCM63XX CFE partitioning support"
145 depends on BCM63XX 145 depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
146 select CRC32 146 select CRC32
147 help 147 help
148 This provides partions parsing for BCM63xx devices with CFE 148 This provides partions parsing for BCM63xx devices with CFE
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 8282f47bcf5d..845dd27d9f41 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -66,11 +66,13 @@ static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master,
66{ 66{
67 uint32_t buf; 67 uint32_t buf;
68 size_t bytes_read; 68 size_t bytes_read;
69 int err;
69 70
70 if (mtd_read(master, offset, sizeof(buf), &bytes_read, 71 err = mtd_read(master, offset, sizeof(buf), &bytes_read,
71 (uint8_t *)&buf) < 0) { 72 (uint8_t *)&buf);
72 pr_err("mtd_read error while parsing (offset: 0x%X)!\n", 73 if (err && !mtd_is_bitflip(err)) {
73 offset); 74 pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
75 offset, err);
74 goto out_default; 76 goto out_default;
75 } 77 }
76 78
@@ -95,6 +97,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
95 int trx_part = -1; 97 int trx_part = -1;
96 int last_trx_part = -1; 98 int last_trx_part = -1;
97 int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, }; 99 int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
100 int err;
98 101
99 /* 102 /*
100 * Some really old flashes (like AT45DB*) had smaller erasesize-s, but 103 * Some really old flashes (like AT45DB*) had smaller erasesize-s, but
@@ -118,8 +121,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
118 /* Parse block by block looking for magics */ 121 /* Parse block by block looking for magics */
119 for (offset = 0; offset <= master->size - blocksize; 122 for (offset = 0; offset <= master->size - blocksize;
120 offset += blocksize) { 123 offset += blocksize) {
121 /* Nothing more in higher memory */ 124 /* Nothing more in higher memory on BCM47XX (MIPS) */
122 if (offset >= 0x2000000) 125 if (config_enabled(CONFIG_BCM47XX) && offset >= 0x2000000)
123 break; 126 break;
124 127
125 if (curr_part >= BCM47XXPART_MAX_PARTS) { 128 if (curr_part >= BCM47XXPART_MAX_PARTS) {
@@ -128,10 +131,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
128 } 131 }
129 132
130 /* Read beginning of the block */ 133 /* Read beginning of the block */
131 if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ, 134 err = mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
132 &bytes_read, (uint8_t *)buf) < 0) { 135 &bytes_read, (uint8_t *)buf);
133 pr_err("mtd_read error while parsing (offset: 0x%X)!\n", 136 if (err && !mtd_is_bitflip(err)) {
134 offset); 137 pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
138 offset, err);
135 continue; 139 continue;
136 } 140 }
137 141
@@ -254,10 +258,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
254 } 258 }
255 259
256 /* Read middle of the block */ 260 /* Read middle of the block */
257 if (mtd_read(master, offset + 0x8000, 0x4, 261 err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read,
258 &bytes_read, (uint8_t *)buf) < 0) { 262 (uint8_t *)buf);
259 pr_err("mtd_read error while parsing (offset: 0x%X)!\n", 263 if (err && !mtd_is_bitflip(err)) {
260 offset); 264 pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
265 offset, err);
261 continue; 266 continue;
262 } 267 }
263 268
@@ -277,10 +282,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
277 } 282 }
278 283
279 offset = master->size - possible_nvram_sizes[i]; 284 offset = master->size - possible_nvram_sizes[i];
280 if (mtd_read(master, offset, 0x4, &bytes_read, 285 err = mtd_read(master, offset, 0x4, &bytes_read,
281 (uint8_t *)buf) < 0) { 286 (uint8_t *)buf);
282 pr_err("mtd_read error while reading at offset 0x%X!\n", 287 if (err && !mtd_is_bitflip(err)) {
283 offset); 288 pr_err("mtd_read error while reading (offset 0x%X): %d\n",
289 offset, err);
284 continue; 290 continue;
285 } 291 }
286 292
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
index cec3188a170d..41d1d3149c61 100644
--- a/drivers/mtd/bcm63xxpart.c
+++ b/drivers/mtd/bcm63xxpart.c
@@ -24,6 +24,7 @@
24 24
25#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 25#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26 26
27#include <linux/bcm963xx_nvram.h>
27#include <linux/bcm963xx_tag.h> 28#include <linux/bcm963xx_tag.h>
28#include <linux/crc32.h> 29#include <linux/crc32.h>
29#include <linux/module.h> 30#include <linux/module.h>
@@ -34,12 +35,15 @@
34#include <linux/mtd/mtd.h> 35#include <linux/mtd/mtd.h>
35#include <linux/mtd/partitions.h> 36#include <linux/mtd/partitions.h>
36 37
37#include <asm/mach-bcm63xx/bcm63xx_nvram.h> 38#define BCM963XX_CFE_BLOCK_SIZE SZ_64K /* always at least 64KiB */
38#include <asm/mach-bcm63xx/board_bcm963xx.h>
39 39
40#define BCM63XX_CFE_BLOCK_SIZE SZ_64K /* always at least 64KiB */ 40#define BCM963XX_CFE_MAGIC_OFFSET 0x4e0
41#define BCM963XX_CFE_VERSION_OFFSET 0x570
42#define BCM963XX_NVRAM_OFFSET 0x580
41 43
42#define BCM63XX_CFE_MAGIC_OFFSET 0x4e0 44/* Ensure strings read from flash structs are null terminated */
45#define STR_NULL_TERMINATE(x) \
46 do { char *_str = (x); _str[sizeof(x) - 1] = 0; } while (0)
43 47
44static int bcm63xx_detect_cfe(struct mtd_info *master) 48static int bcm63xx_detect_cfe(struct mtd_info *master)
45{ 49{
@@ -58,68 +62,130 @@ static int bcm63xx_detect_cfe(struct mtd_info *master)
58 return 0; 62 return 0;
59 63
60 /* very old CFE's do not have the cfe-v string, so check for magic */ 64 /* very old CFE's do not have the cfe-v string, so check for magic */
61 ret = mtd_read(master, BCM63XX_CFE_MAGIC_OFFSET, 8, &retlen, 65 ret = mtd_read(master, BCM963XX_CFE_MAGIC_OFFSET, 8, &retlen,
62 (void *)buf); 66 (void *)buf);
63 buf[retlen] = 0; 67 buf[retlen] = 0;
64 68
65 return strncmp("CFE1CFE1", buf, 8); 69 return strncmp("CFE1CFE1", buf, 8);
66} 70}
67 71
68static int bcm63xx_parse_cfe_partitions(struct mtd_info *master, 72static int bcm63xx_read_nvram(struct mtd_info *master,
69 const struct mtd_partition **pparts, 73 struct bcm963xx_nvram *nvram)
70 struct mtd_part_parser_data *data) 74{
75 u32 actual_crc, expected_crc;
76 size_t retlen;
77 int ret;
78
79 /* extract nvram data */
80 ret = mtd_read(master, BCM963XX_NVRAM_OFFSET, BCM963XX_NVRAM_V5_SIZE,
81 &retlen, (void *)nvram);
82 if (ret)
83 return ret;
84
85 ret = bcm963xx_nvram_checksum(nvram, &expected_crc, &actual_crc);
86 if (ret)
87 pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n",
88 expected_crc, actual_crc);
89
90 if (!nvram->psi_size)
91 nvram->psi_size = BCM963XX_DEFAULT_PSI_SIZE;
92
93 return 0;
94}
95
96static int bcm63xx_read_image_tag(struct mtd_info *master, const char *name,
97 loff_t tag_offset, struct bcm_tag *buf)
98{
99 int ret;
100 size_t retlen;
101 u32 computed_crc;
102
103 ret = mtd_read(master, tag_offset, sizeof(*buf), &retlen, (void *)buf);
104 if (ret)
105 return ret;
106
107 if (retlen != sizeof(*buf))
108 return -EIO;
109
110 computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf,
111 offsetof(struct bcm_tag, header_crc));
112 if (computed_crc == buf->header_crc) {
113 STR_NULL_TERMINATE(buf->board_id);
114 STR_NULL_TERMINATE(buf->tag_version);
115
116 pr_info("%s: CFE image tag found at 0x%llx with version %s, board type %s\n",
117 name, tag_offset, buf->tag_version, buf->board_id);
118
119 return 0;
120 }
121
122 pr_warn("%s: CFE image tag at 0x%llx CRC invalid (expected %08x, actual %08x)\n",
123 name, tag_offset, buf->header_crc, computed_crc);
124 return 1;
125}
126
127static int bcm63xx_parse_cfe_nor_partitions(struct mtd_info *master,
128 const struct mtd_partition **pparts, struct bcm963xx_nvram *nvram)
71{ 129{
72 /* CFE, NVRAM and global Linux are always present */ 130 /* CFE, NVRAM and global Linux are always present */
73 int nrparts = 3, curpart = 0; 131 int nrparts = 3, curpart = 0;
74 struct bcm_tag *buf; 132 struct bcm_tag *buf = NULL;
75 struct mtd_partition *parts; 133 struct mtd_partition *parts;
76 int ret; 134 int ret;
77 size_t retlen;
78 unsigned int rootfsaddr, kerneladdr, spareaddr; 135 unsigned int rootfsaddr, kerneladdr, spareaddr;
79 unsigned int rootfslen, kernellen, sparelen, totallen; 136 unsigned int rootfslen, kernellen, sparelen, totallen;
80 unsigned int cfelen, nvramlen; 137 unsigned int cfelen, nvramlen;
81 unsigned int cfe_erasesize; 138 unsigned int cfe_erasesize;
82 int i; 139 int i;
83 u32 computed_crc;
84 bool rootfs_first = false; 140 bool rootfs_first = false;
85 141
86 if (bcm63xx_detect_cfe(master))
87 return -EINVAL;
88
89 cfe_erasesize = max_t(uint32_t, master->erasesize, 142 cfe_erasesize = max_t(uint32_t, master->erasesize,
90 BCM63XX_CFE_BLOCK_SIZE); 143 BCM963XX_CFE_BLOCK_SIZE);
91 144
92 cfelen = cfe_erasesize; 145 cfelen = cfe_erasesize;
93 nvramlen = bcm63xx_nvram_get_psi_size() * SZ_1K; 146 nvramlen = nvram->psi_size * SZ_1K;
94 nvramlen = roundup(nvramlen, cfe_erasesize); 147 nvramlen = roundup(nvramlen, cfe_erasesize);
95 148
96 /* Allocate memory for buffer */
97 buf = vmalloc(sizeof(struct bcm_tag)); 149 buf = vmalloc(sizeof(struct bcm_tag));
98 if (!buf) 150 if (!buf)
99 return -ENOMEM; 151 return -ENOMEM;
100 152
101 /* Get the tag */ 153 /* Get the tag */
102 ret = mtd_read(master, cfelen, sizeof(struct bcm_tag), &retlen, 154 ret = bcm63xx_read_image_tag(master, "rootfs", cfelen, buf);
103 (void *)buf); 155 if (!ret) {
104 156 STR_NULL_TERMINATE(buf->flash_image_start);
105 if (retlen != sizeof(struct bcm_tag)) { 157 if (kstrtouint(buf->flash_image_start, 10, &rootfsaddr) ||
106 vfree(buf); 158 rootfsaddr < BCM963XX_EXTENDED_SIZE) {
107 return -EIO; 159 pr_err("invalid rootfs address: %*ph\n",
108 } 160 (int)sizeof(buf->flash_image_start),
161 buf->flash_image_start);
162 goto invalid_tag;
163 }
109 164
110 computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf, 165 STR_NULL_TERMINATE(buf->kernel_address);
111 offsetof(struct bcm_tag, header_crc)); 166 if (kstrtouint(buf->kernel_address, 10, &kerneladdr) ||
112 if (computed_crc == buf->header_crc) { 167 kerneladdr < BCM963XX_EXTENDED_SIZE) {
113 char *boardid = &(buf->board_id[0]); 168 pr_err("invalid kernel address: %*ph\n",
114 char *tagversion = &(buf->tag_version[0]); 169 (int)sizeof(buf->kernel_address),
170 buf->kernel_address);
171 goto invalid_tag;
172 }
115 173
116 sscanf(buf->flash_image_start, "%u", &rootfsaddr); 174 STR_NULL_TERMINATE(buf->kernel_length);
117 sscanf(buf->kernel_address, "%u", &kerneladdr); 175 if (kstrtouint(buf->kernel_length, 10, &kernellen)) {
118 sscanf(buf->kernel_length, "%u", &kernellen); 176 pr_err("invalid kernel length: %*ph\n",
119 sscanf(buf->total_length, "%u", &totallen); 177 (int)sizeof(buf->kernel_length),
178 buf->kernel_length);
179 goto invalid_tag;
180 }
120 181
121 pr_info("CFE boot tag found with version %s and board type %s\n", 182 STR_NULL_TERMINATE(buf->total_length);
122 tagversion, boardid); 183 if (kstrtouint(buf->total_length, 10, &totallen)) {
184 pr_err("invalid total length: %*ph\n",
185 (int)sizeof(buf->total_length),
186 buf->total_length);
187 goto invalid_tag;
188 }
123 189
124 kerneladdr = kerneladdr - BCM963XX_EXTENDED_SIZE; 190 kerneladdr = kerneladdr - BCM963XX_EXTENDED_SIZE;
125 rootfsaddr = rootfsaddr - BCM963XX_EXTENDED_SIZE; 191 rootfsaddr = rootfsaddr - BCM963XX_EXTENDED_SIZE;
@@ -134,13 +200,14 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
134 rootfsaddr = kerneladdr + kernellen; 200 rootfsaddr = kerneladdr + kernellen;
135 rootfslen = spareaddr - rootfsaddr; 201 rootfslen = spareaddr - rootfsaddr;
136 } 202 }
137 } else { 203 } else if (ret > 0) {
138 pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n", 204invalid_tag:
139 buf->header_crc, computed_crc);
140 kernellen = 0; 205 kernellen = 0;
141 rootfslen = 0; 206 rootfslen = 0;
142 rootfsaddr = 0; 207 rootfsaddr = 0;
143 spareaddr = cfelen; 208 spareaddr = cfelen;
209 } else {
210 goto out;
144 } 211 }
145 sparelen = master->size - spareaddr - nvramlen; 212 sparelen = master->size - spareaddr - nvramlen;
146 213
@@ -151,11 +218,10 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
151 if (kernellen > 0) 218 if (kernellen > 0)
152 nrparts++; 219 nrparts++;
153 220
154 /* Ask kernel for more memory */
155 parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL); 221 parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
156 if (!parts) { 222 if (!parts) {
157 vfree(buf); 223 ret = -ENOMEM;
158 return -ENOMEM; 224 goto out;
159 } 225 }
160 226
161 /* Start building partition list */ 227 /* Start building partition list */
@@ -206,9 +272,43 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
206 sparelen); 272 sparelen);
207 273
208 *pparts = parts; 274 *pparts = parts;
275 ret = 0;
276
277out:
209 vfree(buf); 278 vfree(buf);
210 279
280 if (ret)
281 return ret;
282
211 return nrparts; 283 return nrparts;
284}
285
286static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
287 const struct mtd_partition **pparts,
288 struct mtd_part_parser_data *data)
289{
290 struct bcm963xx_nvram *nvram = NULL;
291 int ret;
292
293 if (bcm63xx_detect_cfe(master))
294 return -EINVAL;
295
296 nvram = vzalloc(sizeof(*nvram));
297 if (!nvram)
298 return -ENOMEM;
299
300 ret = bcm63xx_read_nvram(master, nvram);
301 if (ret)
302 goto out;
303
304 if (!mtd_type_is_nand(master))
305 ret = bcm63xx_parse_cfe_nor_partitions(master, pparts, nvram);
306 else
307 ret = -EINVAL;
308
309out:
310 vfree(nvram);
311 return ret;
212}; 312};
213 313
214static struct mtd_part_parser bcm63xx_cfe_parser = { 314static struct mtd_part_parser bcm63xx_cfe_parser = {
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index c3a2695a4420..e7b2e439696c 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -72,13 +72,11 @@ MODULE_PARM_DESC(reliable_mode, "Set the docg3 mode (0=normal MLC, 1=fast, "
72 * @eccbytes: 8 bytes are used (1 for Hamming ECC, 7 for BCH ECC) 72 * @eccbytes: 8 bytes are used (1 for Hamming ECC, 7 for BCH ECC)
73 * @eccpos: ecc positions (byte 7 is Hamming ECC, byte 8-14 are BCH ECC) 73 * @eccpos: ecc positions (byte 7 is Hamming ECC, byte 8-14 are BCH ECC)
74 * @oobfree: free pageinfo bytes (byte 0 until byte 6, byte 15 74 * @oobfree: free pageinfo bytes (byte 0 until byte 6, byte 15
75 * @oobavail: 8 available bytes remaining after ECC toll
76 */ 75 */
77static struct nand_ecclayout docg3_oobinfo = { 76static struct nand_ecclayout docg3_oobinfo = {
78 .eccbytes = 8, 77 .eccbytes = 8,
79 .eccpos = {7, 8, 9, 10, 11, 12, 13, 14}, 78 .eccpos = {7, 8, 9, 10, 11, 12, 13, 14},
80 .oobfree = {{0, 7}, {15, 1} }, 79 .oobfree = {{0, 7}, {15, 1} },
81 .oobavail = 8,
82}; 80};
83 81
84static inline u8 doc_readb(struct docg3 *docg3, u16 reg) 82static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
@@ -1438,7 +1436,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
1438 oobdelta = mtd->oobsize; 1436 oobdelta = mtd->oobsize;
1439 break; 1437 break;
1440 case MTD_OPS_AUTO_OOB: 1438 case MTD_OPS_AUTO_OOB:
1441 oobdelta = mtd->ecclayout->oobavail; 1439 oobdelta = mtd->oobavail;
1442 break; 1440 break;
1443 default: 1441 default:
1444 return -EINVAL; 1442 return -EINVAL;
@@ -1860,6 +1858,7 @@ static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
1860 mtd->_write_oob = doc_write_oob; 1858 mtd->_write_oob = doc_write_oob;
1861 mtd->_block_isbad = doc_block_isbad; 1859 mtd->_block_isbad = doc_block_isbad;
1862 mtd->ecclayout = &docg3_oobinfo; 1860 mtd->ecclayout = &docg3_oobinfo;
1861 mtd->oobavail = 8;
1863 mtd->ecc_strength = DOC_ECC_BCH_T; 1862 mtd->ecc_strength = DOC_ECC_BCH_T;
1864 1863
1865 return 0; 1864 return 0;
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index 627a9bc37679..cbd8547d7aad 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -19,6 +19,7 @@
19 19
20static unsigned long total_size = CONFIG_MTDRAM_TOTAL_SIZE; 20static unsigned long total_size = CONFIG_MTDRAM_TOTAL_SIZE;
21static unsigned long erase_size = CONFIG_MTDRAM_ERASE_SIZE; 21static unsigned long erase_size = CONFIG_MTDRAM_ERASE_SIZE;
22static unsigned long writebuf_size = 64;
22#define MTDRAM_TOTAL_SIZE (total_size * 1024) 23#define MTDRAM_TOTAL_SIZE (total_size * 1024)
23#define MTDRAM_ERASE_SIZE (erase_size * 1024) 24#define MTDRAM_ERASE_SIZE (erase_size * 1024)
24 25
@@ -27,6 +28,8 @@ module_param(total_size, ulong, 0);
27MODULE_PARM_DESC(total_size, "Total device size in KiB"); 28MODULE_PARM_DESC(total_size, "Total device size in KiB");
28module_param(erase_size, ulong, 0); 29module_param(erase_size, ulong, 0);
29MODULE_PARM_DESC(erase_size, "Device erase block size in KiB"); 30MODULE_PARM_DESC(erase_size, "Device erase block size in KiB");
31module_param(writebuf_size, ulong, 0);
32MODULE_PARM_DESC(writebuf_size, "Device write buf size in Bytes (Default: 64)");
30#endif 33#endif
31 34
32// We could store these in the mtd structure, but we only support 1 device.. 35// We could store these in the mtd structure, but we only support 1 device..
@@ -123,7 +126,7 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
123 mtd->flags = MTD_CAP_RAM; 126 mtd->flags = MTD_CAP_RAM;
124 mtd->size = size; 127 mtd->size = size;
125 mtd->writesize = 1; 128 mtd->writesize = 1;
126 mtd->writebufsize = 64; /* Mimic CFI NOR flashes */ 129 mtd->writebufsize = writebuf_size;
127 mtd->erasesize = MTDRAM_ERASE_SIZE; 130 mtd->erasesize = MTDRAM_ERASE_SIZE;
128 mtd->priv = mapped_address; 131 mtd->priv = mapped_address;
129 132
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 10bf304027dd..08de4b2cf0f5 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -126,10 +126,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
126 if (ops->oobbuf) { 126 if (ops->oobbuf) {
127 size_t len, pages; 127 size_t len, pages;
128 128
129 if (ops->mode == MTD_OPS_AUTO_OOB) 129 len = mtd_oobavail(mtd, ops);
130 len = mtd->oobavail;
131 else
132 len = mtd->oobsize;
133 pages = mtd_div_by_ws(mtd->size, mtd); 130 pages = mtd_div_by_ws(mtd->size, mtd);
134 pages -= mtd_div_by_ws(from, mtd); 131 pages -= mtd_div_by_ws(from, mtd);
135 if (ops->ooboffs + ops->ooblen > pages * len) 132 if (ops->ooboffs + ops->ooblen > pages * len)
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index fc8b3d16cce7..cb06bdd21a1b 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -346,7 +346,7 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
346 if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset)) 346 if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))
347 return MTDSWAP_SCANNED_BAD; 347 return MTDSWAP_SCANNED_BAD;
348 348
349 ops.ooblen = 2 * d->mtd->ecclayout->oobavail; 349 ops.ooblen = 2 * d->mtd->oobavail;
350 ops.oobbuf = d->oob_buf; 350 ops.oobbuf = d->oob_buf;
351 ops.ooboffs = 0; 351 ops.ooboffs = 0;
352 ops.datbuf = NULL; 352 ops.datbuf = NULL;
@@ -359,7 +359,7 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
359 359
360 data = (struct mtdswap_oobdata *)d->oob_buf; 360 data = (struct mtdswap_oobdata *)d->oob_buf;
361 data2 = (struct mtdswap_oobdata *) 361 data2 = (struct mtdswap_oobdata *)
362 (d->oob_buf + d->mtd->ecclayout->oobavail); 362 (d->oob_buf + d->mtd->oobavail);
363 363
364 if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) { 364 if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
365 eb->erase_count = le32_to_cpu(data->count); 365 eb->erase_count = le32_to_cpu(data->count);
@@ -933,7 +933,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
933 933
934 ops.mode = MTD_OPS_AUTO_OOB; 934 ops.mode = MTD_OPS_AUTO_OOB;
935 ops.len = mtd->writesize; 935 ops.len = mtd->writesize;
936 ops.ooblen = mtd->ecclayout->oobavail; 936 ops.ooblen = mtd->oobavail;
937 ops.ooboffs = 0; 937 ops.ooboffs = 0;
938 ops.datbuf = d->page_buf; 938 ops.datbuf = d->page_buf;
939 ops.oobbuf = d->oob_buf; 939 ops.oobbuf = d->oob_buf;
@@ -945,7 +945,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
945 for (i = 0; i < mtd_pages; i++) { 945 for (i = 0; i < mtd_pages; i++) {
946 patt = mtdswap_test_patt(test + i); 946 patt = mtdswap_test_patt(test + i);
947 memset(d->page_buf, patt, mtd->writesize); 947 memset(d->page_buf, patt, mtd->writesize);
948 memset(d->oob_buf, patt, mtd->ecclayout->oobavail); 948 memset(d->oob_buf, patt, mtd->oobavail);
949 ret = mtd_write_oob(mtd, pos, &ops); 949 ret = mtd_write_oob(mtd, pos, &ops);
950 if (ret) 950 if (ret)
951 goto error; 951 goto error;
@@ -964,7 +964,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
964 if (p1[j] != patt) 964 if (p1[j] != patt)
965 goto error; 965 goto error;
966 966
967 for (j = 0; j < mtd->ecclayout->oobavail; j++) 967 for (j = 0; j < mtd->oobavail; j++)
968 if (p2[j] != (unsigned char)patt) 968 if (p2[j] != (unsigned char)patt)
969 goto error; 969 goto error;
970 970
@@ -1387,7 +1387,7 @@ static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
1387 if (!d->page_buf) 1387 if (!d->page_buf)
1388 goto page_buf_fail; 1388 goto page_buf_fail;
1389 1389
1390 d->oob_buf = kmalloc(2 * mtd->ecclayout->oobavail, GFP_KERNEL); 1390 d->oob_buf = kmalloc(2 * mtd->oobavail, GFP_KERNEL);
1391 if (!d->oob_buf) 1391 if (!d->oob_buf)
1392 goto oob_buf_fail; 1392 goto oob_buf_fail;
1393 1393
@@ -1417,7 +1417,6 @@ static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
1417 unsigned long part; 1417 unsigned long part;
1418 unsigned int eblocks, eavailable, bad_blocks, spare_cnt; 1418 unsigned int eblocks, eavailable, bad_blocks, spare_cnt;
1419 uint64_t swap_size, use_size, size_limit; 1419 uint64_t swap_size, use_size, size_limit;
1420 struct nand_ecclayout *oinfo;
1421 int ret; 1420 int ret;
1422 1421
1423 parts = &partitions[0]; 1422 parts = &partitions[0];
@@ -1447,17 +1446,10 @@ static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
1447 return; 1446 return;
1448 } 1447 }
1449 1448
1450 oinfo = mtd->ecclayout; 1449 if (!mtd->oobsize || mtd->oobavail < MTDSWAP_OOBSIZE) {
1451 if (!oinfo) {
1452 printk(KERN_ERR "%s: mtd%d does not have OOB\n",
1453 MTDSWAP_PREFIX, mtd->index);
1454 return;
1455 }
1456
1457 if (!mtd->oobsize || oinfo->oobavail < MTDSWAP_OOBSIZE) {
1458 printk(KERN_ERR "%s: Not enough free bytes in OOB, " 1450 printk(KERN_ERR "%s: Not enough free bytes in OOB, "
1459 "%d available, %zu needed.\n", 1451 "%d available, %zu needed.\n",
1460 MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE); 1452 MTDSWAP_PREFIX, mtd->oobavail, MTDSWAP_OOBSIZE);
1461 return; 1453 return;
1462 } 1454 }
1463 1455
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 20f01b3ec23d..f05e0e9eb2f7 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -74,6 +74,7 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR
74config MTD_NAND_GPIO 74config MTD_NAND_GPIO
75 tristate "GPIO assisted NAND Flash driver" 75 tristate "GPIO assisted NAND Flash driver"
76 depends on GPIOLIB || COMPILE_TEST 76 depends on GPIOLIB || COMPILE_TEST
77 depends on HAS_IOMEM
77 help 78 help
78 This enables a NAND flash driver where control signals are 79 This enables a NAND flash driver where control signals are
79 connected to GPIO pins, and commands and data are communicated 80 connected to GPIO pins, and commands and data are communicated
@@ -310,6 +311,7 @@ config MTD_NAND_CAFE
310config MTD_NAND_CS553X 311config MTD_NAND_CS553X
311 tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)" 312 tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
312 depends on X86_32 313 depends on X86_32
314 depends on !UML && HAS_IOMEM
313 help 315 help
314 The CS553x companion chips for the AMD Geode processor 316 The CS553x companion chips for the AMD Geode processor
315 include NAND flash controllers with built-in hardware ECC 317 include NAND flash controllers with built-in hardware ECC
@@ -463,6 +465,7 @@ config MTD_NAND_MPC5121_NFC
463config MTD_NAND_VF610_NFC 465config MTD_NAND_VF610_NFC
464 tristate "Support for Freescale NFC for VF610/MPC5125" 466 tristate "Support for Freescale NFC for VF610/MPC5125"
465 depends on (SOC_VF610 || COMPILE_TEST) 467 depends on (SOC_VF610 || COMPILE_TEST)
468 depends on HAS_IOMEM
466 help 469 help
467 Enables support for NAND Flash Controller on some Freescale 470 Enables support for NAND Flash Controller on some Freescale
468 processors like the VF610, MPC5125, MCF54418 or Kinetis K70. 471 processors like the VF610, MPC5125, MCF54418 or Kinetis K70.
@@ -553,4 +556,11 @@ config MTD_NAND_HISI504
553 help 556 help
554 Enables support for NAND controller on Hisilicon SoC Hip04. 557 Enables support for NAND controller on Hisilicon SoC Hip04.
555 558
559config MTD_NAND_QCOM
560 tristate "Support for NAND on QCOM SoCs"
561 depends on ARCH_QCOM
562 help
563 Enables support for NAND flash chips on SoCs containing the EBI2 NAND
564 controller. This controller is found on IPQ806x SoC.
565
556endif # MTD_NAND 566endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 9e3623308509..f55335373f7c 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -56,5 +56,6 @@ obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
56obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o 56obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
57obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o 57obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
58obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ 58obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
59obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
59 60
60nand-objs := nand_base.o nand_bbt.o nand_timings.o 61nand-objs := nand_base.o nand_bbt.o nand_timings.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index bddcf83d6859..20cbaabb2959 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -65,6 +65,11 @@ module_param(on_flash_bbt, int, 0);
65 65
66struct atmel_nand_caps { 66struct atmel_nand_caps {
67 bool pmecc_correct_erase_page; 67 bool pmecc_correct_erase_page;
68 uint8_t pmecc_max_correction;
69};
70
71struct atmel_nand_nfc_caps {
72 uint32_t rb_mask;
68}; 73};
69 74
70/* oob layout for large page size 75/* oob layout for large page size
@@ -111,6 +116,7 @@ struct atmel_nfc {
111 /* Point to the sram bank which include readed data via NFC */ 116 /* Point to the sram bank which include readed data via NFC */
112 void *data_in_sram; 117 void *data_in_sram;
113 bool will_write_sram; 118 bool will_write_sram;
119 const struct atmel_nand_nfc_caps *caps;
114}; 120};
115static struct atmel_nfc nand_nfc; 121static struct atmel_nfc nand_nfc;
116 122
@@ -140,6 +146,7 @@ struct atmel_nand_host {
140 int pmecc_cw_len; /* Length of codeword */ 146 int pmecc_cw_len; /* Length of codeword */
141 147
142 void __iomem *pmerrloc_base; 148 void __iomem *pmerrloc_base;
149 void __iomem *pmerrloc_el_base;
143 void __iomem *pmecc_rom_base; 150 void __iomem *pmecc_rom_base;
144 151
145 /* lookup table for alpha_to and index_of */ 152 /* lookup table for alpha_to and index_of */
@@ -468,6 +475,7 @@ static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
468 * 8-bits 13-bytes 14-bytes 475 * 8-bits 13-bytes 14-bytes
469 * 12-bits 20-bytes 21-bytes 476 * 12-bits 20-bytes 21-bytes
470 * 24-bits 39-bytes 42-bytes 477 * 24-bits 39-bytes 42-bytes
478 * 32-bits 52-bytes 56-bytes
471 */ 479 */
472static int pmecc_get_ecc_bytes(int cap, int sector_size) 480static int pmecc_get_ecc_bytes(int cap, int sector_size)
473{ 481{
@@ -813,7 +821,7 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
813 sector_size = host->pmecc_sector_size; 821 sector_size = host->pmecc_sector_size;
814 822
815 while (err_nbr) { 823 while (err_nbr) {
816 tmp = pmerrloc_readl_el_relaxed(host->pmerrloc_base, i) - 1; 824 tmp = pmerrloc_readl_el_relaxed(host->pmerrloc_el_base, i) - 1;
817 byte_pos = tmp / 8; 825 byte_pos = tmp / 8;
818 bit_pos = tmp % 8; 826 bit_pos = tmp % 8;
819 827
@@ -825,7 +833,7 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
825 *(buf + byte_pos) ^= (1 << bit_pos); 833 *(buf + byte_pos) ^= (1 << bit_pos);
826 834
827 pos = sector_num * host->pmecc_sector_size + byte_pos; 835 pos = sector_num * host->pmecc_sector_size + byte_pos;
828 dev_info(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n", 836 dev_dbg(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
829 pos, bit_pos, err_byte, *(buf + byte_pos)); 837 pos, bit_pos, err_byte, *(buf + byte_pos));
830 } else { 838 } else {
831 /* Bit flip in OOB area */ 839 /* Bit flip in OOB area */
@@ -835,7 +843,7 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
835 ecc[tmp] ^= (1 << bit_pos); 843 ecc[tmp] ^= (1 << bit_pos);
836 844
837 pos = tmp + nand_chip->ecc.layout->eccpos[0]; 845 pos = tmp + nand_chip->ecc.layout->eccpos[0];
838 dev_info(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n", 846 dev_dbg(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
839 pos, bit_pos, err_byte, ecc[tmp]); 847 pos, bit_pos, err_byte, ecc[tmp]);
840 } 848 }
841 849
@@ -1017,6 +1025,9 @@ static void atmel_pmecc_core_init(struct mtd_info *mtd)
1017 case 24: 1025 case 24:
1018 val = PMECC_CFG_BCH_ERR24; 1026 val = PMECC_CFG_BCH_ERR24;
1019 break; 1027 break;
1028 case 32:
1029 val = PMECC_CFG_BCH_ERR32;
1030 break;
1020 } 1031 }
1021 1032
1022 if (host->pmecc_sector_size == 512) 1033 if (host->pmecc_sector_size == 512)
@@ -1078,6 +1089,9 @@ static int pmecc_choose_ecc(struct atmel_nand_host *host,
1078 1089
1079 /* If device tree doesn't specify, use NAND's minimum ECC parameters */ 1090 /* If device tree doesn't specify, use NAND's minimum ECC parameters */
1080 if (host->pmecc_corr_cap == 0) { 1091 if (host->pmecc_corr_cap == 0) {
1092 if (*cap > host->caps->pmecc_max_correction)
1093 return -EINVAL;
1094
1081 /* use the most fitable ecc bits (the near bigger one ) */ 1095 /* use the most fitable ecc bits (the near bigger one ) */
1082 if (*cap <= 2) 1096 if (*cap <= 2)
1083 host->pmecc_corr_cap = 2; 1097 host->pmecc_corr_cap = 2;
@@ -1089,6 +1103,8 @@ static int pmecc_choose_ecc(struct atmel_nand_host *host,
1089 host->pmecc_corr_cap = 12; 1103 host->pmecc_corr_cap = 12;
1090 else if (*cap <= 24) 1104 else if (*cap <= 24)
1091 host->pmecc_corr_cap = 24; 1105 host->pmecc_corr_cap = 24;
1106 else if (*cap <= 32)
1107 host->pmecc_corr_cap = 32;
1092 else 1108 else
1093 return -EINVAL; 1109 return -EINVAL;
1094 } 1110 }
@@ -1205,6 +1221,8 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
1205 err_no = PTR_ERR(host->pmerrloc_base); 1221 err_no = PTR_ERR(host->pmerrloc_base);
1206 goto err; 1222 goto err;
1207 } 1223 }
1224 host->pmerrloc_el_base = host->pmerrloc_base + ATMEL_PMERRLOC_SIGMAx +
1225 (host->caps->pmecc_max_correction + 1) * 4;
1208 1226
1209 if (!host->has_no_lookup_table) { 1227 if (!host->has_no_lookup_table) {
1210 regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3); 1228 regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
@@ -1486,8 +1504,6 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
1486 ecc_writel(host->ecc, CR, ATMEL_ECC_RST); 1504 ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
1487} 1505}
1488 1506
1489static const struct of_device_id atmel_nand_dt_ids[];
1490
1491static int atmel_of_init_port(struct atmel_nand_host *host, 1507static int atmel_of_init_port(struct atmel_nand_host *host,
1492 struct device_node *np) 1508 struct device_node *np)
1493{ 1509{
@@ -1498,7 +1514,7 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
1498 enum of_gpio_flags flags = 0; 1514 enum of_gpio_flags flags = 0;
1499 1515
1500 host->caps = (struct atmel_nand_caps *) 1516 host->caps = (struct atmel_nand_caps *)
1501 of_match_device(atmel_nand_dt_ids, host->dev)->data; 1517 of_device_get_match_data(host->dev);
1502 1518
1503 if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) { 1519 if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
1504 if (val >= 32) { 1520 if (val >= 32) {
@@ -1547,10 +1563,16 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
1547 * them from NAND ONFI parameters. 1563 * them from NAND ONFI parameters.
1548 */ 1564 */
1549 if (of_property_read_u32(np, "atmel,pmecc-cap", &val) == 0) { 1565 if (of_property_read_u32(np, "atmel,pmecc-cap", &val) == 0) {
1550 if ((val != 2) && (val != 4) && (val != 8) && (val != 12) && 1566 if (val > host->caps->pmecc_max_correction) {
1551 (val != 24)) {
1552 dev_err(host->dev, 1567 dev_err(host->dev,
1553 "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n", 1568 "Required ECC strength too high: %u max %u\n",
1569 val, host->caps->pmecc_max_correction);
1570 return -EINVAL;
1571 }
1572 if ((val != 2) && (val != 4) && (val != 8) &&
1573 (val != 12) && (val != 24) && (val != 32)) {
1574 dev_err(host->dev,
1575 "Required ECC strength not supported: %u\n",
1554 val); 1576 val);
1555 return -EINVAL; 1577 return -EINVAL;
1556 } 1578 }
@@ -1560,7 +1582,7 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
1560 if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) == 0) { 1582 if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) == 0) {
1561 if ((val != 512) && (val != 1024)) { 1583 if ((val != 512) && (val != 1024)) {
1562 dev_err(host->dev, 1584 dev_err(host->dev,
1563 "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n", 1585 "Required ECC sector size not supported: %u\n",
1564 val); 1586 val);
1565 return -EINVAL; 1587 return -EINVAL;
1566 } 1588 }
@@ -1677,9 +1699,9 @@ static irqreturn_t hsmc_interrupt(int irq, void *dev_id)
1677 nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE); 1699 nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE);
1678 ret = IRQ_HANDLED; 1700 ret = IRQ_HANDLED;
1679 } 1701 }
1680 if (pending & NFC_SR_RB_EDGE) { 1702 if (pending & host->nfc->caps->rb_mask) {
1681 complete(&host->nfc->comp_ready); 1703 complete(&host->nfc->comp_ready);
1682 nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE); 1704 nfc_writel(host->nfc->hsmc_regs, IDR, host->nfc->caps->rb_mask);
1683 ret = IRQ_HANDLED; 1705 ret = IRQ_HANDLED;
1684 } 1706 }
1685 if (pending & NFC_SR_CMD_DONE) { 1707 if (pending & NFC_SR_CMD_DONE) {
@@ -1697,7 +1719,7 @@ static void nfc_prepare_interrupt(struct atmel_nand_host *host, u32 flag)
1697 if (flag & NFC_SR_XFR_DONE) 1719 if (flag & NFC_SR_XFR_DONE)
1698 init_completion(&host->nfc->comp_xfer_done); 1720 init_completion(&host->nfc->comp_xfer_done);
1699 1721
1700 if (flag & NFC_SR_RB_EDGE) 1722 if (flag & host->nfc->caps->rb_mask)
1701 init_completion(&host->nfc->comp_ready); 1723 init_completion(&host->nfc->comp_ready);
1702 1724
1703 if (flag & NFC_SR_CMD_DONE) 1725 if (flag & NFC_SR_CMD_DONE)
@@ -1715,7 +1737,7 @@ static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag)
1715 if (flag & NFC_SR_XFR_DONE) 1737 if (flag & NFC_SR_XFR_DONE)
1716 comp[index++] = &host->nfc->comp_xfer_done; 1738 comp[index++] = &host->nfc->comp_xfer_done;
1717 1739
1718 if (flag & NFC_SR_RB_EDGE) 1740 if (flag & host->nfc->caps->rb_mask)
1719 comp[index++] = &host->nfc->comp_ready; 1741 comp[index++] = &host->nfc->comp_ready;
1720 1742
1721 if (flag & NFC_SR_CMD_DONE) 1743 if (flag & NFC_SR_CMD_DONE)
@@ -1783,7 +1805,7 @@ static int nfc_device_ready(struct mtd_info *mtd)
1783 dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n", 1805 dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n",
1784 mask & status); 1806 mask & status);
1785 1807
1786 return status & NFC_SR_RB_EDGE; 1808 return status & host->nfc->caps->rb_mask;
1787} 1809}
1788 1810
1789static void nfc_select_chip(struct mtd_info *mtd, int chip) 1811static void nfc_select_chip(struct mtd_info *mtd, int chip)
@@ -1956,8 +1978,8 @@ static void nfc_nand_command(struct mtd_info *mtd, unsigned int command,
1956 } 1978 }
1957 /* fall through */ 1979 /* fall through */
1958 default: 1980 default:
1959 nfc_prepare_interrupt(host, NFC_SR_RB_EDGE); 1981 nfc_prepare_interrupt(host, host->nfc->caps->rb_mask);
1960 nfc_wait_interrupt(host, NFC_SR_RB_EDGE); 1982 nfc_wait_interrupt(host, host->nfc->caps->rb_mask);
1961 } 1983 }
1962} 1984}
1963 1985
@@ -2304,17 +2326,34 @@ static int atmel_nand_remove(struct platform_device *pdev)
2304 return 0; 2326 return 0;
2305} 2327}
2306 2328
2329/*
2330 * AT91RM9200 does not have PMECC or PMECC Errloc peripherals for
2331 * BCH ECC. Combined with the "atmel,has-pmecc", it is used to describe
2332 * devices from the SAM9 family that have those.
2333 */
2307static const struct atmel_nand_caps at91rm9200_caps = { 2334static const struct atmel_nand_caps at91rm9200_caps = {
2308 .pmecc_correct_erase_page = false, 2335 .pmecc_correct_erase_page = false,
2336 .pmecc_max_correction = 24,
2309}; 2337};
2310 2338
2311static const struct atmel_nand_caps sama5d4_caps = { 2339static const struct atmel_nand_caps sama5d4_caps = {
2312 .pmecc_correct_erase_page = true, 2340 .pmecc_correct_erase_page = true,
2341 .pmecc_max_correction = 24,
2342};
2343
2344/*
2345 * The PMECC Errloc controller starting in SAMA5D2 is not compatible,
2346 * as the increased correction strength requires more registers.
2347 */
2348static const struct atmel_nand_caps sama5d2_caps = {
2349 .pmecc_correct_erase_page = true,
2350 .pmecc_max_correction = 32,
2313}; 2351};
2314 2352
2315static const struct of_device_id atmel_nand_dt_ids[] = { 2353static const struct of_device_id atmel_nand_dt_ids[] = {
2316 { .compatible = "atmel,at91rm9200-nand", .data = &at91rm9200_caps }, 2354 { .compatible = "atmel,at91rm9200-nand", .data = &at91rm9200_caps },
2317 { .compatible = "atmel,sama5d4-nand", .data = &sama5d4_caps }, 2355 { .compatible = "atmel,sama5d4-nand", .data = &sama5d4_caps },
2356 { .compatible = "atmel,sama5d2-nand", .data = &sama5d2_caps },
2318 { /* sentinel */ } 2357 { /* sentinel */ }
2319}; 2358};
2320 2359
@@ -2354,6 +2393,11 @@ static int atmel_nand_nfc_probe(struct platform_device *pdev)
2354 } 2393 }
2355 } 2394 }
2356 2395
2396 nfc->caps = (const struct atmel_nand_nfc_caps *)
2397 of_device_get_match_data(&pdev->dev);
2398 if (!nfc->caps)
2399 return -ENODEV;
2400
2357 nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff); 2401 nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff);
2358 nfc_readl(nfc->hsmc_regs, SR); /* clear the NFC_SR */ 2402 nfc_readl(nfc->hsmc_regs, SR); /* clear the NFC_SR */
2359 2403
@@ -2382,8 +2426,17 @@ static int atmel_nand_nfc_remove(struct platform_device *pdev)
2382 return 0; 2426 return 0;
2383} 2427}
2384 2428
2429static const struct atmel_nand_nfc_caps sama5d3_nfc_caps = {
2430 .rb_mask = NFC_SR_RB_EDGE0,
2431};
2432
2433static const struct atmel_nand_nfc_caps sama5d4_nfc_caps = {
2434 .rb_mask = NFC_SR_RB_EDGE3,
2435};
2436
2385static const struct of_device_id atmel_nand_nfc_match[] = { 2437static const struct of_device_id atmel_nand_nfc_match[] = {
2386 { .compatible = "atmel,sama5d3-nfc" }, 2438 { .compatible = "atmel,sama5d3-nfc", .data = &sama5d3_nfc_caps },
2439 { .compatible = "atmel,sama5d4-nfc", .data = &sama5d4_nfc_caps },
2387 { /* sentinel */ } 2440 { /* sentinel */ }
2388}; 2441};
2389MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match); 2442MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match);
diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h
index 668e7358f19b..834d694487bd 100644
--- a/drivers/mtd/nand/atmel_nand_ecc.h
+++ b/drivers/mtd/nand/atmel_nand_ecc.h
@@ -43,6 +43,7 @@
43#define PMECC_CFG_BCH_ERR8 (2 << 0) 43#define PMECC_CFG_BCH_ERR8 (2 << 0)
44#define PMECC_CFG_BCH_ERR12 (3 << 0) 44#define PMECC_CFG_BCH_ERR12 (3 << 0)
45#define PMECC_CFG_BCH_ERR24 (4 << 0) 45#define PMECC_CFG_BCH_ERR24 (4 << 0)
46#define PMECC_CFG_BCH_ERR32 (5 << 0)
46 47
47#define PMECC_CFG_SECTOR512 (0 << 4) 48#define PMECC_CFG_SECTOR512 (0 << 4)
48#define PMECC_CFG_SECTOR1024 (1 << 4) 49#define PMECC_CFG_SECTOR1024 (1 << 4)
@@ -108,7 +109,11 @@
108#define PMERRLOC_ERR_NUM_MASK (0x1f << 8) 109#define PMERRLOC_ERR_NUM_MASK (0x1f << 8)
109#define PMERRLOC_CALC_DONE (1 << 0) 110#define PMERRLOC_CALC_DONE (1 << 0)
110#define ATMEL_PMERRLOC_SIGMAx 0x028 /* Error location SIGMA x */ 111#define ATMEL_PMERRLOC_SIGMAx 0x028 /* Error location SIGMA x */
111#define ATMEL_PMERRLOC_ELx 0x08c /* Error location x */ 112
113/*
114 * The ATMEL_PMERRLOC_ELx register location depends from the number of
115 * bits corrected by the PMECC controller. Do not use it.
116 */
112 117
113/* Register access macros for PMECC */ 118/* Register access macros for PMECC */
114#define pmecc_readl_relaxed(addr, reg) \ 119#define pmecc_readl_relaxed(addr, reg) \
@@ -136,7 +141,7 @@
136 readl_relaxed((addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4)) 141 readl_relaxed((addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
137 142
138#define pmerrloc_readl_el_relaxed(addr, n) \ 143#define pmerrloc_readl_el_relaxed(addr, n) \
139 readl_relaxed((addr) + ATMEL_PMERRLOC_ELx + ((n) * 4)) 144 readl_relaxed((addr) + ((n) * 4))
140 145
141/* Galois field dimension */ 146/* Galois field dimension */
142#define PMECC_GF_DIMENSION_13 13 147#define PMECC_GF_DIMENSION_13 13
diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h
index 4d5d26221a7e..0bbc1fa97dba 100644
--- a/drivers/mtd/nand/atmel_nand_nfc.h
+++ b/drivers/mtd/nand/atmel_nand_nfc.h
@@ -42,7 +42,8 @@
42#define NFC_SR_UNDEF (1 << 21) 42#define NFC_SR_UNDEF (1 << 21)
43#define NFC_SR_AWB (1 << 22) 43#define NFC_SR_AWB (1 << 22)
44#define NFC_SR_ASE (1 << 23) 44#define NFC_SR_ASE (1 << 23)
45#define NFC_SR_RB_EDGE (1 << 24) 45#define NFC_SR_RB_EDGE0 (1 << 24)
46#define NFC_SR_RB_EDGE3 (1 << 27)
46 47
47#define ATMEL_HSMC_NFC_IER 0x0c 48#define ATMEL_HSMC_NFC_IER 0x0c
48#define ATMEL_HSMC_NFC_IDR 0x10 49#define ATMEL_HSMC_NFC_IDR 0x10
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index 844fc07d22cd..e0528397306a 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -311,6 +311,36 @@ static const u16 brcmnand_regs_v60[] = {
311 [BRCMNAND_FC_BASE] = 0x400, 311 [BRCMNAND_FC_BASE] = 0x400,
312}; 312};
313 313
314/* BRCMNAND v7.1 */
315static const u16 brcmnand_regs_v71[] = {
316 [BRCMNAND_CMD_START] = 0x04,
317 [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
318 [BRCMNAND_CMD_ADDRESS] = 0x0c,
319 [BRCMNAND_INTFC_STATUS] = 0x14,
320 [BRCMNAND_CS_SELECT] = 0x18,
321 [BRCMNAND_CS_XOR] = 0x1c,
322 [BRCMNAND_LL_OP] = 0x20,
323 [BRCMNAND_CS0_BASE] = 0x50,
324 [BRCMNAND_CS1_BASE] = 0,
325 [BRCMNAND_CORR_THRESHOLD] = 0xdc,
326 [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
327 [BRCMNAND_UNCORR_COUNT] = 0xfc,
328 [BRCMNAND_CORR_COUNT] = 0x100,
329 [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
330 [BRCMNAND_CORR_ADDR] = 0x110,
331 [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
332 [BRCMNAND_UNCORR_ADDR] = 0x118,
333 [BRCMNAND_SEMAPHORE] = 0x150,
334 [BRCMNAND_ID] = 0x194,
335 [BRCMNAND_ID_EXT] = 0x198,
336 [BRCMNAND_LL_RDATA] = 0x19c,
337 [BRCMNAND_OOB_READ_BASE] = 0x200,
338 [BRCMNAND_OOB_READ_10_BASE] = 0,
339 [BRCMNAND_OOB_WRITE_BASE] = 0x280,
340 [BRCMNAND_OOB_WRITE_10_BASE] = 0,
341 [BRCMNAND_FC_BASE] = 0x400,
342};
343
314enum brcmnand_cs_reg { 344enum brcmnand_cs_reg {
315 BRCMNAND_CS_CFG_EXT = 0, 345 BRCMNAND_CS_CFG_EXT = 0,
316 BRCMNAND_CS_CFG, 346 BRCMNAND_CS_CFG,
@@ -406,7 +436,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
406 } 436 }
407 437
408 /* Register offsets */ 438 /* Register offsets */
409 if (ctrl->nand_version >= 0x0600) 439 if (ctrl->nand_version >= 0x0701)
440 ctrl->reg_offsets = brcmnand_regs_v71;
441 else if (ctrl->nand_version >= 0x0600)
410 ctrl->reg_offsets = brcmnand_regs_v60; 442 ctrl->reg_offsets = brcmnand_regs_v60;
411 else if (ctrl->nand_version >= 0x0500) 443 else if (ctrl->nand_version >= 0x0500)
412 ctrl->reg_offsets = brcmnand_regs_v50; 444 ctrl->reg_offsets = brcmnand_regs_v50;
@@ -796,7 +828,8 @@ static struct nand_ecclayout *brcmnand_create_layout(int ecc_level,
796 idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1) 828 idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1)
797 break; 829 break;
798 } 830 }
799 goto out; 831
832 return layout;
800 } 833 }
801 834
802 /* 835 /*
@@ -847,10 +880,7 @@ static struct nand_ecclayout *brcmnand_create_layout(int ecc_level,
847 idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1) 880 idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1)
848 break; 881 break;
849 } 882 }
850out: 883
851 /* Sum available OOB */
852 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES_LARGE; i++)
853 layout->oobavail += layout->oobfree[i].length;
854 return layout; 884 return layout;
855} 885}
856 886
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index aa1a616b9fb6..e553aff68987 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -537,7 +537,7 @@ static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
537 return 0; 537 return 0;
538} 538}
539 539
540static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) 540static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
541{ 541{
542 return 0; 542 return 0;
543} 543}
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index f170f3c31b34..547c1002941d 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -794,7 +794,7 @@ static int doc200x_dev_ready(struct mtd_info *mtd)
794 } 794 }
795} 795}
796 796
797static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) 797static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs)
798{ 798{
799 /* This is our last resort if we couldn't find or create a BBT. Just 799 /* This is our last resort if we couldn't find or create a BBT. Just
800 pretend all blocks are good. */ 800 pretend all blocks are good. */
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index df4165b02c62..d86a60e1bbcb 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -225,7 +225,6 @@ struct docg4_priv {
225static struct nand_ecclayout docg4_oobinfo = { 225static struct nand_ecclayout docg4_oobinfo = {
226 .eccbytes = 9, 226 .eccbytes = 9,
227 .eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15}, 227 .eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15},
228 .oobavail = 5,
229 .oobfree = { {.offset = 2, .length = 5} } 228 .oobfree = { {.offset = 2, .length = 5} }
230}; 229};
231 230
@@ -1121,7 +1120,7 @@ static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
1121 return ret; 1120 return ret;
1122} 1121}
1123 1122
1124static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs, int getchip) 1123static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs)
1125{ 1124{
1126 /* only called when module_param ignore_badblocks is set */ 1125 /* only called when module_param ignore_badblocks is set */
1127 return 0; 1126 return 0;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 235ddcb58f39..8122c699ccf2 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Freescale GPMI NAND Flash Driver 2 * Freescale GPMI NAND Flash Driver
3 * 3 *
4 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. 4 * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
5 * Copyright (C) 2008 Embedded Alley Solutions, Inc. 5 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -136,7 +136,7 @@ static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
136 * 136 *
137 * We may have available oob space in this case. 137 * We may have available oob space in this case.
138 */ 138 */
139static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this) 139static int set_geometry_by_ecc_info(struct gpmi_nand_data *this)
140{ 140{
141 struct bch_geometry *geo = &this->bch_geometry; 141 struct bch_geometry *geo = &this->bch_geometry;
142 struct nand_chip *chip = &this->nand; 142 struct nand_chip *chip = &this->nand;
@@ -145,7 +145,7 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
145 unsigned int block_mark_bit_offset; 145 unsigned int block_mark_bit_offset;
146 146
147 if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0)) 147 if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
148 return false; 148 return -EINVAL;
149 149
150 switch (chip->ecc_step_ds) { 150 switch (chip->ecc_step_ds) {
151 case SZ_512: 151 case SZ_512:
@@ -158,19 +158,19 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
158 dev_err(this->dev, 158 dev_err(this->dev,
159 "unsupported nand chip. ecc bits : %d, ecc size : %d\n", 159 "unsupported nand chip. ecc bits : %d, ecc size : %d\n",
160 chip->ecc_strength_ds, chip->ecc_step_ds); 160 chip->ecc_strength_ds, chip->ecc_step_ds);
161 return false; 161 return -EINVAL;
162 } 162 }
163 geo->ecc_chunk_size = chip->ecc_step_ds; 163 geo->ecc_chunk_size = chip->ecc_step_ds;
164 geo->ecc_strength = round_up(chip->ecc_strength_ds, 2); 164 geo->ecc_strength = round_up(chip->ecc_strength_ds, 2);
165 if (!gpmi_check_ecc(this)) 165 if (!gpmi_check_ecc(this))
166 return false; 166 return -EINVAL;
167 167
168 /* Keep the C >= O */ 168 /* Keep the C >= O */
169 if (geo->ecc_chunk_size < mtd->oobsize) { 169 if (geo->ecc_chunk_size < mtd->oobsize) {
170 dev_err(this->dev, 170 dev_err(this->dev,
171 "unsupported nand chip. ecc size: %d, oob size : %d\n", 171 "unsupported nand chip. ecc size: %d, oob size : %d\n",
172 chip->ecc_step_ds, mtd->oobsize); 172 chip->ecc_step_ds, mtd->oobsize);
173 return false; 173 return -EINVAL;
174 } 174 }
175 175
176 /* The default value, see comment in the legacy_set_geometry(). */ 176 /* The default value, see comment in the legacy_set_geometry(). */
@@ -242,7 +242,7 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
242 + ALIGN(geo->ecc_chunk_count, 4); 242 + ALIGN(geo->ecc_chunk_count, 4);
243 243
244 if (!this->swap_block_mark) 244 if (!this->swap_block_mark)
245 return true; 245 return 0;
246 246
247 /* For bit swap. */ 247 /* For bit swap. */
248 block_mark_bit_offset = mtd->writesize * 8 - 248 block_mark_bit_offset = mtd->writesize * 8 -
@@ -251,7 +251,7 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
251 251
252 geo->block_mark_byte_offset = block_mark_bit_offset / 8; 252 geo->block_mark_byte_offset = block_mark_bit_offset / 8;
253 geo->block_mark_bit_offset = block_mark_bit_offset % 8; 253 geo->block_mark_bit_offset = block_mark_bit_offset % 8;
254 return true; 254 return 0;
255} 255}
256 256
257static int legacy_set_geometry(struct gpmi_nand_data *this) 257static int legacy_set_geometry(struct gpmi_nand_data *this)
@@ -285,7 +285,8 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
285 geo->ecc_strength = get_ecc_strength(this); 285 geo->ecc_strength = get_ecc_strength(this);
286 if (!gpmi_check_ecc(this)) { 286 if (!gpmi_check_ecc(this)) {
287 dev_err(this->dev, 287 dev_err(this->dev,
288 "required ecc strength of the NAND chip: %d is not supported by the GPMI controller (%d)\n", 288 "ecc strength: %d cannot be supported by the controller (%d)\n"
289 "try to use minimum ecc strength that NAND chip required\n",
289 geo->ecc_strength, 290 geo->ecc_strength,
290 this->devdata->bch_max_ecc_strength); 291 this->devdata->bch_max_ecc_strength);
291 return -EINVAL; 292 return -EINVAL;
@@ -366,10 +367,11 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
366 367
367int common_nfc_set_geometry(struct gpmi_nand_data *this) 368int common_nfc_set_geometry(struct gpmi_nand_data *this)
368{ 369{
369 if (of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc") 370 if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
370 && set_geometry_by_ecc_info(this)) 371 || legacy_set_geometry(this))
371 return 0; 372 return set_geometry_by_ecc_info(this);
372 return legacy_set_geometry(this); 373
374 return 0;
373} 375}
374 376
375struct dma_chan *get_dma_chan(struct gpmi_nand_data *this) 377struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
@@ -2033,9 +2035,54 @@ static int gpmi_nand_remove(struct platform_device *pdev)
2033 return 0; 2035 return 0;
2034} 2036}
2035 2037
2038#ifdef CONFIG_PM_SLEEP
2039static int gpmi_pm_suspend(struct device *dev)
2040{
2041 struct gpmi_nand_data *this = dev_get_drvdata(dev);
2042
2043 release_dma_channels(this);
2044 return 0;
2045}
2046
2047static int gpmi_pm_resume(struct device *dev)
2048{
2049 struct gpmi_nand_data *this = dev_get_drvdata(dev);
2050 int ret;
2051
2052 ret = acquire_dma_channels(this);
2053 if (ret < 0)
2054 return ret;
2055
2056 /* re-init the GPMI registers */
2057 this->flags &= ~GPMI_TIMING_INIT_OK;
2058 ret = gpmi_init(this);
2059 if (ret) {
2060 dev_err(this->dev, "Error setting GPMI : %d\n", ret);
2061 return ret;
2062 }
2063
2064 /* re-init the BCH registers */
2065 ret = bch_set_geometry(this);
2066 if (ret) {
2067 dev_err(this->dev, "Error setting BCH : %d\n", ret);
2068 return ret;
2069 }
2070
2071 /* re-init others */
2072 gpmi_extra_init(this);
2073
2074 return 0;
2075}
2076#endif /* CONFIG_PM_SLEEP */
2077
2078static const struct dev_pm_ops gpmi_pm_ops = {
2079 SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
2080};
2081
2036static struct platform_driver gpmi_nand_driver = { 2082static struct platform_driver gpmi_nand_driver = {
2037 .driver = { 2083 .driver = {
2038 .name = "gpmi-nand", 2084 .name = "gpmi-nand",
2085 .pm = &gpmi_pm_ops,
2039 .of_match_table = gpmi_nand_id_table, 2086 .of_match_table = gpmi_nand_id_table,
2040 }, 2087 },
2041 .probe = gpmi_nand_probe, 2088 .probe = gpmi_nand_probe,
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index f8d37f36a81c..96502b624cfb 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -632,7 +632,6 @@ static void hisi_nfc_host_init(struct hinfc_host *host)
632} 632}
633 633
634static struct nand_ecclayout nand_ecc_2K_16bits = { 634static struct nand_ecclayout nand_ecc_2K_16bits = {
635 .oobavail = 6,
636 .oobfree = { {2, 6} }, 635 .oobfree = { {2, 6} },
637}; 636};
638 637
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index b19d2a9a5eb9..673ceb2a0b44 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -427,9 +427,6 @@ static int jz_nand_probe(struct platform_device *pdev)
427 chip->ecc.strength = 4; 427 chip->ecc.strength = 4;
428 chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK; 428 chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
429 429
430 if (pdata)
431 chip->ecc.layout = pdata->ecc_layout;
432
433 chip->chip_delay = 50; 430 chip->chip_delay = 50;
434 chip->cmd_ctrl = jz_nand_cmd_ctrl; 431 chip->cmd_ctrl = jz_nand_cmd_ctrl;
435 chip->select_chip = jz_nand_select_chip; 432 chip->select_chip = jz_nand_select_chip;
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 9bc435d72a86..d8c3e7afcc0b 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -750,7 +750,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
750 } 750 }
751 751
752 nand_chip->ecc.mode = NAND_ECC_HW; 752 nand_chip->ecc.mode = NAND_ECC_HW;
753 nand_chip->ecc.size = mtd->writesize; 753 nand_chip->ecc.size = 512;
754 nand_chip->ecc.layout = &lpc32xx_nand_oob; 754 nand_chip->ecc.layout = &lpc32xx_nand_oob;
755 host->mlcsubpages = mtd->writesize / 512; 755 host->mlcsubpages = mtd->writesize / 512;
756 756
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 6b93e899d4e9..5d7843ffff6a 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -626,7 +626,7 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
626 626
627static int mpc5121_nfc_probe(struct platform_device *op) 627static int mpc5121_nfc_probe(struct platform_device *op)
628{ 628{
629 struct device_node *rootnode, *dn = op->dev.of_node; 629 struct device_node *dn = op->dev.of_node;
630 struct clk *clk; 630 struct clk *clk;
631 struct device *dev = &op->dev; 631 struct device *dev = &op->dev;
632 struct mpc5121_nfc_prv *prv; 632 struct mpc5121_nfc_prv *prv;
@@ -712,18 +712,15 @@ static int mpc5121_nfc_probe(struct platform_device *op)
712 chip->ecc.mode = NAND_ECC_SOFT; 712 chip->ecc.mode = NAND_ECC_SOFT;
713 713
714 /* Support external chip-select logic on ADS5121 board */ 714 /* Support external chip-select logic on ADS5121 board */
715 rootnode = of_find_node_by_path("/"); 715 if (of_machine_is_compatible("fsl,mpc5121ads")) {
716 if (of_device_is_compatible(rootnode, "fsl,mpc5121ads")) {
717 retval = ads5121_chipselect_init(mtd); 716 retval = ads5121_chipselect_init(mtd);
718 if (retval) { 717 if (retval) {
719 dev_err(dev, "Chipselect init error!\n"); 718 dev_err(dev, "Chipselect init error!\n");
720 of_node_put(rootnode);
721 return retval; 719 return retval;
722 } 720 }
723 721
724 chip->select_chip = ads5121_select_chip; 722 chip->select_chip = ads5121_select_chip;
725 } 723 }
726 of_node_put(rootnode);
727 724
728 /* Enable NFC clock */ 725 /* Enable NFC clock */
729 clk = devm_clk_get(dev, "ipg"); 726 clk = devm_clk_get(dev, "ipg");
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index f2c8ff398d6c..b6facac54fc0 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -313,13 +313,12 @@ static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
313 * nand_block_bad - [DEFAULT] Read bad block marker from the chip 313 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
314 * @mtd: MTD device structure 314 * @mtd: MTD device structure
315 * @ofs: offset from device start 315 * @ofs: offset from device start
316 * @getchip: 0, if the chip is already selected
317 * 316 *
318 * Check, if the block is bad. 317 * Check, if the block is bad.
319 */ 318 */
320static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) 319static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
321{ 320{
322 int page, chipnr, res = 0, i = 0; 321 int page, res = 0, i = 0;
323 struct nand_chip *chip = mtd_to_nand(mtd); 322 struct nand_chip *chip = mtd_to_nand(mtd);
324 u16 bad; 323 u16 bad;
325 324
@@ -328,15 +327,6 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
328 327
329 page = (int)(ofs >> chip->page_shift) & chip->pagemask; 328 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
330 329
331 if (getchip) {
332 chipnr = (int)(ofs >> chip->chip_shift);
333
334 nand_get_device(mtd, FL_READING);
335
336 /* Select the NAND device */
337 chip->select_chip(mtd, chipnr);
338 }
339
340 do { 330 do {
341 if (chip->options & NAND_BUSWIDTH_16) { 331 if (chip->options & NAND_BUSWIDTH_16) {
342 chip->cmdfunc(mtd, NAND_CMD_READOOB, 332 chip->cmdfunc(mtd, NAND_CMD_READOOB,
@@ -361,11 +351,6 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
361 i++; 351 i++;
362 } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE)); 352 } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
363 353
364 if (getchip) {
365 chip->select_chip(mtd, -1);
366 nand_release_device(mtd);
367 }
368
369 return res; 354 return res;
370} 355}
371 356
@@ -503,19 +488,17 @@ static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
503 * nand_block_checkbad - [GENERIC] Check if a block is marked bad 488 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
504 * @mtd: MTD device structure 489 * @mtd: MTD device structure
505 * @ofs: offset from device start 490 * @ofs: offset from device start
506 * @getchip: 0, if the chip is already selected
507 * @allowbbt: 1, if its allowed to access the bbt area 491 * @allowbbt: 1, if its allowed to access the bbt area
508 * 492 *
509 * Check, if the block is bad. Either by reading the bad block table or 493 * Check, if the block is bad. Either by reading the bad block table or
510 * calling of the scan function. 494 * calling of the scan function.
511 */ 495 */
512static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip, 496static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
513 int allowbbt)
514{ 497{
515 struct nand_chip *chip = mtd_to_nand(mtd); 498 struct nand_chip *chip = mtd_to_nand(mtd);
516 499
517 if (!chip->bbt) 500 if (!chip->bbt)
518 return chip->block_bad(mtd, ofs, getchip); 501 return chip->block_bad(mtd, ofs);
519 502
520 /* Return info from the table */ 503 /* Return info from the table */
521 return nand_isbad_bbt(mtd, ofs, allowbbt); 504 return nand_isbad_bbt(mtd, ofs, allowbbt);
@@ -566,8 +549,8 @@ void nand_wait_ready(struct mtd_info *mtd)
566 cond_resched(); 549 cond_resched();
567 } while (time_before(jiffies, timeo)); 550 } while (time_before(jiffies, timeo));
568 551
569 pr_warn_ratelimited( 552 if (!chip->dev_ready(mtd))
570 "timeout while waiting for chip to become ready\n"); 553 pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
571out: 554out:
572 led_trigger_event(nand_led_trigger, LED_OFF); 555 led_trigger_event(nand_led_trigger, LED_OFF);
573} 556}
@@ -1723,8 +1706,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1723 int ret = 0; 1706 int ret = 0;
1724 uint32_t readlen = ops->len; 1707 uint32_t readlen = ops->len;
1725 uint32_t oobreadlen = ops->ooblen; 1708 uint32_t oobreadlen = ops->ooblen;
1726 uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ? 1709 uint32_t max_oobsize = mtd_oobavail(mtd, ops);
1727 mtd->oobavail : mtd->oobsize;
1728 1710
1729 uint8_t *bufpoi, *oob, *buf; 1711 uint8_t *bufpoi, *oob, *buf;
1730 int use_bufpoi; 1712 int use_bufpoi;
@@ -2075,10 +2057,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
2075 2057
2076 stats = mtd->ecc_stats; 2058 stats = mtd->ecc_stats;
2077 2059
2078 if (ops->mode == MTD_OPS_AUTO_OOB) 2060 len = mtd_oobavail(mtd, ops);
2079 len = chip->ecc.layout->oobavail;
2080 else
2081 len = mtd->oobsize;
2082 2061
2083 if (unlikely(ops->ooboffs >= len)) { 2062 if (unlikely(ops->ooboffs >= len)) {
2084 pr_debug("%s: attempt to start read outside oob\n", 2063 pr_debug("%s: attempt to start read outside oob\n",
@@ -2575,8 +2554,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2575 uint32_t writelen = ops->len; 2554 uint32_t writelen = ops->len;
2576 2555
2577 uint32_t oobwritelen = ops->ooblen; 2556 uint32_t oobwritelen = ops->ooblen;
2578 uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ? 2557 uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
2579 mtd->oobavail : mtd->oobsize;
2580 2558
2581 uint8_t *oob = ops->oobbuf; 2559 uint8_t *oob = ops->oobbuf;
2582 uint8_t *buf = ops->datbuf; 2560 uint8_t *buf = ops->datbuf;
@@ -2766,10 +2744,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2766 pr_debug("%s: to = 0x%08x, len = %i\n", 2744 pr_debug("%s: to = 0x%08x, len = %i\n",
2767 __func__, (unsigned int)to, (int)ops->ooblen); 2745 __func__, (unsigned int)to, (int)ops->ooblen);
2768 2746
2769 if (ops->mode == MTD_OPS_AUTO_OOB) 2747 len = mtd_oobavail(mtd, ops);
2770 len = chip->ecc.layout->oobavail;
2771 else
2772 len = mtd->oobsize;
2773 2748
2774 /* Do not allow write past end of page */ 2749 /* Do not allow write past end of page */
2775 if ((ops->ooboffs + ops->ooblen) > len) { 2750 if ((ops->ooboffs + ops->ooblen) > len) {
@@ -2957,7 +2932,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2957 while (len) { 2932 while (len) {
2958 /* Check if we have a bad block, we do not erase bad blocks! */ 2933 /* Check if we have a bad block, we do not erase bad blocks! */
2959 if (nand_block_checkbad(mtd, ((loff_t) page) << 2934 if (nand_block_checkbad(mtd, ((loff_t) page) <<
2960 chip->page_shift, 0, allowbbt)) { 2935 chip->page_shift, allowbbt)) {
2961 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n", 2936 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
2962 __func__, page); 2937 __func__, page);
2963 instr->state = MTD_ERASE_FAILED; 2938 instr->state = MTD_ERASE_FAILED;
@@ -3044,7 +3019,20 @@ static void nand_sync(struct mtd_info *mtd)
3044 */ 3019 */
3045static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) 3020static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
3046{ 3021{
3047 return nand_block_checkbad(mtd, offs, 1, 0); 3022 struct nand_chip *chip = mtd_to_nand(mtd);
3023 int chipnr = (int)(offs >> chip->chip_shift);
3024 int ret;
3025
3026 /* Select the NAND device */
3027 nand_get_device(mtd, FL_READING);
3028 chip->select_chip(mtd, chipnr);
3029
3030 ret = nand_block_checkbad(mtd, offs, 0);
3031
3032 chip->select_chip(mtd, -1);
3033 nand_release_device(mtd);
3034
3035 return ret;
3048} 3036}
3049 3037
3050/** 3038/**
@@ -4287,10 +4275,8 @@ int nand_scan_tail(struct mtd_info *mtd)
4287 } 4275 }
4288 4276
4289 /* See nand_bch_init() for details. */ 4277 /* See nand_bch_init() for details. */
4290 ecc->bytes = DIV_ROUND_UP( 4278 ecc->bytes = 0;
4291 ecc->strength * fls(8 * ecc->size), 8); 4279 ecc->priv = nand_bch_init(mtd);
4292 ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes,
4293 &ecc->layout);
4294 if (!ecc->priv) { 4280 if (!ecc->priv) {
4295 pr_warn("BCH ECC initialization failed!\n"); 4281 pr_warn("BCH ECC initialization failed!\n");
4296 BUG(); 4282 BUG();
@@ -4325,11 +4311,11 @@ int nand_scan_tail(struct mtd_info *mtd)
4325 * The number of bytes available for a client to place data into 4311 * The number of bytes available for a client to place data into
4326 * the out of band area. 4312 * the out of band area.
4327 */ 4313 */
4328 ecc->layout->oobavail = 0; 4314 mtd->oobavail = 0;
4329 for (i = 0; ecc->layout->oobfree[i].length 4315 if (ecc->layout) {
4330 && i < ARRAY_SIZE(ecc->layout->oobfree); i++) 4316 for (i = 0; ecc->layout->oobfree[i].length; i++)
4331 ecc->layout->oobavail += ecc->layout->oobfree[i].length; 4317 mtd->oobavail += ecc->layout->oobfree[i].length;
4332 mtd->oobavail = ecc->layout->oobavail; 4318 }
4333 4319
4334 /* ECC sanity check: warn if it's too weak */ 4320 /* ECC sanity check: warn if it's too weak */
4335 if (!nand_ecc_strength_good(mtd)) 4321 if (!nand_ecc_strength_good(mtd))
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 4b6a7085b442..2fbb523df066 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -1373,5 +1373,3 @@ int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
1373 1373
1374 return ret; 1374 return ret;
1375} 1375}
1376
1377EXPORT_SYMBOL(nand_scan_bbt);
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c
index a87c1b628dfc..b585bae37929 100644
--- a/drivers/mtd/nand/nand_bch.c
+++ b/drivers/mtd/nand/nand_bch.c
@@ -107,9 +107,6 @@ EXPORT_SYMBOL(nand_bch_correct_data);
107/** 107/**
108 * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction 108 * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction
109 * @mtd: MTD block structure 109 * @mtd: MTD block structure
110 * @eccsize: ecc block size in bytes
111 * @eccbytes: ecc length in bytes
112 * @ecclayout: output default layout
113 * 110 *
114 * Returns: 111 * Returns:
115 * a pointer to a new NAND BCH control structure, or NULL upon failure 112 * a pointer to a new NAND BCH control structure, or NULL upon failure
@@ -123,14 +120,21 @@ EXPORT_SYMBOL(nand_bch_correct_data);
123 * @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8) 120 * @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8)
124 * @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits) 121 * @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits)
125 */ 122 */
126struct nand_bch_control * 123struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
127nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
128 struct nand_ecclayout **ecclayout)
129{ 124{
125 struct nand_chip *nand = mtd_to_nand(mtd);
130 unsigned int m, t, eccsteps, i; 126 unsigned int m, t, eccsteps, i;
131 struct nand_ecclayout *layout; 127 struct nand_ecclayout *layout = nand->ecc.layout;
132 struct nand_bch_control *nbc = NULL; 128 struct nand_bch_control *nbc = NULL;
133 unsigned char *erased_page; 129 unsigned char *erased_page;
130 unsigned int eccsize = nand->ecc.size;
131 unsigned int eccbytes = nand->ecc.bytes;
132 unsigned int eccstrength = nand->ecc.strength;
133
134 if (!eccbytes && eccstrength) {
135 eccbytes = DIV_ROUND_UP(eccstrength * fls(8 * eccsize), 8);
136 nand->ecc.bytes = eccbytes;
137 }
134 138
135 if (!eccsize || !eccbytes) { 139 if (!eccsize || !eccbytes) {
136 printk(KERN_WARNING "ecc parameters not supplied\n"); 140 printk(KERN_WARNING "ecc parameters not supplied\n");
@@ -158,7 +162,7 @@ nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
158 eccsteps = mtd->writesize/eccsize; 162 eccsteps = mtd->writesize/eccsize;
159 163
160 /* if no ecc placement scheme was provided, build one */ 164 /* if no ecc placement scheme was provided, build one */
161 if (!*ecclayout) { 165 if (!layout) {
162 166
163 /* handle large page devices only */ 167 /* handle large page devices only */
164 if (mtd->oobsize < 64) { 168 if (mtd->oobsize < 64) {
@@ -184,7 +188,7 @@ nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
184 layout->oobfree[0].offset = 2; 188 layout->oobfree[0].offset = 2;
185 layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes; 189 layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
186 190
187 *ecclayout = layout; 191 nand->ecc.layout = layout;
188 } 192 }
189 193
190 /* sanity checks */ 194 /* sanity checks */
@@ -192,7 +196,7 @@ nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
192 printk(KERN_WARNING "eccsize %u is too large\n", eccsize); 196 printk(KERN_WARNING "eccsize %u is too large\n", eccsize);
193 goto fail; 197 goto fail;
194 } 198 }
195 if ((*ecclayout)->eccbytes != (eccsteps*eccbytes)) { 199 if (layout->eccbytes != (eccsteps*eccbytes)) {
196 printk(KERN_WARNING "invalid ecc layout\n"); 200 printk(KERN_WARNING "invalid ecc layout\n");
197 goto fail; 201 goto fail;
198 } 202 }
@@ -216,6 +220,9 @@ nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
216 for (i = 0; i < eccbytes; i++) 220 for (i = 0; i < eccbytes; i++)
217 nbc->eccmask[i] ^= 0xff; 221 nbc->eccmask[i] ^= 0xff;
218 222
223 if (!eccstrength)
224 nand->ecc.strength = (eccbytes * 8) / fls(8 * eccsize);
225
219 return nbc; 226 return nbc;
220fail: 227fail:
221 nand_bch_free(nbc); 228 nand_bch_free(nbc);
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index a8804a3da076..ccc05f5b2695 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -50,8 +50,8 @@ struct nand_flash_dev nand_flash_ids[] = {
50 SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) }, 50 SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
51 {"H27UCG8T2ATR-BC 64G 3.3V 8-bit", 51 {"H27UCG8T2ATR-BC 64G 3.3V 8-bit",
52 { .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} }, 52 { .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
53 SZ_8K, SZ_8K, SZ_2M, 0, 6, 640, NAND_ECC_INFO(40, SZ_1K), 53 SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
54 4 }, 54 NAND_ECC_INFO(40, SZ_1K), 4 },
55 55
56 LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS), 56 LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS),
57 LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS), 57 LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 220ddfcf29f5..dbc5b571c2bb 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -113,7 +113,7 @@ static int nuc900_check_rb(struct nuc900_nand *nand)
113{ 113{
114 unsigned int val; 114 unsigned int val;
115 spin_lock(&nand->lock); 115 spin_lock(&nand->lock);
116 val = __raw_readl(REG_SMISR); 116 val = __raw_readl(nand->reg + REG_SMISR);
117 val &= READYBUSY; 117 val &= READYBUSY;
118 spin_unlock(&nand->lock); 118 spin_unlock(&nand->lock);
119 119
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index c553f78ab83f..0749ca1a1456 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1807,13 +1807,19 @@ static int omap_nand_probe(struct platform_device *pdev)
1807 goto return_error; 1807 goto return_error;
1808 } 1808 }
1809 1809
1810 /*
1811 * Bail out earlier to let NAND_ECC_SOFT code create its own
1812 * ecclayout instead of using ours.
1813 */
1814 if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) {
1815 nand_chip->ecc.mode = NAND_ECC_SOFT;
1816 goto scan_tail;
1817 }
1818
1810 /* populate MTD interface based on ECC scheme */ 1819 /* populate MTD interface based on ECC scheme */
1811 ecclayout = &info->oobinfo; 1820 ecclayout = &info->oobinfo;
1821 nand_chip->ecc.layout = ecclayout;
1812 switch (info->ecc_opt) { 1822 switch (info->ecc_opt) {
1813 case OMAP_ECC_HAM1_CODE_SW:
1814 nand_chip->ecc.mode = NAND_ECC_SOFT;
1815 break;
1816
1817 case OMAP_ECC_HAM1_CODE_HW: 1823 case OMAP_ECC_HAM1_CODE_HW:
1818 pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n"); 1824 pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n");
1819 nand_chip->ecc.mode = NAND_ECC_HW; 1825 nand_chip->ecc.mode = NAND_ECC_HW;
@@ -1861,10 +1867,7 @@ static int omap_nand_probe(struct platform_device *pdev)
1861 ecclayout->oobfree->offset = 1 + 1867 ecclayout->oobfree->offset = 1 +
1862 ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; 1868 ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
1863 /* software bch library is used for locating errors */ 1869 /* software bch library is used for locating errors */
1864 nand_chip->ecc.priv = nand_bch_init(mtd, 1870 nand_chip->ecc.priv = nand_bch_init(mtd);
1865 nand_chip->ecc.size,
1866 nand_chip->ecc.bytes,
1867 &ecclayout);
1868 if (!nand_chip->ecc.priv) { 1871 if (!nand_chip->ecc.priv) {
1869 dev_err(&info->pdev->dev, "unable to use BCH library\n"); 1872 dev_err(&info->pdev->dev, "unable to use BCH library\n");
1870 err = -EINVAL; 1873 err = -EINVAL;
@@ -1925,10 +1928,7 @@ static int omap_nand_probe(struct platform_device *pdev)
1925 ecclayout->oobfree->offset = 1 + 1928 ecclayout->oobfree->offset = 1 +
1926 ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; 1929 ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
1927 /* software bch library is used for locating errors */ 1930 /* software bch library is used for locating errors */
1928 nand_chip->ecc.priv = nand_bch_init(mtd, 1931 nand_chip->ecc.priv = nand_bch_init(mtd);
1929 nand_chip->ecc.size,
1930 nand_chip->ecc.bytes,
1931 &ecclayout);
1932 if (!nand_chip->ecc.priv) { 1932 if (!nand_chip->ecc.priv) {
1933 dev_err(&info->pdev->dev, "unable to use BCH library\n"); 1933 dev_err(&info->pdev->dev, "unable to use BCH library\n");
1934 err = -EINVAL; 1934 err = -EINVAL;
@@ -2002,9 +2002,6 @@ static int omap_nand_probe(struct platform_device *pdev)
2002 goto return_error; 2002 goto return_error;
2003 } 2003 }
2004 2004
2005 if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW)
2006 goto scan_tail;
2007
2008 /* all OOB bytes from oobfree->offset till end off OOB are free */ 2005 /* all OOB bytes from oobfree->offset till end off OOB are free */
2009 ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset; 2006 ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset;
2010 /* check if NAND device's OOB is enough to store ECC signatures */ 2007 /* check if NAND device's OOB is enough to store ECC signatures */
@@ -2015,7 +2012,6 @@ static int omap_nand_probe(struct platform_device *pdev)
2015 err = -EINVAL; 2012 err = -EINVAL;
2016 goto return_error; 2013 goto return_error;
2017 } 2014 }
2018 nand_chip->ecc.layout = ecclayout;
2019 2015
2020scan_tail: 2016scan_tail:
2021 /* second phase scan */ 2017 /* second phase scan */
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index a0e26dea1424..e4e50da30444 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -73,7 +73,6 @@ static int plat_nand_probe(struct platform_device *pdev)
73 data->chip.bbt_options |= pdata->chip.bbt_options; 73 data->chip.bbt_options |= pdata->chip.bbt_options;
74 74
75 data->chip.ecc.hwctl = pdata->ctrl.hwcontrol; 75 data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
76 data->chip.ecc.layout = pdata->chip.ecclayout;
77 data->chip.ecc.mode = NAND_ECC_SOFT; 76 data->chip.ecc.mode = NAND_ECC_SOFT;
78 77
79 platform_set_drvdata(pdev, data); 78 platform_set_drvdata(pdev, data);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 86fc245dc71a..d6508856da99 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -131,11 +131,23 @@
131#define READ_ID_BYTES 7 131#define READ_ID_BYTES 7
132 132
133/* macros for registers read/write */ 133/* macros for registers read/write */
134#define nand_writel(info, off, val) \ 134#define nand_writel(info, off, val) \
135 writel_relaxed((val), (info)->mmio_base + (off)) 135 do { \
136 136 dev_vdbg(&info->pdev->dev, \
137#define nand_readl(info, off) \ 137 "%s():%d nand_writel(0x%x, 0x%04x)\n", \
138 readl_relaxed((info)->mmio_base + (off)) 138 __func__, __LINE__, (val), (off)); \
139 writel_relaxed((val), (info)->mmio_base + (off)); \
140 } while (0)
141
142#define nand_readl(info, off) \
143 ({ \
144 unsigned int _v; \
145 _v = readl_relaxed((info)->mmio_base + (off)); \
146 dev_vdbg(&info->pdev->dev, \
147 "%s():%d nand_readl(0x%04x) = 0x%x\n", \
148 __func__, __LINE__, (off), _v); \
149 _v; \
150 })
139 151
140/* error code and state */ 152/* error code and state */
141enum { 153enum {
@@ -199,7 +211,6 @@ struct pxa3xx_nand_info {
199 struct dma_chan *dma_chan; 211 struct dma_chan *dma_chan;
200 dma_cookie_t dma_cookie; 212 dma_cookie_t dma_cookie;
201 int drcmr_dat; 213 int drcmr_dat;
202 int drcmr_cmd;
203 214
204 unsigned char *data_buff; 215 unsigned char *data_buff;
205 unsigned char *oob_buff; 216 unsigned char *oob_buff;
@@ -222,15 +233,44 @@ struct pxa3xx_nand_info {
222 int use_spare; /* use spare ? */ 233 int use_spare; /* use spare ? */
223 int need_wait; 234 int need_wait;
224 235
225 unsigned int data_size; /* data to be read from FIFO */ 236 /* Amount of real data per full chunk */
226 unsigned int chunk_size; /* split commands chunk size */ 237 unsigned int chunk_size;
227 unsigned int oob_size; 238
239 /* Amount of spare data per full chunk */
228 unsigned int spare_size; 240 unsigned int spare_size;
241
242 /* Number of full chunks (i.e chunk_size + spare_size) */
243 unsigned int nfullchunks;
244
245 /*
246 * Total number of chunks. If equal to nfullchunks, then there
247 * are only full chunks. Otherwise, there is one last chunk of
248 * size (last_chunk_size + last_spare_size)
249 */
250 unsigned int ntotalchunks;
251
252 /* Amount of real data in the last chunk */
253 unsigned int last_chunk_size;
254
255 /* Amount of spare data in the last chunk */
256 unsigned int last_spare_size;
257
229 unsigned int ecc_size; 258 unsigned int ecc_size;
230 unsigned int ecc_err_cnt; 259 unsigned int ecc_err_cnt;
231 unsigned int max_bitflips; 260 unsigned int max_bitflips;
232 int retcode; 261 int retcode;
233 262
263 /*
264 * Variables only valid during command
265 * execution. step_chunk_size and step_spare_size is the
266 * amount of real data and spare data in the current
267 * chunk. cur_chunk is the current chunk being
268 * read/programmed.
269 */
270 unsigned int step_chunk_size;
271 unsigned int step_spare_size;
272 unsigned int cur_chunk;
273
234 /* cached register value */ 274 /* cached register value */
235 uint32_t reg_ndcr; 275 uint32_t reg_ndcr;
236 uint32_t ndtr0cs0; 276 uint32_t ndtr0cs0;
@@ -526,25 +566,6 @@ static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
526 return 0; 566 return 0;
527} 567}
528 568
529/*
530 * Set the data and OOB size, depending on the selected
531 * spare and ECC configuration.
532 * Only applicable to READ0, READOOB and PAGEPROG commands.
533 */
534static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
535 struct mtd_info *mtd)
536{
537 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
538
539 info->data_size = mtd->writesize;
540 if (!oob_enable)
541 return;
542
543 info->oob_size = info->spare_size;
544 if (!info->use_ecc)
545 info->oob_size += info->ecc_size;
546}
547
548/** 569/**
549 * NOTE: it is a must to set ND_RUN firstly, then write 570 * NOTE: it is a must to set ND_RUN firstly, then write
550 * command buffer, otherwise, it does not work. 571 * command buffer, otherwise, it does not work.
@@ -660,28 +681,28 @@ static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
660 681
661static void handle_data_pio(struct pxa3xx_nand_info *info) 682static void handle_data_pio(struct pxa3xx_nand_info *info)
662{ 683{
663 unsigned int do_bytes = min(info->data_size, info->chunk_size);
664
665 switch (info->state) { 684 switch (info->state) {
666 case STATE_PIO_WRITING: 685 case STATE_PIO_WRITING:
667 writesl(info->mmio_base + NDDB, 686 if (info->step_chunk_size)
668 info->data_buff + info->data_buff_pos, 687 writesl(info->mmio_base + NDDB,
669 DIV_ROUND_UP(do_bytes, 4)); 688 info->data_buff + info->data_buff_pos,
689 DIV_ROUND_UP(info->step_chunk_size, 4));
670 690
671 if (info->oob_size > 0) 691 if (info->step_spare_size)
672 writesl(info->mmio_base + NDDB, 692 writesl(info->mmio_base + NDDB,
673 info->oob_buff + info->oob_buff_pos, 693 info->oob_buff + info->oob_buff_pos,
674 DIV_ROUND_UP(info->oob_size, 4)); 694 DIV_ROUND_UP(info->step_spare_size, 4));
675 break; 695 break;
676 case STATE_PIO_READING: 696 case STATE_PIO_READING:
677 drain_fifo(info, 697 if (info->step_chunk_size)
678 info->data_buff + info->data_buff_pos, 698 drain_fifo(info,
679 DIV_ROUND_UP(do_bytes, 4)); 699 info->data_buff + info->data_buff_pos,
700 DIV_ROUND_UP(info->step_chunk_size, 4));
680 701
681 if (info->oob_size > 0) 702 if (info->step_spare_size)
682 drain_fifo(info, 703 drain_fifo(info,
683 info->oob_buff + info->oob_buff_pos, 704 info->oob_buff + info->oob_buff_pos,
684 DIV_ROUND_UP(info->oob_size, 4)); 705 DIV_ROUND_UP(info->step_spare_size, 4));
685 break; 706 break;
686 default: 707 default:
687 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, 708 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
@@ -690,9 +711,8 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
690 } 711 }
691 712
692 /* Update buffer pointers for multi-page read/write */ 713 /* Update buffer pointers for multi-page read/write */
693 info->data_buff_pos += do_bytes; 714 info->data_buff_pos += info->step_chunk_size;
694 info->oob_buff_pos += info->oob_size; 715 info->oob_buff_pos += info->step_spare_size;
695 info->data_size -= do_bytes;
696} 716}
697 717
698static void pxa3xx_nand_data_dma_irq(void *data) 718static void pxa3xx_nand_data_dma_irq(void *data)
@@ -733,8 +753,9 @@ static void start_data_dma(struct pxa3xx_nand_info *info)
733 info->state); 753 info->state);
734 BUG(); 754 BUG();
735 } 755 }
736 info->sg.length = info->data_size + 756 info->sg.length = info->chunk_size;
737 (info->oob_size ? info->spare_size + info->ecc_size : 0); 757 if (info->use_spare)
758 info->sg.length += info->spare_size + info->ecc_size;
738 dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir); 759 dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
739 760
740 tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction, 761 tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
@@ -895,9 +916,11 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
895 /* reset data and oob column point to handle data */ 916 /* reset data and oob column point to handle data */
896 info->buf_start = 0; 917 info->buf_start = 0;
897 info->buf_count = 0; 918 info->buf_count = 0;
898 info->oob_size = 0;
899 info->data_buff_pos = 0; 919 info->data_buff_pos = 0;
900 info->oob_buff_pos = 0; 920 info->oob_buff_pos = 0;
921 info->step_chunk_size = 0;
922 info->step_spare_size = 0;
923 info->cur_chunk = 0;
901 info->use_ecc = 0; 924 info->use_ecc = 0;
902 info->use_spare = 1; 925 info->use_spare = 1;
903 info->retcode = ERR_NONE; 926 info->retcode = ERR_NONE;
@@ -909,8 +932,6 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
909 case NAND_CMD_READ0: 932 case NAND_CMD_READ0:
910 case NAND_CMD_PAGEPROG: 933 case NAND_CMD_PAGEPROG:
911 info->use_ecc = 1; 934 info->use_ecc = 1;
912 case NAND_CMD_READOOB:
913 pxa3xx_set_datasize(info, mtd);
914 break; 935 break;
915 case NAND_CMD_PARAM: 936 case NAND_CMD_PARAM:
916 info->use_spare = 0; 937 info->use_spare = 0;
@@ -969,6 +990,14 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
969 if (command == NAND_CMD_READOOB) 990 if (command == NAND_CMD_READOOB)
970 info->buf_start += mtd->writesize; 991 info->buf_start += mtd->writesize;
971 992
993 if (info->cur_chunk < info->nfullchunks) {
994 info->step_chunk_size = info->chunk_size;
995 info->step_spare_size = info->spare_size;
996 } else {
997 info->step_chunk_size = info->last_chunk_size;
998 info->step_spare_size = info->last_spare_size;
999 }
1000
972 /* 1001 /*
973 * Multiple page read needs an 'extended command type' field, 1002 * Multiple page read needs an 'extended command type' field,
974 * which is either naked-read or last-read according to the 1003 * which is either naked-read or last-read according to the
@@ -980,8 +1009,8 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
980 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) 1009 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
981 | NDCB0_LEN_OVRD 1010 | NDCB0_LEN_OVRD
982 | NDCB0_EXT_CMD_TYPE(ext_cmd_type); 1011 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
983 info->ndcb3 = info->chunk_size + 1012 info->ndcb3 = info->step_chunk_size +
984 info->oob_size; 1013 info->step_spare_size;
985 } 1014 }
986 1015
987 set_command_address(info, mtd->writesize, column, page_addr); 1016 set_command_address(info, mtd->writesize, column, page_addr);
@@ -1001,8 +1030,6 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
1001 | NDCB0_EXT_CMD_TYPE(ext_cmd_type) 1030 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
1002 | addr_cycle 1031 | addr_cycle
1003 | command; 1032 | command;
1004 /* No data transfer in this case */
1005 info->data_size = 0;
1006 exec_cmd = 1; 1033 exec_cmd = 1;
1007 } 1034 }
1008 break; 1035 break;
@@ -1014,6 +1041,14 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
1014 break; 1041 break;
1015 } 1042 }
1016 1043
1044 if (info->cur_chunk < info->nfullchunks) {
1045 info->step_chunk_size = info->chunk_size;
1046 info->step_spare_size = info->spare_size;
1047 } else {
1048 info->step_chunk_size = info->last_chunk_size;
1049 info->step_spare_size = info->last_spare_size;
1050 }
1051
1017 /* Second command setting for large pages */ 1052 /* Second command setting for large pages */
1018 if (mtd->writesize > PAGE_CHUNK_SIZE) { 1053 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1019 /* 1054 /*
@@ -1024,14 +1059,14 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
1024 info->ndcb0 |= NDCB0_CMD_TYPE(0x1) 1059 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
1025 | NDCB0_LEN_OVRD 1060 | NDCB0_LEN_OVRD
1026 | NDCB0_EXT_CMD_TYPE(ext_cmd_type); 1061 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
1027 info->ndcb3 = info->chunk_size + 1062 info->ndcb3 = info->step_chunk_size +
1028 info->oob_size; 1063 info->step_spare_size;
1029 1064
1030 /* 1065 /*
1031 * This is the command dispatch that completes a chunked 1066 * This is the command dispatch that completes a chunked
1032 * page program operation. 1067 * page program operation.
1033 */ 1068 */
1034 if (info->data_size == 0) { 1069 if (info->cur_chunk == info->ntotalchunks) {
1035 info->ndcb0 = NDCB0_CMD_TYPE(0x1) 1070 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
1036 | NDCB0_EXT_CMD_TYPE(ext_cmd_type) 1071 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
1037 | command; 1072 | command;
@@ -1058,7 +1093,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
1058 | command; 1093 | command;
1059 info->ndcb1 = (column & 0xFF); 1094 info->ndcb1 = (column & 0xFF);
1060 info->ndcb3 = INIT_BUFFER_SIZE; 1095 info->ndcb3 = INIT_BUFFER_SIZE;
1061 info->data_size = INIT_BUFFER_SIZE; 1096 info->step_chunk_size = INIT_BUFFER_SIZE;
1062 break; 1097 break;
1063 1098
1064 case NAND_CMD_READID: 1099 case NAND_CMD_READID:
@@ -1068,7 +1103,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
1068 | command; 1103 | command;
1069 info->ndcb1 = (column & 0xFF); 1104 info->ndcb1 = (column & 0xFF);
1070 1105
1071 info->data_size = 8; 1106 info->step_chunk_size = 8;
1072 break; 1107 break;
1073 case NAND_CMD_STATUS: 1108 case NAND_CMD_STATUS:
1074 info->buf_count = 1; 1109 info->buf_count = 1;
@@ -1076,7 +1111,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
1076 | NDCB0_ADDR_CYC(1) 1111 | NDCB0_ADDR_CYC(1)
1077 | command; 1112 | command;
1078 1113
1079 info->data_size = 8; 1114 info->step_chunk_size = 8;
1080 break; 1115 break;
1081 1116
1082 case NAND_CMD_ERASE1: 1117 case NAND_CMD_ERASE1:
@@ -1217,6 +1252,7 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
1217 init_completion(&info->dev_ready); 1252 init_completion(&info->dev_ready);
1218 do { 1253 do {
1219 info->state = STATE_PREPARED; 1254 info->state = STATE_PREPARED;
1255
1220 exec_cmd = prepare_set_command(info, command, ext_cmd_type, 1256 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1221 column, page_addr); 1257 column, page_addr);
1222 if (!exec_cmd) { 1258 if (!exec_cmd) {
@@ -1236,22 +1272,30 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
1236 break; 1272 break;
1237 } 1273 }
1238 1274
1275 /* Only a few commands need several steps */
1276 if (command != NAND_CMD_PAGEPROG &&
1277 command != NAND_CMD_READ0 &&
1278 command != NAND_CMD_READOOB)
1279 break;
1280
1281 info->cur_chunk++;
1282
1239 /* Check if the sequence is complete */ 1283 /* Check if the sequence is complete */
1240 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG) 1284 if (info->cur_chunk == info->ntotalchunks && command != NAND_CMD_PAGEPROG)
1241 break; 1285 break;
1242 1286
1243 /* 1287 /*
1244 * After a splitted program command sequence has issued 1288 * After a splitted program command sequence has issued
1245 * the command dispatch, the command sequence is complete. 1289 * the command dispatch, the command sequence is complete.
1246 */ 1290 */
1247 if (info->data_size == 0 && 1291 if (info->cur_chunk == (info->ntotalchunks + 1) &&
1248 command == NAND_CMD_PAGEPROG && 1292 command == NAND_CMD_PAGEPROG &&
1249 ext_cmd_type == EXT_CMD_TYPE_DISPATCH) 1293 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1250 break; 1294 break;
1251 1295
1252 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) { 1296 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1253 /* Last read: issue a 'last naked read' */ 1297 /* Last read: issue a 'last naked read' */
1254 if (info->data_size == info->chunk_size) 1298 if (info->cur_chunk == info->ntotalchunks - 1)
1255 ext_cmd_type = EXT_CMD_TYPE_LAST_RW; 1299 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1256 else 1300 else
1257 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW; 1301 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
@@ -1261,7 +1305,7 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
1261 * the command dispatch must be issued to complete. 1305 * the command dispatch must be issued to complete.
1262 */ 1306 */
1263 } else if (command == NAND_CMD_PAGEPROG && 1307 } else if (command == NAND_CMD_PAGEPROG &&
1264 info->data_size == 0) { 1308 info->cur_chunk == info->ntotalchunks) {
1265 ext_cmd_type = EXT_CMD_TYPE_DISPATCH; 1309 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1266 } 1310 }
1267 } while (1); 1311 } while (1);
@@ -1506,6 +1550,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1506 int strength, int ecc_stepsize, int page_size) 1550 int strength, int ecc_stepsize, int page_size)
1507{ 1551{
1508 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) { 1552 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1553 info->nfullchunks = 1;
1554 info->ntotalchunks = 1;
1509 info->chunk_size = 2048; 1555 info->chunk_size = 2048;
1510 info->spare_size = 40; 1556 info->spare_size = 40;
1511 info->ecc_size = 24; 1557 info->ecc_size = 24;
@@ -1514,6 +1560,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1514 ecc->strength = 1; 1560 ecc->strength = 1;
1515 1561
1516 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) { 1562 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1563 info->nfullchunks = 1;
1564 info->ntotalchunks = 1;
1517 info->chunk_size = 512; 1565 info->chunk_size = 512;
1518 info->spare_size = 8; 1566 info->spare_size = 8;
1519 info->ecc_size = 8; 1567 info->ecc_size = 8;
@@ -1527,6 +1575,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1527 */ 1575 */
1528 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) { 1576 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1529 info->ecc_bch = 1; 1577 info->ecc_bch = 1;
1578 info->nfullchunks = 1;
1579 info->ntotalchunks = 1;
1530 info->chunk_size = 2048; 1580 info->chunk_size = 2048;
1531 info->spare_size = 32; 1581 info->spare_size = 32;
1532 info->ecc_size = 32; 1582 info->ecc_size = 32;
@@ -1537,6 +1587,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1537 1587
1538 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) { 1588 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1539 info->ecc_bch = 1; 1589 info->ecc_bch = 1;
1590 info->nfullchunks = 2;
1591 info->ntotalchunks = 2;
1540 info->chunk_size = 2048; 1592 info->chunk_size = 2048;
1541 info->spare_size = 32; 1593 info->spare_size = 32;
1542 info->ecc_size = 32; 1594 info->ecc_size = 32;
@@ -1551,8 +1603,12 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1551 */ 1603 */
1552 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) { 1604 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1553 info->ecc_bch = 1; 1605 info->ecc_bch = 1;
1606 info->nfullchunks = 4;
1607 info->ntotalchunks = 5;
1554 info->chunk_size = 1024; 1608 info->chunk_size = 1024;
1555 info->spare_size = 0; 1609 info->spare_size = 0;
1610 info->last_chunk_size = 0;
1611 info->last_spare_size = 64;
1556 info->ecc_size = 32; 1612 info->ecc_size = 32;
1557 ecc->mode = NAND_ECC_HW; 1613 ecc->mode = NAND_ECC_HW;
1558 ecc->size = info->chunk_size; 1614 ecc->size = info->chunk_size;
@@ -1738,7 +1794,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
1738 if (ret < 0) 1794 if (ret < 0)
1739 return ret; 1795 return ret;
1740 1796
1741 if (use_dma) { 1797 if (!np && use_dma) {
1742 r = platform_get_resource(pdev, IORESOURCE_DMA, 0); 1798 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1743 if (r == NULL) { 1799 if (r == NULL) {
1744 dev_err(&pdev->dev, 1800 dev_err(&pdev->dev,
@@ -1747,15 +1803,6 @@ static int alloc_nand_resource(struct platform_device *pdev)
1747 goto fail_disable_clk; 1803 goto fail_disable_clk;
1748 } 1804 }
1749 info->drcmr_dat = r->start; 1805 info->drcmr_dat = r->start;
1750
1751 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1752 if (r == NULL) {
1753 dev_err(&pdev->dev,
1754 "no resource defined for cmd DMA\n");
1755 ret = -ENXIO;
1756 goto fail_disable_clk;
1757 }
1758 info->drcmr_cmd = r->start;
1759 } 1806 }
1760 1807
1761 irq = platform_get_irq(pdev, 0); 1808 irq = platform_get_irq(pdev, 0);
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
new file mode 100644
index 000000000000..f550a57e6eea
--- /dev/null
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -0,0 +1,2223 @@
1/*
2 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/clk.h>
15#include <linux/slab.h>
16#include <linux/bitops.h>
17#include <linux/dma-mapping.h>
18#include <linux/dmaengine.h>
19#include <linux/module.h>
20#include <linux/mtd/nand.h>
21#include <linux/mtd/partitions.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/of_mtd.h>
25#include <linux/delay.h>
26
27/* NANDc reg offsets */
28#define NAND_FLASH_CMD 0x00
29#define NAND_ADDR0 0x04
30#define NAND_ADDR1 0x08
31#define NAND_FLASH_CHIP_SELECT 0x0c
32#define NAND_EXEC_CMD 0x10
33#define NAND_FLASH_STATUS 0x14
34#define NAND_BUFFER_STATUS 0x18
35#define NAND_DEV0_CFG0 0x20
36#define NAND_DEV0_CFG1 0x24
37#define NAND_DEV0_ECC_CFG 0x28
38#define NAND_DEV1_ECC_CFG 0x2c
39#define NAND_DEV1_CFG0 0x30
40#define NAND_DEV1_CFG1 0x34
41#define NAND_READ_ID 0x40
42#define NAND_READ_STATUS 0x44
43#define NAND_DEV_CMD0 0xa0
44#define NAND_DEV_CMD1 0xa4
45#define NAND_DEV_CMD2 0xa8
46#define NAND_DEV_CMD_VLD 0xac
47#define SFLASHC_BURST_CFG 0xe0
48#define NAND_ERASED_CW_DETECT_CFG 0xe8
49#define NAND_ERASED_CW_DETECT_STATUS 0xec
50#define NAND_EBI2_ECC_BUF_CFG 0xf0
51#define FLASH_BUF_ACC 0x100
52
53#define NAND_CTRL 0xf00
54#define NAND_VERSION 0xf08
55#define NAND_READ_LOCATION_0 0xf20
56#define NAND_READ_LOCATION_1 0xf24
57
58/* dummy register offsets, used by write_reg_dma */
59#define NAND_DEV_CMD1_RESTORE 0xdead
60#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
61
62/* NAND_FLASH_CMD bits */
63#define PAGE_ACC BIT(4)
64#define LAST_PAGE BIT(5)
65
66/* NAND_FLASH_CHIP_SELECT bits */
67#define NAND_DEV_SEL 0
68#define DM_EN BIT(2)
69
70/* NAND_FLASH_STATUS bits */
71#define FS_OP_ERR BIT(4)
72#define FS_READY_BSY_N BIT(5)
73#define FS_MPU_ERR BIT(8)
74#define FS_DEVICE_STS_ERR BIT(16)
75#define FS_DEVICE_WP BIT(23)
76
77/* NAND_BUFFER_STATUS bits */
78#define BS_UNCORRECTABLE_BIT BIT(8)
79#define BS_CORRECTABLE_ERR_MSK 0x1f
80
81/* NAND_DEVn_CFG0 bits */
82#define DISABLE_STATUS_AFTER_WRITE 4
83#define CW_PER_PAGE 6
84#define UD_SIZE_BYTES 9
85#define ECC_PARITY_SIZE_BYTES_RS 19
86#define SPARE_SIZE_BYTES 23
87#define NUM_ADDR_CYCLES 27
88#define STATUS_BFR_READ 30
89#define SET_RD_MODE_AFTER_STATUS 31
90
 91/* NAND_DEVn_CFG1 bits */
92#define DEV0_CFG1_ECC_DISABLE 0
93#define WIDE_FLASH 1
94#define NAND_RECOVERY_CYCLES 2
95#define CS_ACTIVE_BSY 5
96#define BAD_BLOCK_BYTE_NUM 6
97#define BAD_BLOCK_IN_SPARE_AREA 16
98#define WR_RD_BSY_GAP 17
99#define ENABLE_BCH_ECC 27
100
101/* NAND_DEV0_ECC_CFG bits */
102#define ECC_CFG_ECC_DISABLE 0
103#define ECC_SW_RESET 1
104#define ECC_MODE 4
105#define ECC_PARITY_SIZE_BYTES_BCH 8
106#define ECC_NUM_DATA_BYTES 16
107#define ECC_FORCE_CLK_OPEN 30
108
109/* NAND_DEV_CMD1 bits */
110#define READ_ADDR 0
111
112/* NAND_DEV_CMD_VLD bits */
113#define READ_START_VLD 0
114
115/* NAND_EBI2_ECC_BUF_CFG bits */
116#define NUM_STEPS 0
117
118/* NAND_ERASED_CW_DETECT_CFG bits */
119#define ERASED_CW_ECC_MASK 1
120#define AUTO_DETECT_RES 0
121#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
122#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
123#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
124#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
125#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
126
127/* NAND_ERASED_CW_DETECT_STATUS bits */
128#define PAGE_ALL_ERASED BIT(7)
129#define CODEWORD_ALL_ERASED BIT(6)
130#define PAGE_ERASED BIT(5)
131#define CODEWORD_ERASED BIT(4)
132#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
133#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
134
135/* Version Mask */
136#define NAND_VERSION_MAJOR_MASK 0xf0000000
137#define NAND_VERSION_MAJOR_SHIFT 28
138#define NAND_VERSION_MINOR_MASK 0x0fff0000
139#define NAND_VERSION_MINOR_SHIFT 16
140
141/* NAND OP_CMDs */
142#define PAGE_READ 0x2
143#define PAGE_READ_WITH_ECC 0x3
144#define PAGE_READ_WITH_ECC_SPARE 0x4
145#define PROGRAM_PAGE 0x6
146#define PAGE_PROGRAM_WITH_ECC 0x7
147#define PROGRAM_PAGE_SPARE 0x9
148#define BLOCK_ERASE 0xa
149#define FETCH_ID 0xb
150#define RESET_DEVICE 0xd
151
152/*
153 * the NAND controller performs reads/writes with ECC in 516 byte chunks.
154 * the driver calls the chunks 'step' or 'codeword' interchangeably
155 */
156#define NANDC_STEP_SIZE 512
157
158/*
159 * the largest page size we support is 8K, this will have 16 steps/codewords
160 * of 512 bytes each
161 */
162#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
163
164/* we read at most 3 registers per codeword scan */
165#define MAX_REG_RD (3 * MAX_NUM_STEPS)
166
167/* ECC modes supported by the controller */
168#define ECC_NONE BIT(0)
169#define ECC_RS_4BIT BIT(1)
170#define ECC_BCH_4BIT BIT(2)
171#define ECC_BCH_8BIT BIT(3)
172
173struct desc_info {
174 struct list_head node;
175
176 enum dma_data_direction dir;
177 struct scatterlist sgl;
178 struct dma_async_tx_descriptor *dma_desc;
179};
180
181/*
182 * holds the current register values that we want to write. acts as a contiguous
183 * chunk of memory which we use to write the controller registers through DMA.
184 */
185struct nandc_regs {
186 __le32 cmd;
187 __le32 addr0;
188 __le32 addr1;
189 __le32 chip_sel;
190 __le32 exec;
191
192 __le32 cfg0;
193 __le32 cfg1;
194 __le32 ecc_bch_cfg;
195
196 __le32 clrflashstatus;
197 __le32 clrreadstatus;
198
199 __le32 cmd1;
200 __le32 vld;
201
202 __le32 orig_cmd1;
203 __le32 orig_vld;
204
205 __le32 ecc_buf_cfg;
206};
207
208/*
209 * NAND controller data struct
210 *
211 * @controller: base controller structure
212 * @host_list: list containing all the chips attached to the
213 * controller
214 * @dev: parent device
215 * @base: MMIO base
216 * @base_dma: physical base address of controller registers
217 * @core_clk: controller clock
218 * @aon_clk: another controller clock
219 *
220 * @chan: dma channel
221 * @cmd_crci: ADM DMA CRCI for command flow control
222 * @data_crci: ADM DMA CRCI for data flow control
223 * @desc_list: DMA descriptor list (list of desc_infos)
224 *
225 * @data_buffer: our local DMA buffer for page read/writes,
226 * used when we can't use the buffer provided
227 * by upper layers directly
228 * @buf_size/count/start: markers for chip->read_buf/write_buf functions
229 * @reg_read_buf: local buffer for reading back registers via DMA
230 * @reg_read_pos: marker for data read in reg_read_buf
231 *
232 * @regs: a contiguous chunk of memory for DMA register
233 * writes. contains the register values to be
234 * written to controller
235 * @cmd1/vld: some fixed controller register values
236 * @ecc_modes: supported ECC modes by the current controller,
237 * initialized via DT match data
238 */
239struct qcom_nand_controller {
240 struct nand_hw_control controller;
241 struct list_head host_list;
242
243 struct device *dev;
244
245 void __iomem *base;
246 dma_addr_t base_dma;
247
248 struct clk *core_clk;
249 struct clk *aon_clk;
250
251 struct dma_chan *chan;
252 unsigned int cmd_crci;
253 unsigned int data_crci;
254 struct list_head desc_list;
255
256 u8 *data_buffer;
257 int buf_size;
258 int buf_count;
259 int buf_start;
260
261 __le32 *reg_read_buf;
262 int reg_read_pos;
263
264 struct nandc_regs *regs;
265
266 u32 cmd1, vld;
267 u32 ecc_modes;
268};
269
270/*
271 * NAND chip structure
272 *
273 * @chip: base NAND chip structure
274 * @node: list node to add itself to host_list in
275 * qcom_nand_controller
276 *
277 * @cs: chip select value for this chip
278 * @cw_size: the number of bytes in a single step/codeword
279 * of a page, consisting of all data, ecc, spare
280 * and reserved bytes
281 * @cw_data: the number of bytes within a codeword protected
282 * by ECC
283 * @use_ecc: request the controller to use ECC for the
284 * upcoming read/write
285 * @bch_enabled: flag to tell whether BCH ECC mode is used
286 * @ecc_bytes_hw: ECC bytes used by controller hardware for this
287 * chip
288 * @status: value to be returned if NAND_CMD_STATUS command
289 * is executed
290 * @last_command: keeps track of last command on this chip. used
291 * for reading correct status
292 *
293 * @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
294 * ecc/non-ecc mode for the current nand flash
295 * device
296 */
297struct qcom_nand_host {
298 struct nand_chip chip;
299 struct list_head node;
300
301 int cs;
302 int cw_size;
303 int cw_data;
304 bool use_ecc;
305 bool bch_enabled;
306 int ecc_bytes_hw;
307 int spare_bytes;
308 int bbm_size;
309 u8 status;
310 int last_command;
311
312 u32 cfg0, cfg1;
313 u32 cfg0_raw, cfg1_raw;
314 u32 ecc_buf_cfg;
315 u32 ecc_bch_cfg;
316 u32 clrflashstatus;
317 u32 clrreadstatus;
318};
319
320static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
321{
322 return container_of(chip, struct qcom_nand_host, chip);
323}
324
325static inline struct qcom_nand_controller *
326get_qcom_nand_controller(struct nand_chip *chip)
327{
328 return container_of(chip->controller, struct qcom_nand_controller,
329 controller);
330}
331
332static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
333{
334 return ioread32(nandc->base + offset);
335}
336
337static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
338 u32 val)
339{
340 iowrite32(val, nandc->base + offset);
341}
342
343static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
344{
345 switch (offset) {
346 case NAND_FLASH_CMD:
347 return &regs->cmd;
348 case NAND_ADDR0:
349 return &regs->addr0;
350 case NAND_ADDR1:
351 return &regs->addr1;
352 case NAND_FLASH_CHIP_SELECT:
353 return &regs->chip_sel;
354 case NAND_EXEC_CMD:
355 return &regs->exec;
356 case NAND_FLASH_STATUS:
357 return &regs->clrflashstatus;
358 case NAND_DEV0_CFG0:
359 return &regs->cfg0;
360 case NAND_DEV0_CFG1:
361 return &regs->cfg1;
362 case NAND_DEV0_ECC_CFG:
363 return &regs->ecc_bch_cfg;
364 case NAND_READ_STATUS:
365 return &regs->clrreadstatus;
366 case NAND_DEV_CMD1:
367 return &regs->cmd1;
368 case NAND_DEV_CMD1_RESTORE:
369 return &regs->orig_cmd1;
370 case NAND_DEV_CMD_VLD:
371 return &regs->vld;
372 case NAND_DEV_CMD_VLD_RESTORE:
373 return &regs->orig_vld;
374 case NAND_EBI2_ECC_BUF_CFG:
375 return &regs->ecc_buf_cfg;
376 default:
377 return NULL;
378 }
379}
380
381static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
382 u32 val)
383{
384 struct nandc_regs *regs = nandc->regs;
385 __le32 *reg;
386
387 reg = offset_to_nandc_reg(regs, offset);
388
389 if (reg)
390 *reg = cpu_to_le32(val);
391}
392
393/* helper to configure address register values */
394static void set_address(struct qcom_nand_host *host, u16 column, int page)
395{
396 struct nand_chip *chip = &host->chip;
397 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
398
399 if (chip->options & NAND_BUSWIDTH_16)
400 column >>= 1;
401
402 nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
403 nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
404}
405
406/*
407 * update_rw_regs: set up read/write register values, these will be
408 * written to the NAND controller registers via DMA
409 *
410 * @num_cw: number of steps for the read/write operation
411 * @read: read or write operation
412 */
413static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
414{
415 struct nand_chip *chip = &host->chip;
416 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
417 u32 cmd, cfg0, cfg1, ecc_bch_cfg;
418
419 if (read) {
420 if (host->use_ecc)
421 cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
422 else
423 cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
424 } else {
425 cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
426 }
427
428 if (host->use_ecc) {
429 cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
430 (num_cw - 1) << CW_PER_PAGE;
431
432 cfg1 = host->cfg1;
433 ecc_bch_cfg = host->ecc_bch_cfg;
434 } else {
435 cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
436 (num_cw - 1) << CW_PER_PAGE;
437
438 cfg1 = host->cfg1_raw;
439 ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
440 }
441
442 nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
443 nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
444 nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
445 nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
446 nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
447 nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
448 nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
449 nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
450}
451
452static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read,
453 int reg_off, const void *vaddr, int size,
454 bool flow_control)
455{
456 struct desc_info *desc;
457 struct dma_async_tx_descriptor *dma_desc;
458 struct scatterlist *sgl;
459 struct dma_slave_config slave_conf;
460 enum dma_transfer_direction dir_eng;
461 int ret;
462
463 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
464 if (!desc)
465 return -ENOMEM;
466
467 sgl = &desc->sgl;
468
469 sg_init_one(sgl, vaddr, size);
470
471 if (read) {
472 dir_eng = DMA_DEV_TO_MEM;
473 desc->dir = DMA_FROM_DEVICE;
474 } else {
475 dir_eng = DMA_MEM_TO_DEV;
476 desc->dir = DMA_TO_DEVICE;
477 }
478
479 ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
480 if (ret == 0) {
481 ret = -ENOMEM;
482 goto err;
483 }
484
485 memset(&slave_conf, 0x00, sizeof(slave_conf));
486
487 slave_conf.device_fc = flow_control;
488 if (read) {
489 slave_conf.src_maxburst = 16;
490 slave_conf.src_addr = nandc->base_dma + reg_off;
491 slave_conf.slave_id = nandc->data_crci;
492 } else {
493 slave_conf.dst_maxburst = 16;
494 slave_conf.dst_addr = nandc->base_dma + reg_off;
495 slave_conf.slave_id = nandc->cmd_crci;
496 }
497
498 ret = dmaengine_slave_config(nandc->chan, &slave_conf);
499 if (ret) {
500 dev_err(nandc->dev, "failed to configure dma channel\n");
501 goto err;
502 }
503
504 dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
505 if (!dma_desc) {
506 dev_err(nandc->dev, "failed to prepare desc\n");
507 ret = -EINVAL;
508 goto err;
509 }
510
511 desc->dma_desc = dma_desc;
512
513 list_add_tail(&desc->node, &nandc->desc_list);
514
515 return 0;
516err:
517 kfree(desc);
518
519 return ret;
520}
521
522/*
523 * read_reg_dma: prepares a descriptor to read a given number of
524 * contiguous registers to the reg_read_buf pointer
525 *
526 * @first: offset of the first register in the contiguous block
527 * @num_regs: number of registers to read
528 */
529static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
530 int num_regs)
531{
532 bool flow_control = false;
533 void *vaddr;
534 int size;
535
536 if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
537 flow_control = true;
538
539 size = num_regs * sizeof(u32);
540 vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
541 nandc->reg_read_pos += num_regs;
542
543 return prep_dma_desc(nandc, true, first, vaddr, size, flow_control);
544}
545
546/*
547 * write_reg_dma: prepares a descriptor to write a given number of
548 * contiguous registers
549 *
550 * @first: offset of the first register in the contiguous block
551 * @num_regs: number of registers to write
552 */
553static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
554 int num_regs)
555{
556 bool flow_control = false;
557 struct nandc_regs *regs = nandc->regs;
558 void *vaddr;
559 int size;
560
561 vaddr = offset_to_nandc_reg(regs, first);
562
563 if (first == NAND_FLASH_CMD)
564 flow_control = true;
565
566 if (first == NAND_DEV_CMD1_RESTORE)
567 first = NAND_DEV_CMD1;
568
569 if (first == NAND_DEV_CMD_VLD_RESTORE)
570 first = NAND_DEV_CMD_VLD;
571
572 size = num_regs * sizeof(u32);
573
574 return prep_dma_desc(nandc, false, first, vaddr, size, flow_control);
575}
576
577/*
578 * read_data_dma: prepares a DMA descriptor to transfer data from the
579 * controller's internal buffer to the buffer 'vaddr'
580 *
581 * @reg_off: offset within the controller's data buffer
582 * @vaddr: virtual address of the buffer we want to write to
583 * @size: DMA transaction size in bytes
584 */
585static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
586 const u8 *vaddr, int size)
587{
588 return prep_dma_desc(nandc, true, reg_off, vaddr, size, false);
589}
590
/*
 * write_data_dma: prepares a DMA descriptor to transfer data from
 * 'vaddr' to the controller's internal buffer
 *
 * @reg_off: offset within the controller's data buffer
 * @vaddr: virtual address of the buffer we want to read from
 * @size: DMA transaction size in bytes
 */
static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			  const u8 *vaddr, int size)
{
	/* plain data move into the controller buffer: no flow control */
	return prep_dma_desc(nandc, false, reg_off, vaddr, size, false);
}
604
/*
 * helper to prepare dma descriptors to configure registers needed for reading a
 * codeword/step in a page
 */
static void config_cw_read(struct qcom_nand_controller *nandc)
{
	/* command/address, device config and ECC buffer config */
	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);

	/* kick off the operation */
	write_reg_dma(nandc, NAND_EXEC_CMD, 1);

	/*
	 * read back per-codeword status: two words starting at
	 * NAND_FLASH_STATUS, plus the erased-CW detection status
	 */
	read_reg_dma(nandc, NAND_FLASH_STATUS, 2);
	read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1);
}
620
/*
 * helpers to prepare dma descriptors used to configure registers needed for
 * writing a codeword/step in a page
 */
static void config_cw_write_pre(struct qcom_nand_controller *nandc)
{
	/* program command/address and config before the data is staged */
	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
}
631
static void config_cw_write_post(struct qcom_nand_controller *nandc)
{
	/* kick off the write after the data descriptors were queued */
	write_reg_dma(nandc, NAND_EXEC_CMD, 1);

	/* read back the codeword's flash status */
	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);

	/* clear the status registers for the next operation */
	write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
	write_reg_dma(nandc, NAND_READ_STATUS, 1);
}
641
642/*
643 * the following functions are used within chip->cmdfunc() to perform different
644 * NAND_CMD_* commands
645 */
646
/* sets up descriptors for NAND_CMD_PARAM */
static int nandc_param(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/*
	 * NAND_CMD_PARAM is called before we know much about the FLASH chip
	 * in use. we configure the controller to perform a raw read of 512
	 * bytes to read onfi params
	 */
	nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, 0);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	/* single codeword, 512 data bytes, no spare, ECC block disabled */
	nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
					| 512 << UD_SIZE_BYTES
					| 5 << NUM_ADDR_CYCLES
					| 0 << SPARE_SIZE_BYTES);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
					| 0 << CS_ACTIVE_BSY
					| 17 << BAD_BLOCK_BYTE_NUM
					| 1 << BAD_BLOCK_IN_SPARE_AREA
					| 2 << WR_RD_BSY_GAP
					| 0 << WIDE_FLASH
					| 1 << DEV0_CFG1_ECC_DISABLE);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

	/* configure CMD1 and VLD for ONFI param probing */
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
		      (nandc->vld & ~(1 << READ_START_VLD))
		      | 0 << READ_START_VLD);
	nandc_set_reg(nandc, NAND_DEV_CMD1,
		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
		      | NAND_CMD_PARAM << READ_ADDR);

	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	/* stash original CMD1/VLD so they can be restored after the read */
	nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);

	write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1);
	write_reg_dma(nandc, NAND_DEV_CMD1, 1);

	/* make the param page readable via chip->read_byte()/read_buf() */
	nandc->buf_count = 512;
	memset(nandc->data_buffer, 0xff, nandc->buf_count);

	config_cw_read(nandc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		      nandc->buf_count);

	/* restore CMD1 and VLD regs */
	write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1);
	write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1);

	return 0;
}
704
/* sets up descriptors for NAND_CMD_ERASE1 */
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD,
		      BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
	/* the block to erase is addressed via the page address registers */
	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	/* raw config, with the codewords-per-page field cleared */
	nandc_set_reg(nandc, NAND_DEV0_CFG0,
		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
	nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
	/* values used to clear the status registers after reading them */
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);

	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 2);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1);

	/* erase result is parsed later by parse_erase_write_errors() */
	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
	write_reg_dma(nandc, NAND_READ_STATUS, 1);

	return 0;
}
733
/* sets up descriptors for NAND_CMD_READID */
static int read_id(struct qcom_nand_host *host, int column)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/* nothing to do when the caller supplied no address */
	if (column == -1)
		return 0;

	nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
	nandc_set_reg(nandc, NAND_ADDR0, column);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	/* FLASH_CMD, ADDR0, ADDR1 and CHIP_SELECT in one contiguous write */
	write_reg_dma(nandc, NAND_FLASH_CMD, 4);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1);

	/* the ID bytes land in NAND_READ_ID; copied out in post_command() */
	read_reg_dma(nandc, NAND_READ_ID, 1);

	return 0;
}
756
/* sets up descriptors for NAND_CMD_RESET */
static int reset(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1);

	/* read status back so completion of the reset can be observed */
	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);

	return 0;
}
773
774/* helpers to submit/free our list of dma descriptors */
775static int submit_descs(struct qcom_nand_controller *nandc)
776{
777 struct desc_info *desc;
778 dma_cookie_t cookie = 0;
779
780 list_for_each_entry(desc, &nandc->desc_list, node)
781 cookie = dmaengine_submit(desc->dma_desc);
782
783 if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
784 return -ETIMEDOUT;
785
786 return 0;
787}
788
789static void free_descs(struct qcom_nand_controller *nandc)
790{
791 struct desc_info *desc, *n;
792
793 list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
794 list_del(&desc->node);
795 dma_unmap_sg(nandc->dev, &desc->sgl, 1, desc->dir);
796 kfree(desc);
797 }
798}
799
/* reset the register read buffer for next NAND operation */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	/* zero the whole read-back area so stale status can't be parsed */
	memset(nandc->reg_read_buf, 0,
	       MAX_REG_RD * sizeof(*nandc->reg_read_buf));
}
807
/* per-command bookkeeping done before any descriptors are prepared */
static void pre_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/* rewind the buffer backing read_byte()/read_buf()/write_buf() */
	nandc->buf_count = 0;
	nandc->buf_start = 0;
	host->use_ecc = false;
	host->last_command = command;

	clear_read_regs(nandc);
}
820
821/*
822 * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
823 * privately maintained status byte, this status byte can be read after
824 * NAND_CMD_STATUS is called
825 */
826static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
827{
828 struct nand_chip *chip = &host->chip;
829 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
830 struct nand_ecc_ctrl *ecc = &chip->ecc;
831 int num_cw;
832 int i;
833
834 num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
835
836 for (i = 0; i < num_cw; i++) {
837 u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
838
839 if (flash_status & FS_MPU_ERR)
840 host->status &= ~NAND_STATUS_WP;
841
842 if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
843 (flash_status &
844 FS_DEVICE_STS_ERR)))
845 host->status |= NAND_STATUS_FAIL;
846 }
847}
848
849static void post_command(struct qcom_nand_host *host, int command)
850{
851 struct nand_chip *chip = &host->chip;
852 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
853
854 switch (command) {
855 case NAND_CMD_READID:
856 memcpy(nandc->data_buffer, nandc->reg_read_buf,
857 nandc->buf_count);
858 break;
859 case NAND_CMD_PAGEPROG:
860 case NAND_CMD_ERASE1:
861 parse_erase_write_errors(host, command);
862 break;
863 default:
864 break;
865 }
866}
867
/*
 * Implements chip->cmdfunc. It's only used for a limited set of commands.
 * The rest of the commands wouldn't be called by upper layers. For example,
 * NAND_CMD_READOOB would never be called because we have our own versions
 * of read_oob ops for nand_ecc_ctrl.
 */
static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
			       int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	bool wait = false;	/* set for commands that execute here and now */
	int ret = 0;

	/* reset buffer state and our private status tracking */
	pre_command(host, command);

	switch (command) {
	case NAND_CMD_RESET:
		ret = reset(host);
		wait = true;
		break;

	case NAND_CMD_READID:
		nandc->buf_count = 4;
		ret = read_id(host, column);
		wait = true;
		break;

	case NAND_CMD_PARAM:
		ret = nandc_param(host);
		wait = true;
		break;

	case NAND_CMD_ERASE1:
		ret = erase_block(host, page_addr);
		wait = true;
		break;

	case NAND_CMD_READ0:
		/* we read the entire page for now */
		WARN_ON(column != 0);

		/* only latch the address; data comes via ecc->read_page() */
		host->use_ecc = true;
		set_address(host, 0, page_addr);
		update_rw_regs(host, ecc->steps, true);
		break;

	case NAND_CMD_SEQIN:
		/* only latch the address; data comes via ecc->write_page() */
		WARN_ON(column != 0);
		set_address(host, 0, page_addr);
		break;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_NONE:
	default:
		break;
	}

	if (ret) {
		dev_err(nandc->dev, "failure executing command %d\n",
			command);
		free_descs(nandc);
		return;
	}

	if (wait) {
		ret = submit_descs(nandc);
		if (ret)
			dev_err(nandc->dev,
				"failure submitting descs for command %d\n",
				command);
	}

	free_descs(nandc);

	/* copy results / parse status for commands that need it */
	post_command(host, command);
}
948
949/*
950 * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
951 * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
952 *
953 * when using RS ECC, the HW reports the same erros when reading an erased CW,
954 * but it notifies that it is an erased CW by placing special characters at
955 * certain offsets in the buffer.
956 *
957 * verify if the page is erased or not, and fix up the page for RS ECC by
958 * replacing the special characters with 0xff.
959 */
960static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
961{
962 u8 empty1, empty2;
963
964 /*
965 * an erased page flags an error in NAND_FLASH_STATUS, check if the page
966 * is erased by looking for 0x54s at offsets 3 and 175 from the
967 * beginning of each codeword
968 */
969
970 empty1 = data_buf[3];
971 empty2 = data_buf[175];
972
973 /*
974 * if the erased codework markers, if they exist override them with
975 * 0xffs
976 */
977 if ((empty1 == 0x54 && empty2 == 0xff) ||
978 (empty1 == 0xff && empty2 == 0x54)) {
979 data_buf[3] = 0xff;
980 data_buf[175] = 0xff;
981 }
982
983 /*
984 * check if the entire chunk contains 0xffs or not. if it doesn't, then
985 * restore the original values at the special offsets
986 */
987 if (memchr_inv(data_buf, 0xff, data_len)) {
988 data_buf[3] = empty1;
989 data_buf[175] = empty2;
990
991 return false;
992 }
993
994 return true;
995}
996
/*
 * per-codeword status snapshot, laid out to match the three words read
 * back by config_cw_read() (two starting at NAND_FLASH_STATUS, then
 * NAND_ERASED_CW_DETECT_STATUS)
 */
struct read_stats {
	__le32 flash;		/* NAND_FLASH_STATUS */
	__le32 buffer;		/* word following FLASH_STATUS — presumably
				 * the buffer/ECC status register; confirm */
	__le32 erased_cw;	/* NAND_ERASED_CW_DETECT_STATUS */
};
1002
/*
 * reads back status registers set by the controller to notify page read
 * errors. this is equivalent to what 'ecc->correct()' would do.
 *
 * returns the maximum number of bitflips seen in any codeword, and updates
 * mtd->ecc_stats for corrected/failed codewords.
 */
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
			     u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	unsigned int max_bitflips = 0;
	struct read_stats *buf;
	int i;

	/* the register read-back buffer holds one read_stats triplet per CW */
	buf = (struct read_stats *)nandc->reg_read_buf;

	for (i = 0; i < ecc->steps; i++, buf++) {
		u32 flash, buffer, erased_cw;
		int data_len, oob_len;

		/* the last codeword holds less data plus the free oob bytes */
		if (i == (ecc->steps - 1)) {
			data_len = ecc->size - ((ecc->steps - 1) << 2);
			oob_len = ecc->steps << 2;
		} else {
			data_len = host->cw_data;
			oob_len = 0;
		}

		flash = le32_to_cpu(buf->flash);
		buffer = le32_to_cpu(buf->buffer);
		erased_cw = le32_to_cpu(buf->erased_cw);

		if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
			bool erased;

			/* ignore erased codeword errors */
			if (host->bch_enabled) {
				/* BCH: the HW flags erased CWs directly */
				erased = (erased_cw & ERASED_CW) == ERASED_CW ?
					 true : false;
			} else {
				/* RS: detect (and fix up) erased markers */
				erased = erased_chunk_check_and_fixup(data_buf,
								      data_len);
			}

			if (erased) {
				/* skip this CW, keeping pointers in step */
				data_buf += data_len;
				if (oob_buf)
					oob_buf += oob_len + ecc->bytes;
				continue;
			}

			if (buffer & BS_UNCORRECTABLE_BIT) {
				int ret, ecclen, extraooblen;
				void *eccbuf;

				eccbuf = oob_buf ? oob_buf + oob_len : NULL;
				ecclen = oob_buf ? host->ecc_bytes_hw : 0;
				extraooblen = oob_buf ? oob_len : 0;

				/*
				 * make sure it isn't an erased page reported
				 * as not-erased by HW because of a few bitflips
				 */
				ret = nand_check_erased_ecc_chunk(data_buf,
					data_len, eccbuf, ecclen, oob_buf,
					extraooblen, ecc->strength);
				if (ret < 0) {
					mtd->ecc_stats.failed++;
				} else {
					mtd->ecc_stats.corrected += ret;
					max_bitflips =
						max_t(unsigned int, max_bitflips, ret);
				}
			}
		} else {
			/* clean read: account for HW-corrected bitflips */
			unsigned int stat;

			stat = buffer & BS_CORRECTABLE_ERR_MSK;
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max(max_bitflips, stat);
		}

		data_buf += data_len;
		if (oob_buf)
			oob_buf += oob_len + ecc->bytes;
	}

	return max_bitflips;
}
1093
/*
 * helper to perform the actual page read operation, used by ecc->read_page(),
 * ecc->read_oob()
 *
 * either of data_buf/oob_buf may be NULL to skip that part of the transfer
 */
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
			 u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int i, ret;

	/* queue cmd descs for each codeword */
	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		/* the last codeword carries less data plus the free oob */
		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw + host->spare_bytes;
		}

		config_cw_read(nandc);

		if (data_buf)
			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
				      data_size);

		/*
		 * when ecc is enabled, the controller doesn't read the real
		 * or dummy bad block markers in each chunk. To maintain a
		 * consistent layout across RAW and ECC reads, we just
		 * leave the real/dummy BBM offsets empty (i.e, filled with
		 * 0xffs)
		 */
		if (oob_buf) {
			int j;

			for (j = 0; j < host->bbm_size; j++)
				*oob_buf++ = 0xff;

			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
				      oob_buf, oob_size);
		}

		if (data_buf)
			data_buf += data_size;
		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to read page/oob\n");

	free_descs(nandc);

	return ret;
}
1156
/*
 * a helper that copies the last step/codeword of a page (containing free oob)
 * into our local buffer
 */
static int copy_last_cw(struct qcom_nand_host *host, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int size;
	int ret;

	clear_read_regs(nandc);

	/* data portion only when ECC is on, the full codeword when raw */
	size = host->use_ecc ? host->cw_data : host->cw_size;

	/* prepare a clean read buffer */
	memset(nandc->data_buffer, 0xff, size);

	/* point the controller at the last codeword of the page */
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, true);

	config_cw_read(nandc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size);

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failed to copy last codeword\n");

	free_descs(nandc);

	return ret;
}
1191
1192/* implements ecc->read_page() */
1193static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1194 uint8_t *buf, int oob_required, int page)
1195{
1196 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1197 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1198 u8 *data_buf, *oob_buf = NULL;
1199 int ret;
1200
1201 data_buf = buf;
1202 oob_buf = oob_required ? chip->oob_poi : NULL;
1203
1204 ret = read_page_ecc(host, data_buf, oob_buf);
1205 if (ret) {
1206 dev_err(nandc->dev, "failure to read page\n");
1207 return ret;
1208 }
1209
1210 return parse_read_errors(host, data_buf, oob_buf);
1211}
1212
1213/* implements ecc->read_page_raw() */
1214static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
1215 struct nand_chip *chip, uint8_t *buf,
1216 int oob_required, int page)
1217{
1218 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1219 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1220 u8 *data_buf, *oob_buf;
1221 struct nand_ecc_ctrl *ecc = &chip->ecc;
1222 int i, ret;
1223
1224 data_buf = buf;
1225 oob_buf = chip->oob_poi;
1226
1227 host->use_ecc = false;
1228 update_rw_regs(host, ecc->steps, true);
1229
1230 for (i = 0; i < ecc->steps; i++) {
1231 int data_size1, data_size2, oob_size1, oob_size2;
1232 int reg_off = FLASH_BUF_ACC;
1233
1234 data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
1235 oob_size1 = host->bbm_size;
1236
1237 if (i == (ecc->steps - 1)) {
1238 data_size2 = ecc->size - data_size1 -
1239 ((ecc->steps - 1) << 2);
1240 oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
1241 host->spare_bytes;
1242 } else {
1243 data_size2 = host->cw_data - data_size1;
1244 oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
1245 }
1246
1247 config_cw_read(nandc);
1248
1249 read_data_dma(nandc, reg_off, data_buf, data_size1);
1250 reg_off += data_size1;
1251 data_buf += data_size1;
1252
1253 read_data_dma(nandc, reg_off, oob_buf, oob_size1);
1254 reg_off += oob_size1;
1255 oob_buf += oob_size1;
1256
1257 read_data_dma(nandc, reg_off, data_buf, data_size2);
1258 reg_off += data_size2;
1259 data_buf += data_size2;
1260
1261 read_data_dma(nandc, reg_off, oob_buf, oob_size2);
1262 oob_buf += oob_size2;
1263 }
1264
1265 ret = submit_descs(nandc);
1266 if (ret)
1267 dev_err(nandc->dev, "failure to read raw page\n");
1268
1269 free_descs(nandc);
1270
1271 return 0;
1272}
1273
/* implements ecc->read_oob() */
static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			       int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret;

	clear_read_regs(nandc);

	/* an ECC page read with a NULL data buffer yields just the oob */
	host->use_ecc = true;
	set_address(host, 0, page);
	update_rw_regs(host, ecc->steps, true);

	ret = read_page_ecc(host, NULL, chip->oob_poi);
	if (ret)
		dev_err(nandc->dev, "failure to read oob\n");

	return ret;
}
1295
/* implements ecc->write_page() */
static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	clear_read_regs(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = true;
	update_rw_regs(host, ecc->steps, false);

	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		/* the last codeword carries less data plus the free oob */
		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = ecc->bytes;
		}

		config_cw_write_pre(nandc);

		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size);

		/*
		 * when ECC is enabled, we don't really need to write anything
		 * to oob for the first n - 1 codewords since these oob regions
		 * just contain ECC bytes that's written by the controller
		 * itself. For the last codeword, we skip the bbm positions and
		 * write to the free oob area.
		 */
		if (i == (ecc->steps - 1)) {
			oob_buf += host->bbm_size;

			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
				       oob_buf, oob_size);
		}

		config_cw_write_post(nandc);

		data_buf += data_size;
		oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write page\n");

	free_descs(nandc);

	return ret;
}
1358
/* implements ecc->write_page_raw() */
static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
				     struct nand_chip *chip, const uint8_t *buf,
				     int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	clear_read_regs(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	/* raw access: the ECC block stays disabled for every codeword */
	host->use_ecc = false;
	update_rw_regs(host, ecc->steps, false);

	for (i = 0; i < ecc->steps; i++) {
		int data_size1, data_size2, oob_size1, oob_size2;
		int reg_off = FLASH_BUF_ACC;

		/*
		 * each raw codeword is split around the (dummy or real) bad
		 * block marker bytes: data1 | bbm | data2 | spare/ecc,
		 * mirroring qcom_nandc_read_page_raw()
		 */
		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
		oob_size1 = host->bbm_size;

		if (i == (ecc->steps - 1)) {
			data_size2 = ecc->size - data_size1 -
				     ((ecc->steps - 1) << 2);
			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
				    host->spare_bytes;
		} else {
			data_size2 = host->cw_data - data_size1;
			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
		}

		config_cw_write_pre(nandc);

		write_data_dma(nandc, reg_off, data_buf, data_size1);
		reg_off += data_size1;
		data_buf += data_size1;

		write_data_dma(nandc, reg_off, oob_buf, oob_size1);
		reg_off += oob_size1;
		oob_buf += oob_size1;

		write_data_dma(nandc, reg_off, data_buf, data_size2);
		reg_off += data_size2;
		data_buf += data_size2;

		write_data_dma(nandc, reg_off, oob_buf, oob_size2);
		oob_buf += oob_size2;

		config_cw_write_post(nandc);
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write raw page\n");

	free_descs(nandc);

	return ret;
}
1423
/*
 * implements ecc->write_oob()
 *
 * the NAND controller cannot write only data or only oob within a codeword,
 * since ecc is calculated for the combined codeword. we first copy the
 * entire contents for the last codeword(data + oob), replace the old oob
 * with the new one in chip->oob_poi, and then write the entire codeword.
 * this read-copy-write operation results in a slight performance loss.
 */
static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *oob = chip->oob_poi;
	int free_boff;
	int data_size, oob_size;
	int ret, status = 0;

	host->use_ecc = true;

	/* pull the current last codeword into nandc->data_buffer */
	ret = copy_last_cw(host, page);
	if (ret)
		return ret;

	clear_read_regs(nandc);

	/* calculate the data and oob size for the last codeword/step */
	data_size = ecc->size - ((ecc->steps - 1) << 2);
	oob_size = ecc->steps << 2;

	/* free oob starts at the layout's first oobfree offset */
	free_boff = ecc->layout->oobfree[0].offset;

	/* override new oob content to last codeword */
	memcpy(nandc->data_buffer + data_size, oob + free_boff, oob_size);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_cw_write_pre(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		       data_size + oob_size);
	config_cw_write_post(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to write oob\n");
		return -EIO;
	}

	/* commit the program and report failure via the chip status */
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}
1484
/*
 * implements chip->block_bad(): reads the bad block marker byte(s) of the
 * last codeword with ECC disabled and reports whether they differ from 0xff
 */
static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, bbpos, bad = 0;
	u32 flash_status;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/*
	 * configure registers for a raw sub page read, the address is set to
	 * the beginning of the last codeword, we don't care about reading ecc
	 * portion of oob. we just want the first few bytes from this codeword
	 * that contains the BBM
	 */
	host->use_ecc = false;

	ret = copy_last_cw(host, page);
	if (ret)
		goto err;

	flash_status = le32_to_cpu(nandc->reg_read_buf[0]);

	if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
		dev_warn(nandc->dev, "error when trying to read BBM\n");
		goto err;
	}

	/* the real BBM sits right after the data bytes of the last CW */
	bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);

	bad = nandc->data_buffer[bbpos] != 0xff;

	if (chip->options & NAND_BUSWIDTH_16)
		bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
err:
	/*
	 * NOTE(review): on a read/copy failure this returns bad == 0, i.e.
	 * an unreadable block is reported as good — confirm this best-effort
	 * behavior is intended
	 */
	return bad;
}
1524
/* implements chip->block_markbad() */
static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, status = 0;

	clear_read_regs(nandc);

	/*
	 * to mark the BBM as bad, we flash the entire last codeword with 0s.
	 * we don't care about the rest of the content in the codeword since
	 * we aren't going to use this block again
	 */
	memset(nandc->data_buffer, 0x00, host->cw_size);

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/* prepare write */
	host->use_ecc = false;
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_cw_write_pre(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, host->cw_size);
	config_cw_write_post(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to update BBM\n");
		return -EIO;
	}

	/* commit the program and report failure via the chip status */
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}
1568
1569/*
1570 * the three functions below implement chip->read_byte(), chip->read_buf()
1571 * and chip->write_buf() respectively. these aren't used for
1572 * reading/writing page data, they are used for smaller data like reading
1573 * id, status etc
1574 */
1575static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
1576{
1577 struct nand_chip *chip = mtd_to_nand(mtd);
1578 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1579 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1580 u8 *buf = nandc->data_buffer;
1581 u8 ret = 0x0;
1582
1583 if (host->last_command == NAND_CMD_STATUS) {
1584 ret = host->status;
1585
1586 host->status = NAND_STATUS_READY | NAND_STATUS_WP;
1587
1588 return ret;
1589 }
1590
1591 if (nandc->buf_start < nandc->buf_count)
1592 ret = buf[nandc->buf_start++];
1593
1594 return ret;
1595}
1596
1597static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1598{
1599 struct nand_chip *chip = mtd_to_nand(mtd);
1600 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1601 int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
1602
1603 memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
1604 nandc->buf_start += real_len;
1605}
1606
1607static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
1608 int len)
1609{
1610 struct nand_chip *chip = mtd_to_nand(mtd);
1611 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1612 int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
1613
1614 memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
1615
1616 nandc->buf_start += real_len;
1617}
1618
1619/* we support only one external chip for now */
1620static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
1621{
1622 struct nand_chip *chip = mtd_to_nand(mtd);
1623 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1624
1625 if (chipnr <= 0)
1626 return;
1627
1628 dev_warn(nandc->dev, "invalid chip select\n");
1629}
1630
1631/*
1632 * NAND controller page layout info
1633 *
1634 * Layout with ECC enabled:
1635 *
1636 * |----------------------| |---------------------------------|
1637 * | xx.......yy| | *********xx.......yy|
1638 * | DATA xx..ECC..yy| | DATA **SPARE**xx..ECC..yy|
1639 * | (516) xx.......yy| | (516-n*4) **(n*4)**xx.......yy|
1640 * | xx.......yy| | *********xx.......yy|
1641 * |----------------------| |---------------------------------|
1642 * codeword 1,2..n-1 codeword n
1643 * <---(528/532 Bytes)--> <-------(528/532 Bytes)--------->
1644 *
1645 * n = Number of codewords in the page
1646 * . = ECC bytes
1647 * * = Spare/free bytes
1648 * x = Unused byte(s)
1649 * y = Reserved byte(s)
1650 *
1651 * 2K page: n = 4, spare = 16 bytes
1652 * 4K page: n = 8, spare = 32 bytes
1653 * 8K page: n = 16, spare = 64 bytes
1654 *
1655 * the qcom nand controller operates at a sub page/codeword level. each
1656 * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
1657 * the number of ECC bytes vary based on the ECC strength and the bus width.
1658 *
1659 * the first n - 1 codewords contains 516 bytes of user data, the remaining
1660 * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
1661 * both user data and spare(oobavail) bytes that sum up to 516 bytes.
1662 *
1663 * When we access a page with ECC enabled, the reserved bytes(s) are not
1664 * accessible at all. When reading, we fill up these unreadable positions
1665 * with 0xffs. When writing, the controller skips writing the inaccessible
1666 * bytes.
1667 *
1668 * Layout with ECC disabled:
1669 *
1670 * |------------------------------| |---------------------------------------|
1671 * | yy xx.......| | bb *********xx.......|
1672 * | DATA1 yy DATA2 xx..ECC..| | DATA1 bb DATA2 **SPARE**xx..ECC..|
1673 * | (size1) yy (size2) xx.......| | (size1) bb (size2) **(n*4)**xx.......|
1674 * | yy xx.......| | bb *********xx.......|
1675 * |------------------------------| |---------------------------------------|
1676 * codeword 1,2..n-1 codeword n
1677 * <-------(528/532 Bytes)------> <-----------(528/532 Bytes)----------->
1678 *
1679 * n = Number of codewords in the page
1680 * . = ECC bytes
1681 * * = Spare/free bytes
1682 * x = Unused byte(s)
 * y = Dummy Bad Block byte(s)
1684 * b = Real Bad Block byte(s)
1685 * size1/size2 = function of codeword size and 'n'
1686 *
1687 * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
1688 * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
1689 * Block Markers. In the last codeword, this position contains the real BBM
1690 *
1691 * In order to have a consistent layout between RAW and ECC modes, we assume
1692 * the following OOB layout arrangement:
1693 *
1694 * |-----------| |--------------------|
1695 * |yyxx.......| |bb*********xx.......|
1696 * |yyxx..ECC..| |bb*FREEOOB*xx..ECC..|
1697 * |yyxx.......| |bb*********xx.......|
1698 * |yyxx.......| |bb*********xx.......|
1699 * |-----------| |--------------------|
1700 * first n - 1 nth OOB region
1701 * OOB regions
1702 *
1703 * n = Number of codewords in the page
1704 * . = ECC bytes
1705 * * = FREE OOB bytes
1706 * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
1707 * x = Unused byte(s)
1708 * b = Real bad block byte(s) (inaccessible when ECC enabled)
1709 *
1710 * This layout is read as is when ECC is disabled. When ECC is enabled, the
1711 * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
1712 * and assumed as 0xffs when we read a page/oob. The ECC, unused and
1713 * dummy/real bad block bytes are grouped as ecc bytes in nand_ecclayout (i.e,
1714 * ecc->bytes is the sum of the three).
1715 */
1716
1717static struct nand_ecclayout *
1718qcom_nand_create_layout(struct qcom_nand_host *host)
1719{
1720 struct nand_chip *chip = &host->chip;
1721 struct mtd_info *mtd = nand_to_mtd(chip);
1722 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1723 struct nand_ecc_ctrl *ecc = &chip->ecc;
1724 struct nand_ecclayout *layout;
1725 int i, j, steps, pos = 0, shift = 0;
1726
1727 layout = devm_kzalloc(nandc->dev, sizeof(*layout), GFP_KERNEL);
1728 if (!layout)
1729 return NULL;
1730
1731 steps = mtd->writesize / ecc->size;
1732 layout->eccbytes = steps * ecc->bytes;
1733
1734 layout->oobfree[0].offset = (steps - 1) * ecc->bytes + host->bbm_size;
1735 layout->oobfree[0].length = steps << 2;
1736
1737 /*
1738 * the oob bytes in the first n - 1 codewords are all grouped together
1739 * in the format:
1740 * DUMMY_BBM + UNUSED + ECC
1741 */
1742 for (i = 0; i < steps - 1; i++) {
1743 for (j = 0; j < ecc->bytes; j++)
1744 layout->eccpos[pos++] = i * ecc->bytes + j;
1745 }
1746
1747 /*
1748 * the oob bytes in the last codeword are grouped in the format:
1749 * BBM + FREE OOB + UNUSED + ECC
1750 */
1751
1752 /* fill up the bbm positions */
1753 for (j = 0; j < host->bbm_size; j++)
1754 layout->eccpos[pos++] = i * ecc->bytes + j;
1755
1756 /*
1757 * fill up the ecc and reserved positions, their indices are offseted
1758 * by the free oob region
1759 */
1760 shift = layout->oobfree[0].length + host->bbm_size;
1761
1762 for (j = 0; j < (host->ecc_bytes_hw + host->spare_bytes); j++)
1763 layout->eccpos[pos++] = i * ecc->bytes + shift + j;
1764
1765 return layout;
1766}
1767
1768static int qcom_nand_host_setup(struct qcom_nand_host *host)
1769{
1770 struct nand_chip *chip = &host->chip;
1771 struct mtd_info *mtd = nand_to_mtd(chip);
1772 struct nand_ecc_ctrl *ecc = &chip->ecc;
1773 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1774 int cwperpage, bad_block_byte;
1775 bool wide_bus;
1776 int ecc_mode = 1;
1777
1778 /*
1779 * the controller requires each step consists of 512 bytes of data.
1780 * bail out if DT has populated a wrong step size.
1781 */
1782 if (ecc->size != NANDC_STEP_SIZE) {
1783 dev_err(nandc->dev, "invalid ecc size\n");
1784 return -EINVAL;
1785 }
1786
1787 wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
1788
1789 if (ecc->strength >= 8) {
1790 /* 8 bit ECC defaults to BCH ECC on all platforms */
1791 host->bch_enabled = true;
1792 ecc_mode = 1;
1793
1794 if (wide_bus) {
1795 host->ecc_bytes_hw = 14;
1796 host->spare_bytes = 0;
1797 host->bbm_size = 2;
1798 } else {
1799 host->ecc_bytes_hw = 13;
1800 host->spare_bytes = 2;
1801 host->bbm_size = 1;
1802 }
1803 } else {
1804 /*
1805 * if the controller supports BCH for 4 bit ECC, the controller
1806 * uses lesser bytes for ECC. If RS is used, the ECC bytes is
1807 * always 10 bytes
1808 */
1809 if (nandc->ecc_modes & ECC_BCH_4BIT) {
1810 /* BCH */
1811 host->bch_enabled = true;
1812 ecc_mode = 0;
1813
1814 if (wide_bus) {
1815 host->ecc_bytes_hw = 8;
1816 host->spare_bytes = 2;
1817 host->bbm_size = 2;
1818 } else {
1819 host->ecc_bytes_hw = 7;
1820 host->spare_bytes = 4;
1821 host->bbm_size = 1;
1822 }
1823 } else {
1824 /* RS */
1825 host->ecc_bytes_hw = 10;
1826
1827 if (wide_bus) {
1828 host->spare_bytes = 0;
1829 host->bbm_size = 2;
1830 } else {
1831 host->spare_bytes = 1;
1832 host->bbm_size = 1;
1833 }
1834 }
1835 }
1836
1837 /*
1838 * we consider ecc->bytes as the sum of all the non-data content in a
1839 * step. It gives us a clean representation of the oob area (even if
1840 * all the bytes aren't used for ECC).It is always 16 bytes for 8 bit
1841 * ECC and 12 bytes for 4 bit ECC
1842 */
1843 ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
1844
1845 ecc->read_page = qcom_nandc_read_page;
1846 ecc->read_page_raw = qcom_nandc_read_page_raw;
1847 ecc->read_oob = qcom_nandc_read_oob;
1848 ecc->write_page = qcom_nandc_write_page;
1849 ecc->write_page_raw = qcom_nandc_write_page_raw;
1850 ecc->write_oob = qcom_nandc_write_oob;
1851
1852 ecc->mode = NAND_ECC_HW;
1853
1854 ecc->layout = qcom_nand_create_layout(host);
1855 if (!ecc->layout)
1856 return -ENOMEM;
1857
1858 cwperpage = mtd->writesize / ecc->size;
1859
1860 /*
1861 * DATA_UD_BYTES varies based on whether the read/write command protects
1862 * spare data with ECC too. We protect spare data by default, so we set
1863 * it to main + spare data, which are 512 and 4 bytes respectively.
1864 */
1865 host->cw_data = 516;
1866
1867 /*
1868 * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
1869 * for 8 bit ECC
1870 */
1871 host->cw_size = host->cw_data + ecc->bytes;
1872
1873 if (ecc->bytes * (mtd->writesize / ecc->size) > mtd->oobsize) {
1874 dev_err(nandc->dev, "ecc data doesn't fit in OOB area\n");
1875 return -EINVAL;
1876 }
1877
1878 bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
1879
1880 host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
1881 | host->cw_data << UD_SIZE_BYTES
1882 | 0 << DISABLE_STATUS_AFTER_WRITE
1883 | 5 << NUM_ADDR_CYCLES
1884 | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
1885 | 0 << STATUS_BFR_READ
1886 | 1 << SET_RD_MODE_AFTER_STATUS
1887 | host->spare_bytes << SPARE_SIZE_BYTES;
1888
1889 host->cfg1 = 7 << NAND_RECOVERY_CYCLES
1890 | 0 << CS_ACTIVE_BSY
1891 | bad_block_byte << BAD_BLOCK_BYTE_NUM
1892 | 0 << BAD_BLOCK_IN_SPARE_AREA
1893 | 2 << WR_RD_BSY_GAP
1894 | wide_bus << WIDE_FLASH
1895 | host->bch_enabled << ENABLE_BCH_ECC;
1896
1897 host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
1898 | host->cw_size << UD_SIZE_BYTES
1899 | 5 << NUM_ADDR_CYCLES
1900 | 0 << SPARE_SIZE_BYTES;
1901
1902 host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
1903 | 0 << CS_ACTIVE_BSY
1904 | 17 << BAD_BLOCK_BYTE_NUM
1905 | 1 << BAD_BLOCK_IN_SPARE_AREA
1906 | 2 << WR_RD_BSY_GAP
1907 | wide_bus << WIDE_FLASH
1908 | 1 << DEV0_CFG1_ECC_DISABLE;
1909
1910 host->ecc_bch_cfg = host->bch_enabled << ECC_CFG_ECC_DISABLE
1911 | 0 << ECC_SW_RESET
1912 | host->cw_data << ECC_NUM_DATA_BYTES
1913 | 1 << ECC_FORCE_CLK_OPEN
1914 | ecc_mode << ECC_MODE
1915 | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
1916
1917 host->ecc_buf_cfg = 0x203 << NUM_STEPS;
1918
1919 host->clrflashstatus = FS_READY_BSY_N;
1920 host->clrreadstatus = 0xc0;
1921
1922 dev_dbg(nandc->dev,
1923 "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
1924 host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
1925 host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
1926 cwperpage);
1927
1928 return 0;
1929}
1930
1931static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
1932{
1933 int ret;
1934
1935 ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
1936 if (ret) {
1937 dev_err(nandc->dev, "failed to set DMA mask\n");
1938 return ret;
1939 }
1940
1941 /*
1942 * we use the internal buffer for reading ONFI params, reading small
1943 * data like ID and status, and preforming read-copy-write operations
1944 * when writing to a codeword partially. 532 is the maximum possible
1945 * size of a codeword for our nand controller
1946 */
1947 nandc->buf_size = 532;
1948
1949 nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
1950 GFP_KERNEL);
1951 if (!nandc->data_buffer)
1952 return -ENOMEM;
1953
1954 nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
1955 GFP_KERNEL);
1956 if (!nandc->regs)
1957 return -ENOMEM;
1958
1959 nandc->reg_read_buf = devm_kzalloc(nandc->dev,
1960 MAX_REG_RD * sizeof(*nandc->reg_read_buf),
1961 GFP_KERNEL);
1962 if (!nandc->reg_read_buf)
1963 return -ENOMEM;
1964
1965 nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
1966 if (!nandc->chan) {
1967 dev_err(nandc->dev, "failed to request slave channel\n");
1968 return -ENODEV;
1969 }
1970
1971 INIT_LIST_HEAD(&nandc->desc_list);
1972 INIT_LIST_HEAD(&nandc->host_list);
1973
1974 spin_lock_init(&nandc->controller.lock);
1975 init_waitqueue_head(&nandc->controller.wq);
1976
1977 return 0;
1978}
1979
/* release the resources not managed by devm (only the DMA channel) */
static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
{
	dma_release_channel(nandc->chan);
}
1984
/*
 * One-time setup of a few NAND controller registers, done at probe
 * before any host is initialized.
 */
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
	/* kill onenand (we only drive raw NAND through this controller) */
	nandc_write(nandc, SFLASHC_BURST_CFG, 0);

	/* enable ADM DMA */
	nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);

	/*
	 * save the original values of these registers; presumably reused
	 * later by the command path -- callers not visible in this chunk
	 */
	nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1);
	nandc->vld = nandc_read(nandc, NAND_DEV_CMD_VLD);

	return 0;
}
2000
2001static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
2002 struct qcom_nand_host *host,
2003 struct device_node *dn)
2004{
2005 struct nand_chip *chip = &host->chip;
2006 struct mtd_info *mtd = nand_to_mtd(chip);
2007 struct device *dev = nandc->dev;
2008 int ret;
2009
2010 ret = of_property_read_u32(dn, "reg", &host->cs);
2011 if (ret) {
2012 dev_err(dev, "can't get chip-select\n");
2013 return -ENXIO;
2014 }
2015
2016 nand_set_flash_node(chip, dn);
2017 mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
2018 mtd->owner = THIS_MODULE;
2019 mtd->dev.parent = dev;
2020
2021 chip->cmdfunc = qcom_nandc_command;
2022 chip->select_chip = qcom_nandc_select_chip;
2023 chip->read_byte = qcom_nandc_read_byte;
2024 chip->read_buf = qcom_nandc_read_buf;
2025 chip->write_buf = qcom_nandc_write_buf;
2026
2027 /*
2028 * the bad block marker is readable only when we read the last codeword
2029 * of a page with ECC disabled. currently, the nand_base and nand_bbt
2030 * helpers don't allow us to read BB from a nand chip with ECC
2031 * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
2032 * and block_markbad helpers until we permanently switch to using
2033 * MTD_OPS_RAW for all drivers (with the help of badblockbits)
2034 */
2035 chip->block_bad = qcom_nandc_block_bad;
2036 chip->block_markbad = qcom_nandc_block_markbad;
2037
2038 chip->controller = &nandc->controller;
2039 chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
2040 NAND_SKIP_BBTSCAN;
2041
2042 /* set up initial status value */
2043 host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2044
2045 ret = nand_scan_ident(mtd, 1, NULL);
2046 if (ret)
2047 return ret;
2048
2049 ret = qcom_nand_host_setup(host);
2050 if (ret)
2051 return ret;
2052
2053 ret = nand_scan_tail(mtd);
2054 if (ret)
2055 return ret;
2056
2057 return mtd_device_register(mtd, NULL, 0);
2058}
2059
2060/* parse custom DT properties here */
2061static int qcom_nandc_parse_dt(struct platform_device *pdev)
2062{
2063 struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
2064 struct device_node *np = nandc->dev->of_node;
2065 int ret;
2066
2067 ret = of_property_read_u32(np, "qcom,cmd-crci", &nandc->cmd_crci);
2068 if (ret) {
2069 dev_err(nandc->dev, "command CRCI unspecified\n");
2070 return ret;
2071 }
2072
2073 ret = of_property_read_u32(np, "qcom,data-crci", &nandc->data_crci);
2074 if (ret) {
2075 dev_err(nandc->dev, "data CRCI unspecified\n");
2076 return ret;
2077 }
2078
2079 return 0;
2080}
2081
/*
 * Probe: map registers, grab clocks and the DMA channel, do the
 * one-time controller setup, then initialize one host per "qcom,nandcs"
 * child node. Succeeds if at least one chip comes up.
 */
static int qcom_nandc_probe(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc;
	struct qcom_nand_host *host;
	const void *dev_data;
	struct device *dev = &pdev->dev;
	struct device_node *dn = dev->of_node, *child;
	struct resource *res;
	int ret;

	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
	if (!nandc)
		return -ENOMEM;

	platform_set_drvdata(pdev, nandc);
	nandc->dev = dev;

	/* the OF match data encodes the ECC modes this variant supports */
	dev_data = of_device_get_match_data(dev);
	if (!dev_data) {
		dev_err(&pdev->dev, "failed to get device data\n");
		return -ENODEV;
	}

	nandc->ecc_modes = (unsigned long)dev_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nandc->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(nandc->base))
		return PTR_ERR(nandc->base);

	/* bus address of the register space, used by the DMA descriptors */
	nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);

	nandc->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(nandc->core_clk))
		return PTR_ERR(nandc->core_clk);

	nandc->aon_clk = devm_clk_get(dev, "aon");
	if (IS_ERR(nandc->aon_clk))
		return PTR_ERR(nandc->aon_clk);

	ret = qcom_nandc_parse_dt(pdev);
	if (ret)
		return ret;

	ret = qcom_nandc_alloc(nandc);
	if (ret)
		return ret;

	ret = clk_prepare_enable(nandc->core_clk);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->aon_clk);
	if (ret)
		goto err_aon_clk;

	ret = qcom_nandc_setup(nandc);
	if (ret)
		goto err_setup;

	/* one qcom_nand_host per "qcom,nandcs" child (one per chip-select) */
	for_each_available_child_of_node(dn, child) {
		if (of_device_is_compatible(child, "qcom,nandcs")) {
			host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
			if (!host) {
				of_node_put(child);
				ret = -ENOMEM;
				goto err_cs_init;
			}

			/* a chip that fails to init is skipped, not fatal */
			ret = qcom_nand_host_init(nandc, host, child);
			if (ret) {
				devm_kfree(dev, host);
				continue;
			}

			list_add_tail(&host->node, &nandc->host_list);
		}
	}

	/* fail the probe only if no chip at all came up */
	if (list_empty(&nandc->host_list)) {
		ret = -ENODEV;
		goto err_cs_init;
	}

	return 0;

err_cs_init:
	list_for_each_entry(host, &nandc->host_list, node)
		nand_release(nand_to_mtd(&host->chip));
err_setup:
	clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
	clk_disable_unprepare(nandc->core_clk);
err_core_clk:
	qcom_nandc_unalloc(nandc);

	return ret;
}
2180
static int qcom_nandc_remove(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct qcom_nand_host *host;

	/* unregister and tear down every chip registered during probe */
	list_for_each_entry(host, &nandc->host_list, node)
		nand_release(nand_to_mtd(&host->chip));

	qcom_nandc_unalloc(nandc);

	/* disable the clocks in reverse order of enabling */
	clk_disable_unprepare(nandc->aon_clk);
	clk_disable_unprepare(nandc->core_clk);

	return 0;
}
2196
/* ECC modes supported by the EBI2 (ipq806x) NAND controller variant */
#define EBI2_NANDC_ECC_MODES	(ECC_RS_4BIT | ECC_BCH_8BIT)

/*
 * data will hold a struct pointer containing more differences once we support
 * more controller variants
 */
static const struct of_device_id qcom_nandc_of_match[] = {
	{ .compatible = "qcom,ipq806x-nand",
	  .data = (void *)EBI2_NANDC_ECC_MODES,
	},
	{}
};
MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);

static struct platform_driver qcom_nandc_driver = {
	.driver = {
		.name = "qcom-nandc",
		.of_match_table = qcom_nandc_of_match,
	},
	.probe   = qcom_nandc_probe,
	.remove  = qcom_nandc_remove,
};
module_platform_driver(qcom_nandc_driver);

MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 01ac74fa3b95..9c9397b54b2c 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -861,9 +861,6 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
861 chip->ecc.mode = NAND_ECC_SOFT; 861 chip->ecc.mode = NAND_ECC_SOFT;
862#endif 862#endif
863 863
864 if (set->ecc_layout != NULL)
865 chip->ecc.layout = set->ecc_layout;
866
867 if (set->disable_ecc) 864 if (set->disable_ecc)
868 chip->ecc.mode = NAND_ECC_NONE; 865 chip->ecc.mode = NAND_ECC_NONE;
869 866
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 51e10a35fe08..1c03eee44f3d 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -60,6 +60,7 @@
60#define NFC_REG_ECC_ERR_CNT(x) ((0x0040 + (x)) & ~0x3) 60#define NFC_REG_ECC_ERR_CNT(x) ((0x0040 + (x)) & ~0x3)
61#define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4)) 61#define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4))
62#define NFC_REG_SPARE_AREA 0x00A0 62#define NFC_REG_SPARE_AREA 0x00A0
63#define NFC_REG_PAT_ID 0x00A4
63#define NFC_RAM0_BASE 0x0400 64#define NFC_RAM0_BASE 0x0400
64#define NFC_RAM1_BASE 0x0800 65#define NFC_RAM1_BASE 0x0800
65 66
@@ -538,6 +539,174 @@ static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
538 sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); 539 sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
539} 540}
540 541
542/* These seed values have been extracted from Allwinner's BSP */
543static const u16 sunxi_nfc_randomizer_page_seeds[] = {
544 0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
545 0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436,
546 0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d,
547 0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130,
548 0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56,
549 0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 0x0c62, 0x05eb, 0x4c55,
550 0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb,
551 0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17,
552 0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62,
553 0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064,
554 0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126,
555 0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e,
556 0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3,
557 0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b,
558 0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d,
559 0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db,
560};
561
562/*
563 * sunxi_nfc_randomizer_ecc512_seeds and sunxi_nfc_randomizer_ecc1024_seeds
564 * have been generated using
565 * sunxi_nfc_randomizer_step(seed, (step_size * 8) + 15), which is what
566 * the randomizer engine does internally before de/scrambling OOB data.
567 *
568 * Those tables are statically defined to avoid calculating randomizer state
569 * at runtime.
570 */
571static const u16 sunxi_nfc_randomizer_ecc512_seeds[] = {
572 0x3346, 0x367f, 0x1f18, 0x769a, 0x4f64, 0x068c, 0x2ef1, 0x6b64,
573 0x28a9, 0x15d7, 0x30f8, 0x3659, 0x53db, 0x7c5f, 0x71d4, 0x4409,
574 0x26eb, 0x03cc, 0x655d, 0x47d4, 0x4daa, 0x0877, 0x712d, 0x3617,
575 0x3264, 0x49aa, 0x7f9e, 0x588e, 0x4fbc, 0x7176, 0x7f91, 0x6c6d,
576 0x4b95, 0x5fb7, 0x3844, 0x4037, 0x0184, 0x081b, 0x0ee8, 0x5b91,
577 0x293d, 0x1f71, 0x0e6f, 0x402b, 0x5122, 0x1e52, 0x22be, 0x3d2d,
578 0x75bc, 0x7c60, 0x6291, 0x1a2f, 0x61d4, 0x74aa, 0x4140, 0x29ab,
579 0x472d, 0x2852, 0x017e, 0x15e8, 0x5ec2, 0x17cf, 0x7d0f, 0x06b8,
580 0x117a, 0x6b94, 0x789b, 0x3126, 0x6ac5, 0x5be7, 0x150f, 0x51f8,
581 0x7889, 0x0aa5, 0x663d, 0x77e8, 0x0b87, 0x3dcb, 0x360d, 0x218b,
582 0x512f, 0x7dc9, 0x6a4d, 0x630a, 0x3547, 0x1dd2, 0x5aea, 0x69a5,
583 0x7bfa, 0x5e4f, 0x1519, 0x6430, 0x3a0e, 0x5eb3, 0x5425, 0x0c7a,
584 0x5540, 0x3670, 0x63c1, 0x31e9, 0x5a39, 0x2de7, 0x5979, 0x2891,
585 0x1562, 0x014b, 0x5b05, 0x2756, 0x5a34, 0x13aa, 0x6cb5, 0x2c36,
586 0x5e72, 0x1306, 0x0861, 0x15ef, 0x1ee8, 0x5a37, 0x7ac4, 0x45dd,
587 0x44c4, 0x7266, 0x2f41, 0x3ccc, 0x045e, 0x7d40, 0x7c66, 0x0fa0,
588};
589
590static const u16 sunxi_nfc_randomizer_ecc1024_seeds[] = {
591 0x2cf5, 0x35f1, 0x63a4, 0x5274, 0x2bd2, 0x778b, 0x7285, 0x32b6,
592 0x6a5c, 0x70d6, 0x757d, 0x6769, 0x5375, 0x1e81, 0x0cf3, 0x3982,
593 0x6787, 0x042a, 0x6c49, 0x1925, 0x56a8, 0x40a9, 0x063e, 0x7bd9,
594 0x4dbf, 0x55ec, 0x672e, 0x7334, 0x5185, 0x4d00, 0x232a, 0x7e07,
595 0x445d, 0x6b92, 0x528f, 0x4255, 0x53ba, 0x7d82, 0x2a2e, 0x3a4e,
596 0x75eb, 0x450c, 0x6844, 0x1b5d, 0x581a, 0x4cc6, 0x0379, 0x37b2,
597 0x419f, 0x0e92, 0x6b27, 0x5624, 0x01e3, 0x07c1, 0x44a5, 0x130c,
598 0x13e8, 0x5910, 0x0876, 0x60c5, 0x54e3, 0x5b7f, 0x2269, 0x509f,
599 0x7665, 0x36fd, 0x3e9a, 0x0579, 0x6295, 0x14ef, 0x0a81, 0x1bcc,
600 0x4b16, 0x64db, 0x0514, 0x4f07, 0x0591, 0x3576, 0x6853, 0x0d9e,
601 0x259f, 0x38b7, 0x64fb, 0x3094, 0x4693, 0x6ddd, 0x29bb, 0x0bc8,
602 0x3f47, 0x490e, 0x0c0e, 0x7933, 0x3c9e, 0x5840, 0x398d, 0x3e68,
603 0x4af1, 0x71f5, 0x57cf, 0x1121, 0x64eb, 0x3579, 0x15ac, 0x584d,
604 0x5f2a, 0x47e2, 0x6528, 0x6eac, 0x196e, 0x6b96, 0x0450, 0x0179,
605 0x609c, 0x06e1, 0x4626, 0x42c7, 0x273e, 0x486f, 0x0705, 0x1601,
606 0x145b, 0x407e, 0x062b, 0x57a5, 0x53f9, 0x5659, 0x4410, 0x3ccd,
607};
608
/*
 * Advance the randomizer state by 'count' steps and return the
 * resulting 15-bit value. This mirrors in software the Fibonacci LFSR
 * (polynomial x16 + x15 + 1) implemented by the NFC hardware.
 */
static u16 sunxi_nfc_randomizer_step(u16 state, int count)
{
	u16 lfsr = state & 0x7fff;

	while (count--) {
		u16 feedback = (lfsr ^ (lfsr >> 1)) & 1;

		lfsr = ((lfsr >> 1) | (feedback << 14)) & 0x7fff;
	}

	return lfsr;
}
623
/*
 * Pick the randomizer seed for @page. mtd_div_by_ws(erasesize, mtd)
 * presumably yields pages-per-eraseblock -- TODO confirm; the seed
 * index wraps at that value, clamped to the 128-entry tables. When
 * @ecc is set, the pre-advanced per-ECC-step-size tables are used
 * (see the comment above the tables).
 */
static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc)
{
	const u16 *seeds = sunxi_nfc_randomizer_page_seeds;
	int mod = mtd_div_by_ws(mtd->erasesize, mtd);

	/* never index past the end of the seed tables */
	if (mod > ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds))
		mod = ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds);

	if (ecc) {
		if (mtd->ecc_step_size == 512)
			seeds = sunxi_nfc_randomizer_ecc512_seeds;
		else
			seeds = sunxi_nfc_randomizer_ecc1024_seeds;
	}

	return seeds[page % mod];
}
641
642static void sunxi_nfc_randomizer_config(struct mtd_info *mtd,
643 int page, bool ecc)
644{
645 struct nand_chip *nand = mtd_to_nand(mtd);
646 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
647 u32 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
648 u16 state;
649
650 if (!(nand->options & NAND_NEED_SCRAMBLING))
651 return;
652
653 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
654 state = sunxi_nfc_randomizer_state(mtd, page, ecc);
655 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK;
656 writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL);
657}
658
/* turn the hardware randomizer on (no-op if the chip needs no scrambling) */
static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);

	if (!(nand->options & NAND_NEED_SCRAMBLING))
		return;

	writel(readl(nfc->regs + NFC_REG_ECC_CTL) | NFC_RANDOM_EN,
	       nfc->regs + NFC_REG_ECC_CTL);
}
670
/* turn the hardware randomizer off (no-op if the chip needs no scrambling) */
static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);

	if (!(nand->options & NAND_NEED_SCRAMBLING))
		return;

	writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_EN,
	       nfc->regs + NFC_REG_ECC_CTL);
}
682
/*
 * (De)scramble the two Bad Block Marker bytes in place with the
 * ECC-mode seed for @page; XOR makes the operation its own inverse.
 */
static void sunxi_nfc_randomize_bbm(struct mtd_info *mtd, int page, u8 *bbm)
{
	u16 state = sunxi_nfc_randomizer_state(mtd, page, true);

	bbm[0] ^= state;
	/* second byte uses the state advanced by 8 LFSR steps */
	bbm[1] ^= sunxi_nfc_randomizer_step(state, 8);
}
690
/* write @buf through the NFC FIFO with the randomizer seeded for @page */
static void sunxi_nfc_randomizer_write_buf(struct mtd_info *mtd,
					   const uint8_t *buf, int len,
					   bool ecc, int page)
{
	sunxi_nfc_randomizer_config(mtd, page, ecc);
	sunxi_nfc_randomizer_enable(mtd);
	sunxi_nfc_write_buf(mtd, buf, len);
	sunxi_nfc_randomizer_disable(mtd);
}
700
/* read into @buf through the NFC FIFO with the randomizer seeded for @page */
static void sunxi_nfc_randomizer_read_buf(struct mtd_info *mtd, uint8_t *buf,
					  int len, bool ecc, int page)
{
	sunxi_nfc_randomizer_config(mtd, page, ecc);
	sunxi_nfc_randomizer_enable(mtd);
	sunxi_nfc_read_buf(mtd, buf, len);
	sunxi_nfc_randomizer_disable(mtd);
}
709
541static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd) 710static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
542{ 711{
543 struct nand_chip *nand = mtd_to_nand(mtd); 712 struct nand_chip *nand = mtd_to_nand(mtd);
@@ -574,18 +743,20 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
574 u8 *data, int data_off, 743 u8 *data, int data_off,
575 u8 *oob, int oob_off, 744 u8 *oob, int oob_off,
576 int *cur_off, 745 int *cur_off,
577 unsigned int *max_bitflips) 746 unsigned int *max_bitflips,
747 bool bbm, int page)
578{ 748{
579 struct nand_chip *nand = mtd_to_nand(mtd); 749 struct nand_chip *nand = mtd_to_nand(mtd);
580 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); 750 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
581 struct nand_ecc_ctrl *ecc = &nand->ecc; 751 struct nand_ecc_ctrl *ecc = &nand->ecc;
752 int raw_mode = 0;
582 u32 status; 753 u32 status;
583 int ret; 754 int ret;
584 755
585 if (*cur_off != data_off) 756 if (*cur_off != data_off)
586 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1); 757 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
587 758
588 sunxi_nfc_read_buf(mtd, NULL, ecc->size); 759 sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page);
589 760
590 if (data_off + ecc->size != oob_off) 761 if (data_off + ecc->size != oob_off)
591 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1); 762 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
@@ -594,25 +765,54 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
594 if (ret) 765 if (ret)
595 return ret; 766 return ret;
596 767
768 sunxi_nfc_randomizer_enable(mtd);
597 writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP, 769 writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
598 nfc->regs + NFC_REG_CMD); 770 nfc->regs + NFC_REG_CMD);
599 771
600 ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); 772 ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
773 sunxi_nfc_randomizer_disable(mtd);
601 if (ret) 774 if (ret)
602 return ret; 775 return ret;
603 776
777 *cur_off = oob_off + ecc->bytes + 4;
778
604 status = readl(nfc->regs + NFC_REG_ECC_ST); 779 status = readl(nfc->regs + NFC_REG_ECC_ST);
780 if (status & NFC_ECC_PAT_FOUND(0)) {
781 u8 pattern = 0xff;
782
783 if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1)))
784 pattern = 0x0;
785
786 memset(data, pattern, ecc->size);
787 memset(oob, pattern, ecc->bytes + 4);
788
789 return 1;
790 }
791
605 ret = NFC_ECC_ERR_CNT(0, readl(nfc->regs + NFC_REG_ECC_ERR_CNT(0))); 792 ret = NFC_ECC_ERR_CNT(0, readl(nfc->regs + NFC_REG_ECC_ERR_CNT(0)));
606 793
607 memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size); 794 memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
608 795
609 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1); 796 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
610 sunxi_nfc_read_buf(mtd, oob, ecc->bytes + 4); 797 sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4, true, page);
611 798
612 if (status & NFC_ECC_ERR(0)) { 799 if (status & NFC_ECC_ERR(0)) {
800 /*
801 * Re-read the data with the randomizer disabled to identify
802 * bitflips in erased pages.
803 */
804 if (nand->options & NAND_NEED_SCRAMBLING) {
805 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
806 nand->read_buf(mtd, data, ecc->size);
807 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
808 nand->read_buf(mtd, oob, ecc->bytes + 4);
809 }
810
613 ret = nand_check_erased_ecc_chunk(data, ecc->size, 811 ret = nand_check_erased_ecc_chunk(data, ecc->size,
614 oob, ecc->bytes + 4, 812 oob, ecc->bytes + 4,
615 NULL, 0, ecc->strength); 813 NULL, 0, ecc->strength);
814 if (ret >= 0)
815 raw_mode = 1;
616 } else { 816 } else {
617 /* 817 /*
618 * The engine protects 4 bytes of OOB data per chunk. 818 * The engine protects 4 bytes of OOB data per chunk.
@@ -620,6 +820,10 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
620 */ 820 */
621 sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(0)), 821 sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(0)),
622 oob); 822 oob);
823
824 /* De-randomize the Bad Block Marker. */
825 if (bbm && nand->options & NAND_NEED_SCRAMBLING)
826 sunxi_nfc_randomize_bbm(mtd, page, oob);
623 } 827 }
624 828
625 if (ret < 0) { 829 if (ret < 0) {
@@ -629,13 +833,12 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
629 *max_bitflips = max_t(unsigned int, *max_bitflips, ret); 833 *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
630 } 834 }
631 835
632 *cur_off = oob_off + ecc->bytes + 4; 836 return raw_mode;
633
634 return 0;
635} 837}
636 838
637static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd, 839static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
638 u8 *oob, int *cur_off) 840 u8 *oob, int *cur_off,
841 bool randomize, int page)
639{ 842{
640 struct nand_chip *nand = mtd_to_nand(mtd); 843 struct nand_chip *nand = mtd_to_nand(mtd);
641 struct nand_ecc_ctrl *ecc = &nand->ecc; 844 struct nand_ecc_ctrl *ecc = &nand->ecc;
@@ -649,7 +852,11 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
649 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, 852 nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
650 offset + mtd->writesize, -1); 853 offset + mtd->writesize, -1);
651 854
652 sunxi_nfc_read_buf(mtd, oob + offset, len); 855 if (!randomize)
856 sunxi_nfc_read_buf(mtd, oob + offset, len);
857 else
858 sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len,
859 false, page);
653 860
654 *cur_off = mtd->oobsize + mtd->writesize; 861 *cur_off = mtd->oobsize + mtd->writesize;
655} 862}
@@ -662,7 +869,8 @@ static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
662static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, 869static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
663 const u8 *data, int data_off, 870 const u8 *data, int data_off,
664 const u8 *oob, int oob_off, 871 const u8 *oob, int oob_off,
665 int *cur_off) 872 int *cur_off, bool bbm,
873 int page)
666{ 874{
667 struct nand_chip *nand = mtd_to_nand(mtd); 875 struct nand_chip *nand = mtd_to_nand(mtd);
668 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); 876 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
@@ -672,11 +880,20 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
672 if (data_off != *cur_off) 880 if (data_off != *cur_off)
673 nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1); 881 nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1);
674 882
675 sunxi_nfc_write_buf(mtd, data, ecc->size); 883 sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);
676 884
677 /* Fill OOB data in */ 885 /* Fill OOB data in */
678 writel(sunxi_nfc_buf_to_user_data(oob), 886 if ((nand->options & NAND_NEED_SCRAMBLING) && bbm) {
679 nfc->regs + NFC_REG_USER_DATA(0)); 887 u8 user_data[4];
888
889 memcpy(user_data, oob, 4);
890 sunxi_nfc_randomize_bbm(mtd, page, user_data);
891 writel(sunxi_nfc_buf_to_user_data(user_data),
892 nfc->regs + NFC_REG_USER_DATA(0));
893 } else {
894 writel(sunxi_nfc_buf_to_user_data(oob),
895 nfc->regs + NFC_REG_USER_DATA(0));
896 }
680 897
681 if (data_off + ecc->size != oob_off) 898 if (data_off + ecc->size != oob_off)
682 nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1); 899 nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1);
@@ -685,11 +902,13 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
685 if (ret) 902 if (ret)
686 return ret; 903 return ret;
687 904
905 sunxi_nfc_randomizer_enable(mtd);
688 writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | 906 writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
689 NFC_ACCESS_DIR | NFC_ECC_OP, 907 NFC_ACCESS_DIR | NFC_ECC_OP,
690 nfc->regs + NFC_REG_CMD); 908 nfc->regs + NFC_REG_CMD);
691 909
692 ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); 910 ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
911 sunxi_nfc_randomizer_disable(mtd);
693 if (ret) 912 if (ret)
694 return ret; 913 return ret;
695 914
@@ -699,7 +918,8 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
699} 918}
700 919
701static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd, 920static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
702 u8 *oob, int *cur_off) 921 u8 *oob, int *cur_off,
922 int page)
703{ 923{
704 struct nand_chip *nand = mtd_to_nand(mtd); 924 struct nand_chip *nand = mtd_to_nand(mtd);
705 struct nand_ecc_ctrl *ecc = &nand->ecc; 925 struct nand_ecc_ctrl *ecc = &nand->ecc;
@@ -713,7 +933,7 @@ static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
713 nand->cmdfunc(mtd, NAND_CMD_RNDIN, 933 nand->cmdfunc(mtd, NAND_CMD_RNDIN,
714 offset + mtd->writesize, -1); 934 offset + mtd->writesize, -1);
715 935
716 sunxi_nfc_write_buf(mtd, oob + offset, len); 936 sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);
717 937
718 *cur_off = mtd->oobsize + mtd->writesize; 938 *cur_off = mtd->oobsize + mtd->writesize;
719} 939}
@@ -725,6 +945,7 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
725 struct nand_ecc_ctrl *ecc = &chip->ecc; 945 struct nand_ecc_ctrl *ecc = &chip->ecc;
726 unsigned int max_bitflips = 0; 946 unsigned int max_bitflips = 0;
727 int ret, i, cur_off = 0; 947 int ret, i, cur_off = 0;
948 bool raw_mode = false;
728 949
729 sunxi_nfc_hw_ecc_enable(mtd); 950 sunxi_nfc_hw_ecc_enable(mtd);
730 951
@@ -736,13 +957,17 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
736 957
737 ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob, 958 ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
738 oob_off + mtd->writesize, 959 oob_off + mtd->writesize,
739 &cur_off, &max_bitflips); 960 &cur_off, &max_bitflips,
740 if (ret) 961 !i, page);
962 if (ret < 0)
741 return ret; 963 return ret;
964 else if (ret)
965 raw_mode = true;
742 } 966 }
743 967
744 if (oob_required) 968 if (oob_required)
745 sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off); 969 sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
970 !raw_mode, page);
746 971
747 sunxi_nfc_hw_ecc_disable(mtd); 972 sunxi_nfc_hw_ecc_disable(mtd);
748 973
@@ -767,13 +992,14 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
767 992
768 ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob, 993 ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
769 oob_off + mtd->writesize, 994 oob_off + mtd->writesize,
770 &cur_off); 995 &cur_off, !i, page);
771 if (ret) 996 if (ret)
772 return ret; 997 return ret;
773 } 998 }
774 999
775 if (oob_required) 1000 if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
776 sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, &cur_off); 1001 sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
1002 &cur_off, page);
777 1003
778 sunxi_nfc_hw_ecc_disable(mtd); 1004 sunxi_nfc_hw_ecc_disable(mtd);
779 1005
@@ -788,6 +1014,7 @@ static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
788 struct nand_ecc_ctrl *ecc = &chip->ecc; 1014 struct nand_ecc_ctrl *ecc = &chip->ecc;
789 unsigned int max_bitflips = 0; 1015 unsigned int max_bitflips = 0;
790 int ret, i, cur_off = 0; 1016 int ret, i, cur_off = 0;
1017 bool raw_mode = false;
791 1018
792 sunxi_nfc_hw_ecc_enable(mtd); 1019 sunxi_nfc_hw_ecc_enable(mtd);
793 1020
@@ -799,13 +1026,16 @@ static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
799 1026
800 ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob, 1027 ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
801 oob_off, &cur_off, 1028 oob_off, &cur_off,
802 &max_bitflips); 1029 &max_bitflips, !i, page);
803 if (ret) 1030 if (ret < 0)
804 return ret; 1031 return ret;
1032 else if (ret)
1033 raw_mode = true;
805 } 1034 }
806 1035
807 if (oob_required) 1036 if (oob_required)
808 sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off); 1037 sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
1038 !raw_mode, page);
809 1039
810 sunxi_nfc_hw_ecc_disable(mtd); 1040 sunxi_nfc_hw_ecc_disable(mtd);
811 1041
@@ -829,13 +1059,15 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
829 const u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4)); 1059 const u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4));
830 1060
831 ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, 1061 ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off,
832 oob, oob_off, &cur_off); 1062 oob, oob_off, &cur_off,
1063 false, page);
833 if (ret) 1064 if (ret)
834 return ret; 1065 return ret;
835 } 1066 }
836 1067
837 if (oob_required) 1068 if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
838 sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, &cur_off); 1069 sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
1070 &cur_off, page);
839 1071
840 sunxi_nfc_hw_ecc_disable(mtd); 1072 sunxi_nfc_hw_ecc_disable(mtd);
841 1073
@@ -1345,6 +1577,9 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
1345 if (nand->bbt_options & NAND_BBT_USE_FLASH) 1577 if (nand->bbt_options & NAND_BBT_USE_FLASH)
1346 nand->bbt_options |= NAND_BBT_NO_OOB; 1578 nand->bbt_options |= NAND_BBT_NO_OOB;
1347 1579
1580 if (nand->options & NAND_NEED_SCRAMBLING)
1581 nand->options |= NAND_NO_SUBPAGE_WRITE;
1582
1348 ret = sunxi_nand_chip_init_timings(chip, np); 1583 ret = sunxi_nand_chip_init_timings(chip, np);
1349 if (ret) { 1584 if (ret) {
1350 dev_err(dev, "could not configure chip timings: %d\n", ret); 1585 dev_err(dev, "could not configure chip timings: %d\n", ret);
diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
index 034420f313d5..293feb19b0b1 100644
--- a/drivers/mtd/nand/vf610_nfc.c
+++ b/drivers/mtd/nand/vf610_nfc.c
@@ -795,8 +795,6 @@ static int vf610_nfc_probe(struct platform_device *pdev)
795 goto error; 795 goto error;
796 } 796 }
797 797
798 /* propagate ecc.layout to mtd_info */
799 mtd->ecclayout = chip->ecc.layout;
800 chip->ecc.read_page = vf610_nfc_read_page; 798 chip->ecc.read_page = vf610_nfc_read_page;
801 chip->ecc.write_page = vf610_nfc_write_page; 799 chip->ecc.write_page = vf610_nfc_write_page;
802 800
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 43b3392ffee7..af28bb3ae7cf 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1124,11 +1124,7 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1124 pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from, 1124 pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
1125 (int)len); 1125 (int)len);
1126 1126
1127 if (ops->mode == MTD_OPS_AUTO_OOB) 1127 oobsize = mtd_oobavail(mtd, ops);
1128 oobsize = this->ecclayout->oobavail;
1129 else
1130 oobsize = mtd->oobsize;
1131
1132 oobcolumn = from & (mtd->oobsize - 1); 1128 oobcolumn = from & (mtd->oobsize - 1);
1133 1129
1134 /* Do not allow reads past end of device */ 1130 /* Do not allow reads past end of device */
@@ -1229,11 +1225,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1229 pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from, 1225 pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
1230 (int)len); 1226 (int)len);
1231 1227
1232 if (ops->mode == MTD_OPS_AUTO_OOB) 1228 oobsize = mtd_oobavail(mtd, ops);
1233 oobsize = this->ecclayout->oobavail;
1234 else
1235 oobsize = mtd->oobsize;
1236
1237 oobcolumn = from & (mtd->oobsize - 1); 1229 oobcolumn = from & (mtd->oobsize - 1);
1238 1230
1239 /* Do not allow reads past end of device */ 1231 /* Do not allow reads past end of device */
@@ -1365,7 +1357,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
1365 ops->oobretlen = 0; 1357 ops->oobretlen = 0;
1366 1358
1367 if (mode == MTD_OPS_AUTO_OOB) 1359 if (mode == MTD_OPS_AUTO_OOB)
1368 oobsize = this->ecclayout->oobavail; 1360 oobsize = mtd->oobavail;
1369 else 1361 else
1370 oobsize = mtd->oobsize; 1362 oobsize = mtd->oobsize;
1371 1363
@@ -1885,12 +1877,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1885 /* Check zero length */ 1877 /* Check zero length */
1886 if (!len) 1878 if (!len)
1887 return 0; 1879 return 0;
1888 1880 oobsize = mtd_oobavail(mtd, ops);
1889 if (ops->mode == MTD_OPS_AUTO_OOB)
1890 oobsize = this->ecclayout->oobavail;
1891 else
1892 oobsize = mtd->oobsize;
1893
1894 oobcolumn = to & (mtd->oobsize - 1); 1881 oobcolumn = to & (mtd->oobsize - 1);
1895 1882
1896 column = to & (mtd->writesize - 1); 1883 column = to & (mtd->writesize - 1);
@@ -2063,7 +2050,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
2063 ops->oobretlen = 0; 2050 ops->oobretlen = 0;
2064 2051
2065 if (mode == MTD_OPS_AUTO_OOB) 2052 if (mode == MTD_OPS_AUTO_OOB)
2066 oobsize = this->ecclayout->oobavail; 2053 oobsize = mtd->oobavail;
2067 else 2054 else
2068 oobsize = mtd->oobsize; 2055 oobsize = mtd->oobsize;
2069 2056
@@ -2599,6 +2586,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
2599 */ 2586 */
2600static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs) 2587static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
2601{ 2588{
2589 struct onenand_chip *this = mtd->priv;
2602 int ret; 2590 int ret;
2603 2591
2604 ret = onenand_block_isbad(mtd, ofs); 2592 ret = onenand_block_isbad(mtd, ofs);
@@ -2610,7 +2598,7 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
2610 } 2598 }
2611 2599
2612 onenand_get_device(mtd, FL_WRITING); 2600 onenand_get_device(mtd, FL_WRITING);
2613 ret = mtd_block_markbad(mtd, ofs); 2601 ret = this->block_markbad(mtd, ofs);
2614 onenand_release_device(mtd); 2602 onenand_release_device(mtd);
2615 return ret; 2603 return ret;
2616} 2604}
@@ -4049,12 +4037,10 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
4049 * The number of bytes available for a client to place data into 4037 * The number of bytes available for a client to place data into
4050 * the out of band area 4038 * the out of band area
4051 */ 4039 */
4052 this->ecclayout->oobavail = 0; 4040 mtd->oobavail = 0;
4053 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && 4041 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES &&
4054 this->ecclayout->oobfree[i].length; i++) 4042 this->ecclayout->oobfree[i].length; i++)
4055 this->ecclayout->oobavail += 4043 mtd->oobavail += this->ecclayout->oobfree[i].length;
4056 this->ecclayout->oobfree[i].length;
4057 mtd->oobavail = this->ecclayout->oobavail;
4058 4044
4059 mtd->ecclayout = this->ecclayout; 4045 mtd->ecclayout = this->ecclayout;
4060 mtd->ecc_strength = 1; 4046 mtd->ecc_strength = 1;
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index 08d0085f3e93..680188a88130 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -179,7 +179,7 @@ static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
179 * by the onenand_release function. 179 * by the onenand_release function.
180 * 180 *
181 */ 181 */
182int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) 182static int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
183{ 183{
184 struct onenand_chip *this = mtd->priv; 184 struct onenand_chip *this = mtd->priv;
185 struct bbm_info *bbm = this->bbm; 185 struct bbm_info *bbm = this->bbm;
@@ -247,6 +247,3 @@ int onenand_default_bbt(struct mtd_info *mtd)
247 247
248 return onenand_scan_bbt(mtd, bbm->badblock_pattern); 248 return onenand_scan_bbt(mtd, bbm->badblock_pattern);
249} 249}
250
251EXPORT_SYMBOL(onenand_scan_bbt);
252EXPORT_SYMBOL(onenand_default_bbt);
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 0dc927540b3d..d42c98e1f581 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -9,6 +9,7 @@ if MTD_SPI_NOR
9 9
10config MTD_MT81xx_NOR 10config MTD_MT81xx_NOR
11 tristate "Mediatek MT81xx SPI NOR flash controller" 11 tristate "Mediatek MT81xx SPI NOR flash controller"
12 depends on HAS_IOMEM
12 help 13 help
13 This enables access to SPI NOR flash, using MT81xx SPI NOR flash 14 This enables access to SPI NOR flash, using MT81xx SPI NOR flash
14 controller. This controller does not support generic SPI BUS, it only 15 controller. This controller does not support generic SPI BUS, it only
@@ -30,7 +31,7 @@ config MTD_SPI_NOR_USE_4K_SECTORS
30 31
31config SPI_FSL_QUADSPI 32config SPI_FSL_QUADSPI
32 tristate "Freescale Quad SPI controller" 33 tristate "Freescale Quad SPI controller"
33 depends on ARCH_MXC || COMPILE_TEST 34 depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
34 depends on HAS_IOMEM 35 depends on HAS_IOMEM
35 help 36 help
36 This enables support for the Quad SPI controller in master mode. 37 This enables support for the Quad SPI controller in master mode.
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index 54640f1eb3a1..9ab2b51d54b8 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -213,6 +213,7 @@ enum fsl_qspi_devtype {
213 FSL_QUADSPI_IMX6SX, 213 FSL_QUADSPI_IMX6SX,
214 FSL_QUADSPI_IMX7D, 214 FSL_QUADSPI_IMX7D,
215 FSL_QUADSPI_IMX6UL, 215 FSL_QUADSPI_IMX6UL,
216 FSL_QUADSPI_LS1021A,
216}; 217};
217 218
218struct fsl_qspi_devtype_data { 219struct fsl_qspi_devtype_data {
@@ -258,6 +259,14 @@ static struct fsl_qspi_devtype_data imx6ul_data = {
258 | QUADSPI_QUIRK_4X_INT_CLK, 259 | QUADSPI_QUIRK_4X_INT_CLK,
259}; 260};
260 261
262static struct fsl_qspi_devtype_data ls1021a_data = {
263 .devtype = FSL_QUADSPI_LS1021A,
264 .rxfifo = 128,
265 .txfifo = 64,
266 .ahb_buf_size = 1024,
267 .driver_data = 0,
268};
269
261#define FSL_QSPI_MAX_CHIP 4 270#define FSL_QSPI_MAX_CHIP 4
262struct fsl_qspi { 271struct fsl_qspi {
263 struct spi_nor nor[FSL_QSPI_MAX_CHIP]; 272 struct spi_nor nor[FSL_QSPI_MAX_CHIP];
@@ -275,6 +284,7 @@ struct fsl_qspi {
275 u32 clk_rate; 284 u32 clk_rate;
276 unsigned int chip_base_addr; /* We may support two chips. */ 285 unsigned int chip_base_addr; /* We may support two chips. */
277 bool has_second_chip; 286 bool has_second_chip;
287 bool big_endian;
278 struct mutex lock; 288 struct mutex lock;
279 struct pm_qos_request pm_qos_req; 289 struct pm_qos_request pm_qos_req;
280}; 290};
@@ -300,6 +310,28 @@ static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
300} 310}
301 311
302/* 312/*
313 * R/W functions for big- or little-endian registers:
314 * The qSPI controller's endian is independent of the CPU core's endian.
315 * So far, although the CPU core is little-endian but the qSPI have two
316 * versions for big-endian and little-endian.
317 */
318static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
319{
320 if (q->big_endian)
321 iowrite32be(val, addr);
322 else
323 iowrite32(val, addr);
324}
325
326static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
327{
328 if (q->big_endian)
329 return ioread32be(addr);
330 else
331 return ioread32(addr);
332}
333
334/*
303 * An IC bug makes us to re-arrange the 32-bit data. 335 * An IC bug makes us to re-arrange the 32-bit data.
304 * The following chips, such as IMX6SLX, have fixed this bug. 336 * The following chips, such as IMX6SLX, have fixed this bug.
305 */ 337 */
@@ -310,14 +342,14 @@ static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
310 342
311static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q) 343static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q)
312{ 344{
313 writel(QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); 345 qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
314 writel(QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR); 346 qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);
315} 347}
316 348
317static inline void fsl_qspi_lock_lut(struct fsl_qspi *q) 349static inline void fsl_qspi_lock_lut(struct fsl_qspi *q)
318{ 350{
319 writel(QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); 351 qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
320 writel(QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR); 352 qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
321} 353}
322 354
323static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id) 355static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id)
@@ -326,8 +358,8 @@ static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id)
326 u32 reg; 358 u32 reg;
327 359
328 /* clear interrupt */ 360 /* clear interrupt */
329 reg = readl(q->iobase + QUADSPI_FR); 361 reg = qspi_readl(q, q->iobase + QUADSPI_FR);
330 writel(reg, q->iobase + QUADSPI_FR); 362 qspi_writel(q, reg, q->iobase + QUADSPI_FR);
331 363
332 if (reg & QUADSPI_FR_TFF_MASK) 364 if (reg & QUADSPI_FR_TFF_MASK)
333 complete(&q->c); 365 complete(&q->c);
@@ -348,7 +380,7 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
348 380
349 /* Clear all the LUT table */ 381 /* Clear all the LUT table */
350 for (i = 0; i < QUADSPI_LUT_NUM; i++) 382 for (i = 0; i < QUADSPI_LUT_NUM; i++)
351 writel(0, base + QUADSPI_LUT_BASE + i * 4); 383 qspi_writel(q, 0, base + QUADSPI_LUT_BASE + i * 4);
352 384
353 /* Quad Read */ 385 /* Quad Read */
354 lut_base = SEQID_QUAD_READ * 4; 386 lut_base = SEQID_QUAD_READ * 4;
@@ -364,14 +396,15 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
364 dummy = 8; 396 dummy = 8;
365 } 397 }
366 398
367 writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), 399 qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
368 base + QUADSPI_LUT(lut_base)); 400 base + QUADSPI_LUT(lut_base));
369 writel(LUT0(DUMMY, PAD1, dummy) | LUT1(FSL_READ, PAD4, rxfifo), 401 qspi_writel(q, LUT0(DUMMY, PAD1, dummy) | LUT1(FSL_READ, PAD4, rxfifo),
370 base + QUADSPI_LUT(lut_base + 1)); 402 base + QUADSPI_LUT(lut_base + 1));
371 403
372 /* Write enable */ 404 /* Write enable */
373 lut_base = SEQID_WREN * 4; 405 lut_base = SEQID_WREN * 4;
374 writel(LUT0(CMD, PAD1, SPINOR_OP_WREN), base + QUADSPI_LUT(lut_base)); 406 qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WREN),
407 base + QUADSPI_LUT(lut_base));
375 408
376 /* Page Program */ 409 /* Page Program */
377 lut_base = SEQID_PP * 4; 410 lut_base = SEQID_PP * 4;
@@ -385,13 +418,15 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
385 addrlen = ADDR32BIT; 418 addrlen = ADDR32BIT;
386 } 419 }
387 420
388 writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), 421 qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
389 base + QUADSPI_LUT(lut_base)); 422 base + QUADSPI_LUT(lut_base));
390 writel(LUT0(FSL_WRITE, PAD1, 0), base + QUADSPI_LUT(lut_base + 1)); 423 qspi_writel(q, LUT0(FSL_WRITE, PAD1, 0),
424 base + QUADSPI_LUT(lut_base + 1));
391 425
392 /* Read Status */ 426 /* Read Status */
393 lut_base = SEQID_RDSR * 4; 427 lut_base = SEQID_RDSR * 4;
394 writel(LUT0(CMD, PAD1, SPINOR_OP_RDSR) | LUT1(FSL_READ, PAD1, 0x1), 428 qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDSR) |
429 LUT1(FSL_READ, PAD1, 0x1),
395 base + QUADSPI_LUT(lut_base)); 430 base + QUADSPI_LUT(lut_base));
396 431
397 /* Erase a sector */ 432 /* Erase a sector */
@@ -400,40 +435,46 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
400 cmd = q->nor[0].erase_opcode; 435 cmd = q->nor[0].erase_opcode;
401 addrlen = q->nor_size <= SZ_16M ? ADDR24BIT : ADDR32BIT; 436 addrlen = q->nor_size <= SZ_16M ? ADDR24BIT : ADDR32BIT;
402 437
403 writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), 438 qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
404 base + QUADSPI_LUT(lut_base)); 439 base + QUADSPI_LUT(lut_base));
405 440
406 /* Erase the whole chip */ 441 /* Erase the whole chip */
407 lut_base = SEQID_CHIP_ERASE * 4; 442 lut_base = SEQID_CHIP_ERASE * 4;
408 writel(LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE), 443 qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE),
409 base + QUADSPI_LUT(lut_base)); 444 base + QUADSPI_LUT(lut_base));
410 445
411 /* READ ID */ 446 /* READ ID */
412 lut_base = SEQID_RDID * 4; 447 lut_base = SEQID_RDID * 4;
413 writel(LUT0(CMD, PAD1, SPINOR_OP_RDID) | LUT1(FSL_READ, PAD1, 0x8), 448 qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDID) |
449 LUT1(FSL_READ, PAD1, 0x8),
414 base + QUADSPI_LUT(lut_base)); 450 base + QUADSPI_LUT(lut_base));
415 451
416 /* Write Register */ 452 /* Write Register */
417 lut_base = SEQID_WRSR * 4; 453 lut_base = SEQID_WRSR * 4;
418 writel(LUT0(CMD, PAD1, SPINOR_OP_WRSR) | LUT1(FSL_WRITE, PAD1, 0x2), 454 qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRSR) |
455 LUT1(FSL_WRITE, PAD1, 0x2),
419 base + QUADSPI_LUT(lut_base)); 456 base + QUADSPI_LUT(lut_base));
420 457
421 /* Read Configuration Register */ 458 /* Read Configuration Register */
422 lut_base = SEQID_RDCR * 4; 459 lut_base = SEQID_RDCR * 4;
423 writel(LUT0(CMD, PAD1, SPINOR_OP_RDCR) | LUT1(FSL_READ, PAD1, 0x1), 460 qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDCR) |
461 LUT1(FSL_READ, PAD1, 0x1),
424 base + QUADSPI_LUT(lut_base)); 462 base + QUADSPI_LUT(lut_base));
425 463
426 /* Write disable */ 464 /* Write disable */
427 lut_base = SEQID_WRDI * 4; 465 lut_base = SEQID_WRDI * 4;
428 writel(LUT0(CMD, PAD1, SPINOR_OP_WRDI), base + QUADSPI_LUT(lut_base)); 466 qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRDI),
467 base + QUADSPI_LUT(lut_base));
429 468
430 /* Enter 4 Byte Mode (Micron) */ 469 /* Enter 4 Byte Mode (Micron) */
431 lut_base = SEQID_EN4B * 4; 470 lut_base = SEQID_EN4B * 4;
432 writel(LUT0(CMD, PAD1, SPINOR_OP_EN4B), base + QUADSPI_LUT(lut_base)); 471 qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_EN4B),
472 base + QUADSPI_LUT(lut_base));
433 473
434 /* Enter 4 Byte Mode (Spansion) */ 474 /* Enter 4 Byte Mode (Spansion) */
435 lut_base = SEQID_BRWR * 4; 475 lut_base = SEQID_BRWR * 4;
436 writel(LUT0(CMD, PAD1, SPINOR_OP_BRWR), base + QUADSPI_LUT(lut_base)); 476 qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_BRWR),
477 base + QUADSPI_LUT(lut_base));
437 478
438 fsl_qspi_lock_lut(q); 479 fsl_qspi_lock_lut(q);
439} 480}
@@ -488,15 +529,16 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len)
488 q->chip_base_addr, addr, len, cmd); 529 q->chip_base_addr, addr, len, cmd);
489 530
490 /* save the reg */ 531 /* save the reg */
491 reg = readl(base + QUADSPI_MCR); 532 reg = qspi_readl(q, base + QUADSPI_MCR);
492 533
493 writel(q->memmap_phy + q->chip_base_addr + addr, base + QUADSPI_SFAR); 534 qspi_writel(q, q->memmap_phy + q->chip_base_addr + addr,
494 writel(QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS, 535 base + QUADSPI_SFAR);
536 qspi_writel(q, QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS,
495 base + QUADSPI_RBCT); 537 base + QUADSPI_RBCT);
496 writel(reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR); 538 qspi_writel(q, reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR);
497 539
498 do { 540 do {
499 reg2 = readl(base + QUADSPI_SR); 541 reg2 = qspi_readl(q, base + QUADSPI_SR);
500 if (reg2 & (QUADSPI_SR_IP_ACC_MASK | QUADSPI_SR_AHB_ACC_MASK)) { 542 if (reg2 & (QUADSPI_SR_IP_ACC_MASK | QUADSPI_SR_AHB_ACC_MASK)) {
501 udelay(1); 543 udelay(1);
502 dev_dbg(q->dev, "The controller is busy, 0x%x\n", reg2); 544 dev_dbg(q->dev, "The controller is busy, 0x%x\n", reg2);
@@ -507,21 +549,22 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len)
507 549
508 /* trigger the LUT now */ 550 /* trigger the LUT now */
509 seqid = fsl_qspi_get_seqid(q, cmd); 551 seqid = fsl_qspi_get_seqid(q, cmd);
510 writel((seqid << QUADSPI_IPCR_SEQID_SHIFT) | len, base + QUADSPI_IPCR); 552 qspi_writel(q, (seqid << QUADSPI_IPCR_SEQID_SHIFT) | len,
553 base + QUADSPI_IPCR);
511 554
512 /* Wait for the interrupt. */ 555 /* Wait for the interrupt. */
513 if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000))) { 556 if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000))) {
514 dev_err(q->dev, 557 dev_err(q->dev,
515 "cmd 0x%.2x timeout, addr@%.8x, FR:0x%.8x, SR:0x%.8x\n", 558 "cmd 0x%.2x timeout, addr@%.8x, FR:0x%.8x, SR:0x%.8x\n",
516 cmd, addr, readl(base + QUADSPI_FR), 559 cmd, addr, qspi_readl(q, base + QUADSPI_FR),
517 readl(base + QUADSPI_SR)); 560 qspi_readl(q, base + QUADSPI_SR));
518 err = -ETIMEDOUT; 561 err = -ETIMEDOUT;
519 } else { 562 } else {
520 err = 0; 563 err = 0;
521 } 564 }
522 565
523 /* restore the MCR */ 566 /* restore the MCR */
524 writel(reg, base + QUADSPI_MCR); 567 qspi_writel(q, reg, base + QUADSPI_MCR);
525 568
526 return err; 569 return err;
527} 570}
@@ -533,7 +576,7 @@ static void fsl_qspi_read_data(struct fsl_qspi *q, int len, u8 *rxbuf)
533 int i = 0; 576 int i = 0;
534 577
535 while (len > 0) { 578 while (len > 0) {
536 tmp = readl(q->iobase + QUADSPI_RBDR + i * 4); 579 tmp = qspi_readl(q, q->iobase + QUADSPI_RBDR + i * 4);
537 tmp = fsl_qspi_endian_xchg(q, tmp); 580 tmp = fsl_qspi_endian_xchg(q, tmp);
538 dev_dbg(q->dev, "chip addr:0x%.8x, rcv:0x%.8x\n", 581 dev_dbg(q->dev, "chip addr:0x%.8x, rcv:0x%.8x\n",
539 q->chip_base_addr, tmp); 582 q->chip_base_addr, tmp);
@@ -561,9 +604,9 @@ static inline void fsl_qspi_invalid(struct fsl_qspi *q)
561{ 604{
562 u32 reg; 605 u32 reg;
563 606
564 reg = readl(q->iobase + QUADSPI_MCR); 607 reg = qspi_readl(q, q->iobase + QUADSPI_MCR);
565 reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK; 608 reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK;
566 writel(reg, q->iobase + QUADSPI_MCR); 609 qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
567 610
568 /* 611 /*
569 * The minimum delay : 1 AHB + 2 SFCK clocks. 612 * The minimum delay : 1 AHB + 2 SFCK clocks.
@@ -572,7 +615,7 @@ static inline void fsl_qspi_invalid(struct fsl_qspi *q)
572 udelay(1); 615 udelay(1);
573 616
574 reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK); 617 reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK);
575 writel(reg, q->iobase + QUADSPI_MCR); 618 qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
576} 619}
577 620
578static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor, 621static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
@@ -586,20 +629,20 @@ static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
586 q->chip_base_addr, to, count); 629 q->chip_base_addr, to, count);
587 630
588 /* clear the TX FIFO. */ 631 /* clear the TX FIFO. */
589 tmp = readl(q->iobase + QUADSPI_MCR); 632 tmp = qspi_readl(q, q->iobase + QUADSPI_MCR);
590 writel(tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR); 633 qspi_writel(q, tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR);
591 634
592 /* fill the TX data to the FIFO */ 635 /* fill the TX data to the FIFO */
593 for (j = 0, i = ((count + 3) / 4); j < i; j++) { 636 for (j = 0, i = ((count + 3) / 4); j < i; j++) {
594 tmp = fsl_qspi_endian_xchg(q, *txbuf); 637 tmp = fsl_qspi_endian_xchg(q, *txbuf);
595 writel(tmp, q->iobase + QUADSPI_TBDR); 638 qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR);
596 txbuf++; 639 txbuf++;
597 } 640 }
598 641
599 /* fill the TXFIFO upto 16 bytes for i.MX7d */ 642 /* fill the TXFIFO upto 16 bytes for i.MX7d */
600 if (needs_fill_txfifo(q)) 643 if (needs_fill_txfifo(q))
601 for (; i < 4; i++) 644 for (; i < 4; i++)
602 writel(tmp, q->iobase + QUADSPI_TBDR); 645 qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR);
603 646
604 /* Trigger it */ 647 /* Trigger it */
605 ret = fsl_qspi_runcmd(q, opcode, to, count); 648 ret = fsl_qspi_runcmd(q, opcode, to, count);
@@ -615,10 +658,10 @@ static void fsl_qspi_set_map_addr(struct fsl_qspi *q)
615 int nor_size = q->nor_size; 658 int nor_size = q->nor_size;
616 void __iomem *base = q->iobase; 659 void __iomem *base = q->iobase;
617 660
618 writel(nor_size + q->memmap_phy, base + QUADSPI_SFA1AD); 661 qspi_writel(q, nor_size + q->memmap_phy, base + QUADSPI_SFA1AD);
619 writel(nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD); 662 qspi_writel(q, nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD);
620 writel(nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD); 663 qspi_writel(q, nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD);
621 writel(nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD); 664 qspi_writel(q, nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD);
622} 665}
623 666
624/* 667/*
@@ -640,24 +683,26 @@ static void fsl_qspi_init_abh_read(struct fsl_qspi *q)
640 int seqid; 683 int seqid;
641 684
642 /* AHB configuration for access buffer 0/1/2 .*/ 685 /* AHB configuration for access buffer 0/1/2 .*/
643 writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR); 686 qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR);
644 writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR); 687 qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR);
645 writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR); 688 qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR);
646 /* 689 /*
647 * Set ADATSZ with the maximum AHB buffer size to improve the 690 * Set ADATSZ with the maximum AHB buffer size to improve the
648 * read performance. 691 * read performance.
649 */ 692 */
650 writel(QUADSPI_BUF3CR_ALLMST_MASK | ((q->devtype_data->ahb_buf_size / 8) 693 qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
651 << QUADSPI_BUF3CR_ADATSZ_SHIFT), base + QUADSPI_BUF3CR); 694 ((q->devtype_data->ahb_buf_size / 8)
695 << QUADSPI_BUF3CR_ADATSZ_SHIFT),
696 base + QUADSPI_BUF3CR);
652 697
653 /* We only use the buffer3 */ 698 /* We only use the buffer3 */
654 writel(0, base + QUADSPI_BUF0IND); 699 qspi_writel(q, 0, base + QUADSPI_BUF0IND);
655 writel(0, base + QUADSPI_BUF1IND); 700 qspi_writel(q, 0, base + QUADSPI_BUF1IND);
656 writel(0, base + QUADSPI_BUF2IND); 701 qspi_writel(q, 0, base + QUADSPI_BUF2IND);
657 702
658 /* Set the default lut sequence for AHB Read. */ 703 /* Set the default lut sequence for AHB Read. */
659 seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode); 704 seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode);
660 writel(seqid << QUADSPI_BFGENCR_SEQID_SHIFT, 705 qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT,
661 q->iobase + QUADSPI_BFGENCR); 706 q->iobase + QUADSPI_BFGENCR);
662} 707}
663 708
@@ -713,7 +758,7 @@ static int fsl_qspi_nor_setup(struct fsl_qspi *q)
713 return ret; 758 return ret;
714 759
715 /* Reset the module */ 760 /* Reset the module */
716 writel(QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK, 761 qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
717 base + QUADSPI_MCR); 762 base + QUADSPI_MCR);
718 udelay(1); 763 udelay(1);
719 764
@@ -721,24 +766,24 @@ static int fsl_qspi_nor_setup(struct fsl_qspi *q)
721 fsl_qspi_init_lut(q); 766 fsl_qspi_init_lut(q);
722 767
723 /* Disable the module */ 768 /* Disable the module */
724 writel(QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK, 769 qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
725 base + QUADSPI_MCR); 770 base + QUADSPI_MCR);
726 771
727 reg = readl(base + QUADSPI_SMPR); 772 reg = qspi_readl(q, base + QUADSPI_SMPR);
728 writel(reg & ~(QUADSPI_SMPR_FSDLY_MASK 773 qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
729 | QUADSPI_SMPR_FSPHS_MASK 774 | QUADSPI_SMPR_FSPHS_MASK
730 | QUADSPI_SMPR_HSENA_MASK 775 | QUADSPI_SMPR_HSENA_MASK
731 | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR); 776 | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR);
732 777
733 /* Enable the module */ 778 /* Enable the module */
734 writel(QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK, 779 qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
735 base + QUADSPI_MCR); 780 base + QUADSPI_MCR);
736 781
737 /* clear all interrupt status */ 782 /* clear all interrupt status */
738 writel(0xffffffff, q->iobase + QUADSPI_FR); 783 qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR);
739 784
740 /* enable the interrupt */ 785 /* enable the interrupt */
741 writel(QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER); 786 qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
742 787
743 return 0; 788 return 0;
744} 789}
@@ -776,6 +821,7 @@ static const struct of_device_id fsl_qspi_dt_ids[] = {
776 { .compatible = "fsl,imx6sx-qspi", .data = (void *)&imx6sx_data, }, 821 { .compatible = "fsl,imx6sx-qspi", .data = (void *)&imx6sx_data, },
777 { .compatible = "fsl,imx7d-qspi", .data = (void *)&imx7d_data, }, 822 { .compatible = "fsl,imx7d-qspi", .data = (void *)&imx7d_data, },
778 { .compatible = "fsl,imx6ul-qspi", .data = (void *)&imx6ul_data, }, 823 { .compatible = "fsl,imx6ul-qspi", .data = (void *)&imx6ul_data, },
824 { .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, },
779 { /* sentinel */ } 825 { /* sentinel */ }
780}; 826};
781MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids); 827MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
@@ -954,6 +1000,7 @@ static int fsl_qspi_probe(struct platform_device *pdev)
954 if (IS_ERR(q->iobase)) 1000 if (IS_ERR(q->iobase))
955 return PTR_ERR(q->iobase); 1001 return PTR_ERR(q->iobase);
956 1002
1003 q->big_endian = of_property_read_bool(np, "big-endian");
957 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 1004 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
958 "QuadSPI-memory"); 1005 "QuadSPI-memory");
959 if (!devm_request_mem_region(dev, res->start, resource_size(res), 1006 if (!devm_request_mem_region(dev, res->start, resource_size(res),
@@ -1101,8 +1148,8 @@ static int fsl_qspi_remove(struct platform_device *pdev)
1101 } 1148 }
1102 1149
1103 /* disable the hardware */ 1150 /* disable the hardware */
1104 writel(QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); 1151 qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
1105 writel(0x0, q->iobase + QUADSPI_RSER); 1152 qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);
1106 1153
1107 mutex_destroy(&q->lock); 1154 mutex_destroy(&q->lock);
1108 1155
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
index d5f850d035bb..8bed1a4cb79c 100644
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -371,8 +371,8 @@ static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
371 return ret; 371 return ret;
372} 372}
373 373
374static int __init mtk_nor_init(struct mt8173_nor *mt8173_nor, 374static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
375 struct device_node *flash_node) 375 struct device_node *flash_node)
376{ 376{
377 int ret; 377 int ret;
378 struct spi_nor *nor; 378 struct spi_nor *nor;
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index ed0c19c558b5..157841dc3e99 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -61,14 +61,20 @@ struct flash_info {
61 u16 addr_width; 61 u16 addr_width;
62 62
63 u16 flags; 63 u16 flags;
64#define SECT_4K 0x01 /* SPINOR_OP_BE_4K works uniformly */ 64#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
65#define SPI_NOR_NO_ERASE 0x02 /* No erase command needed */ 65#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
66#define SST_WRITE 0x04 /* use SST byte programming */ 66#define SST_WRITE BIT(2) /* use SST byte programming */
67#define SPI_NOR_NO_FR 0x08 /* Can't do fastread */ 67#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
68#define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */ 68#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
69#define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */ 69#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
70#define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */ 70#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
71#define USE_FSR 0x80 /* use flag status register */ 71#define USE_FSR BIT(7) /* use flag status register */
72#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
73#define SPI_NOR_HAS_TB BIT(9) /*
74 * Flash SR has Top/Bottom (TB) protect
75 * bit. Must be used with
76 * SPI_NOR_HAS_LOCK.
77 */
72}; 78};
73 79
74#define JEDEC_MFR(info) ((info)->id[0]) 80#define JEDEC_MFR(info) ((info)->id[0])
@@ -434,32 +440,58 @@ static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
434 } else { 440 } else {
435 pow = ((sr & mask) ^ mask) >> shift; 441 pow = ((sr & mask) ^ mask) >> shift;
436 *len = mtd->size >> pow; 442 *len = mtd->size >> pow;
437 *ofs = mtd->size - *len; 443 if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
444 *ofs = 0;
445 else
446 *ofs = mtd->size - *len;
438 } 447 }
439} 448}
440 449
441/* 450/*
442 * Return 1 if the entire region is locked, 0 otherwise 451 * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
452 * @locked is false); 0 otherwise
443 */ 453 */
444static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len, 454static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
445 u8 sr) 455 u8 sr, bool locked)
446{ 456{
447 loff_t lock_offs; 457 loff_t lock_offs;
448 uint64_t lock_len; 458 uint64_t lock_len;
449 459
460 if (!len)
461 return 1;
462
450 stm_get_locked_range(nor, sr, &lock_offs, &lock_len); 463 stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
451 464
452 return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs); 465 if (locked)
466 /* Requested range is a sub-range of locked range */
467 return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
468 else
469 /* Requested range does not overlap with locked range */
470 return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
471}
472
473static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
474 u8 sr)
475{
476 return stm_check_lock_status_sr(nor, ofs, len, sr, true);
477}
478
479static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
480 u8 sr)
481{
482 return stm_check_lock_status_sr(nor, ofs, len, sr, false);
453} 483}
454 484
455/* 485/*
456 * Lock a region of the flash. Compatible with ST Micro and similar flash. 486 * Lock a region of the flash. Compatible with ST Micro and similar flash.
457 * Supports only the block protection bits BP{0,1,2} in the status register 487 * Supports the block protection bits BP{0,1,2} in the status register
458 * (SR). Does not support these features found in newer SR bitfields: 488 * (SR). Does not support these features found in newer SR bitfields:
459 * - TB: top/bottom protect - only handle TB=0 (top protect)
460 * - SEC: sector/block protect - only handle SEC=0 (block protect) 489 * - SEC: sector/block protect - only handle SEC=0 (block protect)
461 * - CMP: complement protect - only support CMP=0 (range is not complemented) 490 * - CMP: complement protect - only support CMP=0 (range is not complemented)
462 * 491 *
492 * Support for the following is provided conditionally for some flash:
493 * - TB: top/bottom protect
494 *
463 * Sample table portion for 8MB flash (Winbond w25q64fw): 495 * Sample table portion for 8MB flash (Winbond w25q64fw):
464 * 496 *
465 * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion 497 * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
@@ -472,6 +504,13 @@ static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
472 * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4 504 * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
473 * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2 505 * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
474 * X | X | 1 | 1 | 1 | 8 MB | ALL 506 * X | X | 1 | 1 | 1 | 8 MB | ALL
507 * ------|-------|-------|-------|-------|---------------|-------------------
508 * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
509 * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
510 * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
511 * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
512 * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
513 * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
475 * 514 *
476 * Returns negative on errors, 0 on success. 515 * Returns negative on errors, 0 on success.
477 */ 516 */
@@ -481,20 +520,39 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
481 int status_old, status_new; 520 int status_old, status_new;
482 u8 mask = SR_BP2 | SR_BP1 | SR_BP0; 521 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
483 u8 shift = ffs(mask) - 1, pow, val; 522 u8 shift = ffs(mask) - 1, pow, val;
523 loff_t lock_len;
524 bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
525 bool use_top;
484 int ret; 526 int ret;
485 527
486 status_old = read_sr(nor); 528 status_old = read_sr(nor);
487 if (status_old < 0) 529 if (status_old < 0)
488 return status_old; 530 return status_old;
489 531
490 /* SPI NOR always locks to the end */ 532 /* If nothing in our range is unlocked, we don't need to do anything */
491 if (ofs + len != mtd->size) { 533 if (stm_is_locked_sr(nor, ofs, len, status_old))
492 /* Does combined region extend to end? */ 534 return 0;
493 if (!stm_is_locked_sr(nor, ofs + len, mtd->size - ofs - len, 535
494 status_old)) 536 /* If anything below us is unlocked, we can't use 'bottom' protection */
495 return -EINVAL; 537 if (!stm_is_locked_sr(nor, 0, ofs, status_old))
496 len = mtd->size - ofs; 538 can_be_bottom = false;
497 } 539
540 /* If anything above us is unlocked, we can't use 'top' protection */
541 if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
542 status_old))
543 can_be_top = false;
544
545 if (!can_be_bottom && !can_be_top)
546 return -EINVAL;
547
548 /* Prefer top, if both are valid */
549 use_top = can_be_top;
550
551 /* lock_len: length of region that should end up locked */
552 if (use_top)
553 lock_len = mtd->size - ofs;
554 else
555 lock_len = ofs + len;
498 556
499 /* 557 /*
500 * Need smallest pow such that: 558 * Need smallest pow such that:
@@ -505,7 +563,7 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
505 * 563 *
506 * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len)) 564 * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
507 */ 565 */
508 pow = ilog2(mtd->size) - ilog2(len); 566 pow = ilog2(mtd->size) - ilog2(lock_len);
509 val = mask - (pow << shift); 567 val = mask - (pow << shift);
510 if (val & ~mask) 568 if (val & ~mask)
511 return -EINVAL; 569 return -EINVAL;
@@ -513,10 +571,20 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
513 if (!(val & mask)) 571 if (!(val & mask))
514 return -EINVAL; 572 return -EINVAL;
515 573
516 status_new = (status_old & ~mask) | val; 574 status_new = (status_old & ~mask & ~SR_TB) | val;
575
576 /* Disallow further writes if WP pin is asserted */
577 status_new |= SR_SRWD;
578
579 if (!use_top)
580 status_new |= SR_TB;
581
582 /* Don't bother if they're the same */
583 if (status_new == status_old)
584 return 0;
517 585
518 /* Only modify protection if it will not unlock other areas */ 586 /* Only modify protection if it will not unlock other areas */
519 if ((status_new & mask) <= (status_old & mask)) 587 if ((status_new & mask) < (status_old & mask))
520 return -EINVAL; 588 return -EINVAL;
521 589
522 write_enable(nor); 590 write_enable(nor);
@@ -537,17 +605,40 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
537 int status_old, status_new; 605 int status_old, status_new;
538 u8 mask = SR_BP2 | SR_BP1 | SR_BP0; 606 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
539 u8 shift = ffs(mask) - 1, pow, val; 607 u8 shift = ffs(mask) - 1, pow, val;
608 loff_t lock_len;
609 bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
610 bool use_top;
540 int ret; 611 int ret;
541 612
542 status_old = read_sr(nor); 613 status_old = read_sr(nor);
543 if (status_old < 0) 614 if (status_old < 0)
544 return status_old; 615 return status_old;
545 616
546 /* Cannot unlock; would unlock larger region than requested */ 617 /* If nothing in our range is locked, we don't need to do anything */
547 if (stm_is_locked_sr(nor, ofs - mtd->erasesize, mtd->erasesize, 618 if (stm_is_unlocked_sr(nor, ofs, len, status_old))
548 status_old)) 619 return 0;
620
621 /* If anything below us is locked, we can't use 'top' protection */
622 if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
623 can_be_top = false;
624
625 /* If anything above us is locked, we can't use 'bottom' protection */
626 if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
627 status_old))
628 can_be_bottom = false;
629
630 if (!can_be_bottom && !can_be_top)
549 return -EINVAL; 631 return -EINVAL;
550 632
633 /* Prefer top, if both are valid */
634 use_top = can_be_top;
635
636 /* lock_len: length of region that should remain locked */
637 if (use_top)
638 lock_len = mtd->size - (ofs + len);
639 else
640 lock_len = ofs;
641
551 /* 642 /*
552 * Need largest pow such that: 643 * Need largest pow such that:
553 * 644 *
@@ -557,8 +648,8 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
557 * 648 *
558 * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len)) 649 * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
559 */ 650 */
560 pow = ilog2(mtd->size) - order_base_2(mtd->size - (ofs + len)); 651 pow = ilog2(mtd->size) - order_base_2(lock_len);
561 if (ofs + len == mtd->size) { 652 if (lock_len == 0) {
562 val = 0; /* fully unlocked */ 653 val = 0; /* fully unlocked */
563 } else { 654 } else {
564 val = mask - (pow << shift); 655 val = mask - (pow << shift);
@@ -567,10 +658,21 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
567 return -EINVAL; 658 return -EINVAL;
568 } 659 }
569 660
570 status_new = (status_old & ~mask) | val; 661 status_new = (status_old & ~mask & ~SR_TB) | val;
662
663 /* Don't protect status register if we're fully unlocked */
664 if (lock_len == mtd->size)
665 status_new &= ~SR_SRWD;
666
667 if (!use_top)
668 status_new |= SR_TB;
669
670 /* Don't bother if they're the same */
671 if (status_new == status_old)
672 return 0;
571 673
572 /* Only modify protection if it will not lock other areas */ 674 /* Only modify protection if it will not lock other areas */
573 if ((status_new & mask) >= (status_old & mask)) 675 if ((status_new & mask) > (status_old & mask))
574 return -EINVAL; 676 return -EINVAL;
575 677
576 write_enable(nor); 678 write_enable(nor);
@@ -762,8 +864,8 @@ static const struct flash_info spi_nor_ids[] = {
762 { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) }, 864 { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
763 { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) }, 865 { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
764 { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) }, 866 { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
765 { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) }, 867 { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
766 { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) }, 868 { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
767 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) }, 869 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
768 { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, 870 { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
769 { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, 871 { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
@@ -797,6 +899,7 @@ static const struct flash_info spi_nor_ids[] = {
797 { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, 899 { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
798 { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, 900 { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
799 { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, 901 { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
902 { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
800 { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) }, 903 { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
801 { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) }, 904 { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
802 { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) }, 905 { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
@@ -860,11 +963,23 @@ static const struct flash_info spi_nor_ids[] = {
860 { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, 963 { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
861 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, 964 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
862 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) }, 965 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
863 { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, 966 {
967 "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
968 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
969 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
970 },
864 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, 971 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
865 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, 972 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
866 { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, 973 {
867 { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, 974 "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
975 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
976 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
977 },
978 {
979 "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
980 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
981 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
982 },
868 { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, 983 { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
869 { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) }, 984 { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
870 { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, 985 { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
@@ -1100,45 +1215,6 @@ static int spansion_quad_enable(struct spi_nor *nor)
1100 return 0; 1215 return 0;
1101} 1216}
1102 1217
1103static int micron_quad_enable(struct spi_nor *nor)
1104{
1105 int ret;
1106 u8 val;
1107
1108 ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
1109 if (ret < 0) {
1110 dev_err(nor->dev, "error %d reading EVCR\n", ret);
1111 return ret;
1112 }
1113
1114 write_enable(nor);
1115
1116 /* set EVCR, enable quad I/O */
1117 nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
1118 ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1);
1119 if (ret < 0) {
1120 dev_err(nor->dev, "error while writing EVCR register\n");
1121 return ret;
1122 }
1123
1124 ret = spi_nor_wait_till_ready(nor);
1125 if (ret)
1126 return ret;
1127
1128 /* read EVCR and check it */
1129 ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
1130 if (ret < 0) {
1131 dev_err(nor->dev, "error %d reading EVCR\n", ret);
1132 return ret;
1133 }
1134 if (val & EVCR_QUAD_EN_MICRON) {
1135 dev_err(nor->dev, "Micron EVCR Quad bit not clear\n");
1136 return -EINVAL;
1137 }
1138
1139 return 0;
1140}
1141
1142static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info) 1218static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
1143{ 1219{
1144 int status; 1220 int status;
@@ -1152,12 +1228,7 @@ static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
1152 } 1228 }
1153 return status; 1229 return status;
1154 case SNOR_MFR_MICRON: 1230 case SNOR_MFR_MICRON:
1155 status = micron_quad_enable(nor); 1231 return 0;
1156 if (status) {
1157 dev_err(nor->dev, "Micron quad-read not enabled\n");
1158 return -EINVAL;
1159 }
1160 return status;
1161 default: 1232 default:
1162 status = spansion_quad_enable(nor); 1233 status = spansion_quad_enable(nor);
1163 if (status) { 1234 if (status) {
@@ -1233,9 +1304,11 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
1233 1304
1234 if (JEDEC_MFR(info) == SNOR_MFR_ATMEL || 1305 if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
1235 JEDEC_MFR(info) == SNOR_MFR_INTEL || 1306 JEDEC_MFR(info) == SNOR_MFR_INTEL ||
1236 JEDEC_MFR(info) == SNOR_MFR_SST) { 1307 JEDEC_MFR(info) == SNOR_MFR_SST ||
1308 info->flags & SPI_NOR_HAS_LOCK) {
1237 write_enable(nor); 1309 write_enable(nor);
1238 write_sr(nor, 0); 1310 write_sr(nor, 0);
1311 spi_nor_wait_till_ready(nor);
1239 } 1312 }
1240 1313
1241 if (!mtd->name) 1314 if (!mtd->name)
@@ -1249,7 +1322,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
1249 mtd->_read = spi_nor_read; 1322 mtd->_read = spi_nor_read;
1250 1323
1251 /* NOR protection support for STmicro/Micron chips and similar */ 1324 /* NOR protection support for STmicro/Micron chips and similar */
1252 if (JEDEC_MFR(info) == SNOR_MFR_MICRON) { 1325 if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
1326 info->flags & SPI_NOR_HAS_LOCK) {
1253 nor->flash_lock = stm_lock; 1327 nor->flash_lock = stm_lock;
1254 nor->flash_unlock = stm_unlock; 1328 nor->flash_unlock = stm_unlock;
1255 nor->flash_is_locked = stm_is_locked; 1329 nor->flash_is_locked = stm_is_locked;
@@ -1269,6 +1343,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
1269 1343
1270 if (info->flags & USE_FSR) 1344 if (info->flags & USE_FSR)
1271 nor->flags |= SNOR_F_USE_FSR; 1345 nor->flags |= SNOR_F_USE_FSR;
1346 if (info->flags & SPI_NOR_HAS_TB)
1347 nor->flags |= SNOR_F_HAS_SR_TB;
1272 1348
1273#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS 1349#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
1274 /* prefer "small sector" erase if possible */ 1350 /* prefer "small sector" erase if possible */
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index 31762120eb56..1cb3f7758fb6 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -215,19 +215,19 @@ static int verify_eraseblock(int ebnum)
215 pr_info("ignoring error as within bitflip_limit\n"); 215 pr_info("ignoring error as within bitflip_limit\n");
216 } 216 }
217 217
218 if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) { 218 if (use_offset != 0 || use_len < mtd->oobavail) {
219 int k; 219 int k;
220 220
221 ops.mode = MTD_OPS_AUTO_OOB; 221 ops.mode = MTD_OPS_AUTO_OOB;
222 ops.len = 0; 222 ops.len = 0;
223 ops.retlen = 0; 223 ops.retlen = 0;
224 ops.ooblen = mtd->ecclayout->oobavail; 224 ops.ooblen = mtd->oobavail;
225 ops.oobretlen = 0; 225 ops.oobretlen = 0;
226 ops.ooboffs = 0; 226 ops.ooboffs = 0;
227 ops.datbuf = NULL; 227 ops.datbuf = NULL;
228 ops.oobbuf = readbuf; 228 ops.oobbuf = readbuf;
229 err = mtd_read_oob(mtd, addr, &ops); 229 err = mtd_read_oob(mtd, addr, &ops);
230 if (err || ops.oobretlen != mtd->ecclayout->oobavail) { 230 if (err || ops.oobretlen != mtd->oobavail) {
231 pr_err("error: readoob failed at %#llx\n", 231 pr_err("error: readoob failed at %#llx\n",
232 (long long)addr); 232 (long long)addr);
233 errcnt += 1; 233 errcnt += 1;
@@ -244,7 +244,7 @@ static int verify_eraseblock(int ebnum)
244 /* verify post-(use_offset + use_len) area for 0xff */ 244 /* verify post-(use_offset + use_len) area for 0xff */
245 k = use_offset + use_len; 245 k = use_offset + use_len;
246 bitflips += memffshow(addr, k, readbuf + k, 246 bitflips += memffshow(addr, k, readbuf + k,
247 mtd->ecclayout->oobavail - k); 247 mtd->oobavail - k);
248 248
249 if (bitflips > bitflip_limit) { 249 if (bitflips > bitflip_limit) {
250 pr_err("error: verify failed at %#llx\n", 250 pr_err("error: verify failed at %#llx\n",
@@ -269,8 +269,8 @@ static int verify_eraseblock_in_one_go(int ebnum)
269 struct mtd_oob_ops ops; 269 struct mtd_oob_ops ops;
270 int err = 0; 270 int err = 0;
271 loff_t addr = (loff_t)ebnum * mtd->erasesize; 271 loff_t addr = (loff_t)ebnum * mtd->erasesize;
272 size_t len = mtd->ecclayout->oobavail * pgcnt; 272 size_t len = mtd->oobavail * pgcnt;
273 size_t oobavail = mtd->ecclayout->oobavail; 273 size_t oobavail = mtd->oobavail;
274 size_t bitflips; 274 size_t bitflips;
275 int i; 275 int i;
276 276
@@ -394,8 +394,8 @@ static int __init mtd_oobtest_init(void)
394 goto out; 394 goto out;
395 395
396 use_offset = 0; 396 use_offset = 0;
397 use_len = mtd->ecclayout->oobavail; 397 use_len = mtd->oobavail;
398 use_len_max = mtd->ecclayout->oobavail; 398 use_len_max = mtd->oobavail;
399 vary_offset = 0; 399 vary_offset = 0;
400 400
401 /* First test: write all OOB, read it back and verify */ 401 /* First test: write all OOB, read it back and verify */
@@ -460,8 +460,8 @@ static int __init mtd_oobtest_init(void)
460 460
461 /* Write all eraseblocks */ 461 /* Write all eraseblocks */
462 use_offset = 0; 462 use_offset = 0;
463 use_len = mtd->ecclayout->oobavail; 463 use_len = mtd->oobavail;
464 use_len_max = mtd->ecclayout->oobavail; 464 use_len_max = mtd->oobavail;
465 vary_offset = 1; 465 vary_offset = 1;
466 prandom_seed_state(&rnd_state, 5); 466 prandom_seed_state(&rnd_state, 5);
467 467
@@ -471,8 +471,8 @@ static int __init mtd_oobtest_init(void)
471 471
472 /* Check all eraseblocks */ 472 /* Check all eraseblocks */
473 use_offset = 0; 473 use_offset = 0;
474 use_len = mtd->ecclayout->oobavail; 474 use_len = mtd->oobavail;
475 use_len_max = mtd->ecclayout->oobavail; 475 use_len_max = mtd->oobavail;
476 vary_offset = 1; 476 vary_offset = 1;
477 prandom_seed_state(&rnd_state, 5); 477 prandom_seed_state(&rnd_state, 5);
478 err = verify_all_eraseblocks(); 478 err = verify_all_eraseblocks();
@@ -480,8 +480,8 @@ static int __init mtd_oobtest_init(void)
480 goto out; 480 goto out;
481 481
482 use_offset = 0; 482 use_offset = 0;
483 use_len = mtd->ecclayout->oobavail; 483 use_len = mtd->oobavail;
484 use_len_max = mtd->ecclayout->oobavail; 484 use_len_max = mtd->oobavail;
485 vary_offset = 0; 485 vary_offset = 0;
486 486
487 /* Fourth test: try to write off end of device */ 487 /* Fourth test: try to write off end of device */
@@ -501,7 +501,7 @@ static int __init mtd_oobtest_init(void)
501 ops.retlen = 0; 501 ops.retlen = 0;
502 ops.ooblen = 1; 502 ops.ooblen = 1;
503 ops.oobretlen = 0; 503 ops.oobretlen = 0;
504 ops.ooboffs = mtd->ecclayout->oobavail; 504 ops.ooboffs = mtd->oobavail;
505 ops.datbuf = NULL; 505 ops.datbuf = NULL;
506 ops.oobbuf = writebuf; 506 ops.oobbuf = writebuf;
507 pr_info("attempting to start write past end of OOB\n"); 507 pr_info("attempting to start write past end of OOB\n");
@@ -521,7 +521,7 @@ static int __init mtd_oobtest_init(void)
521 ops.retlen = 0; 521 ops.retlen = 0;
522 ops.ooblen = 1; 522 ops.ooblen = 1;
523 ops.oobretlen = 0; 523 ops.oobretlen = 0;
524 ops.ooboffs = mtd->ecclayout->oobavail; 524 ops.ooboffs = mtd->oobavail;
525 ops.datbuf = NULL; 525 ops.datbuf = NULL;
526 ops.oobbuf = readbuf; 526 ops.oobbuf = readbuf;
527 pr_info("attempting to start read past end of OOB\n"); 527 pr_info("attempting to start read past end of OOB\n");
@@ -543,7 +543,7 @@ static int __init mtd_oobtest_init(void)
543 ops.mode = MTD_OPS_AUTO_OOB; 543 ops.mode = MTD_OPS_AUTO_OOB;
544 ops.len = 0; 544 ops.len = 0;
545 ops.retlen = 0; 545 ops.retlen = 0;
546 ops.ooblen = mtd->ecclayout->oobavail + 1; 546 ops.ooblen = mtd->oobavail + 1;
547 ops.oobretlen = 0; 547 ops.oobretlen = 0;
548 ops.ooboffs = 0; 548 ops.ooboffs = 0;
549 ops.datbuf = NULL; 549 ops.datbuf = NULL;
@@ -563,7 +563,7 @@ static int __init mtd_oobtest_init(void)
563 ops.mode = MTD_OPS_AUTO_OOB; 563 ops.mode = MTD_OPS_AUTO_OOB;
564 ops.len = 0; 564 ops.len = 0;
565 ops.retlen = 0; 565 ops.retlen = 0;
566 ops.ooblen = mtd->ecclayout->oobavail + 1; 566 ops.ooblen = mtd->oobavail + 1;
567 ops.oobretlen = 0; 567 ops.oobretlen = 0;
568 ops.ooboffs = 0; 568 ops.ooboffs = 0;
569 ops.datbuf = NULL; 569 ops.datbuf = NULL;
@@ -587,7 +587,7 @@ static int __init mtd_oobtest_init(void)
587 ops.mode = MTD_OPS_AUTO_OOB; 587 ops.mode = MTD_OPS_AUTO_OOB;
588 ops.len = 0; 588 ops.len = 0;
589 ops.retlen = 0; 589 ops.retlen = 0;
590 ops.ooblen = mtd->ecclayout->oobavail; 590 ops.ooblen = mtd->oobavail;
591 ops.oobretlen = 0; 591 ops.oobretlen = 0;
592 ops.ooboffs = 1; 592 ops.ooboffs = 1;
593 ops.datbuf = NULL; 593 ops.datbuf = NULL;
@@ -607,7 +607,7 @@ static int __init mtd_oobtest_init(void)
607 ops.mode = MTD_OPS_AUTO_OOB; 607 ops.mode = MTD_OPS_AUTO_OOB;
608 ops.len = 0; 608 ops.len = 0;
609 ops.retlen = 0; 609 ops.retlen = 0;
610 ops.ooblen = mtd->ecclayout->oobavail; 610 ops.ooblen = mtd->oobavail;
611 ops.oobretlen = 0; 611 ops.oobretlen = 0;
612 ops.ooboffs = 1; 612 ops.ooboffs = 1;
613 ops.datbuf = NULL; 613 ops.datbuf = NULL;
@@ -638,7 +638,7 @@ static int __init mtd_oobtest_init(void)
638 for (i = 0; i < ebcnt - 1; ++i) { 638 for (i = 0; i < ebcnt - 1; ++i) {
639 int cnt = 2; 639 int cnt = 2;
640 int pg; 640 int pg;
641 size_t sz = mtd->ecclayout->oobavail; 641 size_t sz = mtd->oobavail;
642 if (bbt[i] || bbt[i + 1]) 642 if (bbt[i] || bbt[i + 1])
643 continue; 643 continue;
644 addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize; 644 addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
@@ -673,13 +673,12 @@ static int __init mtd_oobtest_init(void)
673 for (i = 0; i < ebcnt - 1; ++i) { 673 for (i = 0; i < ebcnt - 1; ++i) {
674 if (bbt[i] || bbt[i + 1]) 674 if (bbt[i] || bbt[i + 1])
675 continue; 675 continue;
676 prandom_bytes_state(&rnd_state, writebuf, 676 prandom_bytes_state(&rnd_state, writebuf, mtd->oobavail * 2);
677 mtd->ecclayout->oobavail * 2);
678 addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize; 677 addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
679 ops.mode = MTD_OPS_AUTO_OOB; 678 ops.mode = MTD_OPS_AUTO_OOB;
680 ops.len = 0; 679 ops.len = 0;
681 ops.retlen = 0; 680 ops.retlen = 0;
682 ops.ooblen = mtd->ecclayout->oobavail * 2; 681 ops.ooblen = mtd->oobavail * 2;
683 ops.oobretlen = 0; 682 ops.oobretlen = 0;
684 ops.ooboffs = 0; 683 ops.ooboffs = 0;
685 ops.datbuf = NULL; 684 ops.datbuf = NULL;
@@ -688,7 +687,7 @@ static int __init mtd_oobtest_init(void)
688 if (err) 687 if (err)
689 goto out; 688 goto out;
690 if (memcmpshow(addr, readbuf, writebuf, 689 if (memcmpshow(addr, readbuf, writebuf,
691 mtd->ecclayout->oobavail * 2)) { 690 mtd->oobavail * 2)) {
692 pr_err("error: verify failed at %#llx\n", 691 pr_err("error: verify failed at %#llx\n",
693 (long long)addr); 692 (long long)addr);
694 errcnt += 1; 693 errcnt += 1;
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 9d47c5db24a6..163f21a1298d 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -49,7 +49,6 @@ static struct nand_ecclayout spinand_oob_64 = {
49 17, 18, 19, 20, 21, 22, 49 17, 18, 19, 20, 21, 22,
50 33, 34, 35, 36, 37, 38, 50 33, 34, 35, 36, 37, 38,
51 49, 50, 51, 52, 53, 54, }, 51 49, 50, 51, 52, 53, 54, },
52 .oobavail = 32,
53 .oobfree = { 52 .oobfree = {
54 {.offset = 8, 53 {.offset = 8,
55 .length = 8}, 54 .length = 8},
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.h b/drivers/staging/mt29f_spinand/mt29f_spinand.h
index ae62975cf44a..457dc7ffdaf1 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.h
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.h
@@ -78,7 +78,6 @@
78#define BL_ALL_UNLOCKED 0 78#define BL_ALL_UNLOCKED 0
79 79
80struct spinand_info { 80struct spinand_info {
81 struct nand_ecclayout *ecclayout;
82 struct spi_device *spi; 81 struct spi_device *spi;
83 void *priv; 82 void *priv;
84}; 83};
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 95d5880a63ee..7e553f286775 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -134,37 +134,59 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
134 if (mutex_lock_interruptible(&c->alloc_sem)) 134 if (mutex_lock_interruptible(&c->alloc_sem))
135 return -EINTR; 135 return -EINTR;
136 136
137
137 for (;;) { 138 for (;;) {
139 /* We can't start doing GC until we've finished checking
140 the node CRCs etc. */
141 int bucket, want_ino;
142
138 spin_lock(&c->erase_completion_lock); 143 spin_lock(&c->erase_completion_lock);
139 if (!c->unchecked_size) 144 if (!c->unchecked_size)
140 break; 145 break;
141
142 /* We can't start doing GC yet. We haven't finished checking
143 the node CRCs etc. Do it now. */
144
145 /* checked_ino is protected by the alloc_sem */
146 if (c->checked_ino > c->highest_ino && xattr) {
147 pr_crit("Checked all inodes but still 0x%x bytes of unchecked space?\n",
148 c->unchecked_size);
149 jffs2_dbg_dump_block_lists_nolock(c);
150 spin_unlock(&c->erase_completion_lock);
151 mutex_unlock(&c->alloc_sem);
152 return -ENOSPC;
153 }
154
155 spin_unlock(&c->erase_completion_lock); 146 spin_unlock(&c->erase_completion_lock);
156 147
157 if (!xattr) 148 if (!xattr)
158 xattr = jffs2_verify_xattr(c); 149 xattr = jffs2_verify_xattr(c);
159 150
160 spin_lock(&c->inocache_lock); 151 spin_lock(&c->inocache_lock);
152 /* Instead of doing the inodes in numeric order, doing a lookup
153 * in the hash for each possible number, just walk the hash
154 * buckets of *existing* inodes. This means that we process
155 * them out-of-order, but it can be a lot faster if there's
156 * a sparse inode# space. Which there often is. */
157 want_ino = c->check_ino;
158 for (bucket = c->check_ino % c->inocache_hashsize ; bucket < c->inocache_hashsize; bucket++) {
159 for (ic = c->inocache_list[bucket]; ic; ic = ic->next) {
160 if (ic->ino < want_ino)
161 continue;
162
163 if (ic->state != INO_STATE_CHECKEDABSENT &&
164 ic->state != INO_STATE_PRESENT)
165 goto got_next; /* with inocache_lock held */
166
167 jffs2_dbg(1, "Skipping ino #%u already checked\n",
168 ic->ino);
169 }
170 want_ino = 0;
171 }
161 172
162 ic = jffs2_get_ino_cache(c, c->checked_ino++); 173 /* Point c->check_ino past the end of the last bucket. */
174 c->check_ino = ((c->highest_ino + c->inocache_hashsize + 1) &
175 ~c->inocache_hashsize) - 1;
163 176
164 if (!ic) { 177 spin_unlock(&c->inocache_lock);
165 spin_unlock(&c->inocache_lock); 178
166 continue; 179 pr_crit("Checked all inodes but still 0x%x bytes of unchecked space?\n",
167 } 180 c->unchecked_size);
181 jffs2_dbg_dump_block_lists_nolock(c);
182 mutex_unlock(&c->alloc_sem);
183 return -ENOSPC;
184
185 got_next:
186 /* For next time round the loop, we want c->checked_ino to indicate
187 * the *next* one we want to check. And since we're walking the
188 * buckets rather than doing it sequentially, it's: */
189 c->check_ino = ic->ino + c->inocache_hashsize;
168 190
169 if (!ic->pino_nlink) { 191 if (!ic->pino_nlink) {
170 jffs2_dbg(1, "Skipping check of ino #%d with nlink/pino zero\n", 192 jffs2_dbg(1, "Skipping check of ino #%d with nlink/pino zero\n",
@@ -176,8 +198,6 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
176 switch(ic->state) { 198 switch(ic->state) {
177 case INO_STATE_CHECKEDABSENT: 199 case INO_STATE_CHECKEDABSENT:
178 case INO_STATE_PRESENT: 200 case INO_STATE_PRESENT:
179 jffs2_dbg(1, "Skipping ino #%u already checked\n",
180 ic->ino);
181 spin_unlock(&c->inocache_lock); 201 spin_unlock(&c->inocache_lock);
182 continue; 202 continue;
183 203
@@ -196,7 +216,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
196 ic->ino); 216 ic->ino);
197 /* We need to come back again for the _same_ inode. We've 217 /* We need to come back again for the _same_ inode. We've
198 made no progress in this case, but that should be OK */ 218 made no progress in this case, but that should be OK */
199 c->checked_ino--; 219 c->check_ino = ic->ino;
200 220
201 mutex_unlock(&c->alloc_sem); 221 mutex_unlock(&c->alloc_sem);
202 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); 222 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
index 046fee8b6e9b..778275f48a87 100644
--- a/fs/jffs2/jffs2_fs_sb.h
+++ b/fs/jffs2/jffs2_fs_sb.h
@@ -49,7 +49,7 @@ struct jffs2_sb_info {
49 struct mtd_info *mtd; 49 struct mtd_info *mtd;
50 50
51 uint32_t highest_ino; 51 uint32_t highest_ino;
52 uint32_t checked_ino; 52 uint32_t check_ino; /* *NEXT* inode to be checked */
53 53
54 unsigned int flags; 54 unsigned int flags;
55 55
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index b6bd4affd9ad..cda0774c2c9c 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -846,8 +846,8 @@ int jffs2_thread_should_wake(struct jffs2_sb_info *c)
846 return 1; 846 return 1;
847 847
848 if (c->unchecked_size) { 848 if (c->unchecked_size) {
849 jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n", 849 jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, check_ino #%d\n",
850 c->unchecked_size, c->checked_ino); 850 c->unchecked_size, c->check_ino);
851 return 1; 851 return 1;
852 } 852 }
853 853
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 5a3da3f52908..b25d28a21212 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -1183,22 +1183,20 @@ void jffs2_dirty_trigger(struct jffs2_sb_info *c)
1183 1183
1184int jffs2_nand_flash_setup(struct jffs2_sb_info *c) 1184int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
1185{ 1185{
1186 struct nand_ecclayout *oinfo = c->mtd->ecclayout;
1187
1188 if (!c->mtd->oobsize) 1186 if (!c->mtd->oobsize)
1189 return 0; 1187 return 0;
1190 1188
1191 /* Cleanmarker is out-of-band, so inline size zero */ 1189 /* Cleanmarker is out-of-band, so inline size zero */
1192 c->cleanmarker_size = 0; 1190 c->cleanmarker_size = 0;
1193 1191
1194 if (!oinfo || oinfo->oobavail == 0) { 1192 if (c->mtd->oobavail == 0) {
1195 pr_err("inconsistent device description\n"); 1193 pr_err("inconsistent device description\n");
1196 return -EINVAL; 1194 return -EINVAL;
1197 } 1195 }
1198 1196
1199 jffs2_dbg(1, "using OOB on NAND\n"); 1197 jffs2_dbg(1, "using OOB on NAND\n");
1200 1198
1201 c->oobavail = oinfo->oobavail; 1199 c->oobavail = c->mtd->oobavail;
1202 1200
1203 /* Initialise write buffer */ 1201 /* Initialise write buffer */
1204 init_rwsem(&c->wbuf_sem); 1202 init_rwsem(&c->wbuf_sem);
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 36bb6a503f19..3bf8f954b642 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -166,7 +166,6 @@ struct bbm_info {
166}; 166};
167 167
168/* OneNAND BBT interface */ 168/* OneNAND BBT interface */
169extern int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
170extern int onenand_default_bbt(struct mtd_info *mtd); 169extern int onenand_default_bbt(struct mtd_info *mtd);
171 170
172#endif /* __LINUX_MTD_BBM_H */ 171#endif /* __LINUX_MTD_BBM_H */
diff --git a/include/linux/mtd/inftl.h b/include/linux/mtd/inftl.h
index 02cd5f9b79b8..8255118be0f0 100644
--- a/include/linux/mtd/inftl.h
+++ b/include/linux/mtd/inftl.h
@@ -44,7 +44,6 @@ struct INFTLrecord {
44 unsigned int nb_blocks; /* number of physical blocks */ 44 unsigned int nb_blocks; /* number of physical blocks */
45 unsigned int nb_boot_blocks; /* number of blocks used by the bios */ 45 unsigned int nb_boot_blocks; /* number of blocks used by the bios */
46 struct erase_info instr; 46 struct erase_info instr;
47 struct nand_ecclayout oobinfo;
48}; 47};
49 48
50int INFTL_mount(struct INFTLrecord *s); 49int INFTL_mount(struct INFTLrecord *s);
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 58f3ba709ade..5e0eb7ccabd4 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -240,8 +240,11 @@ struct map_info {
240 If there is no cache to care about this can be set to NULL. */ 240 If there is no cache to care about this can be set to NULL. */
241 void (*inval_cache)(struct map_info *, unsigned long, ssize_t); 241 void (*inval_cache)(struct map_info *, unsigned long, ssize_t);
242 242
243 /* set_vpp() must handle being reentered -- enable, enable, disable 243 /* This will be called with 1 as parameter when the first map user
244 must leave it enabled. */ 244 * needs VPP, and called with 0 when the last user exits. The map
245 * core maintains a reference counter, and assumes that VPP is a
246 * global resource applying to all mapped flash chips on the system.
247 */
245 void (*set_vpp)(struct map_info *, int); 248 void (*set_vpp)(struct map_info *, int);
246 249
247 unsigned long pfow_base; 250 unsigned long pfow_base;
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index cc84923011c0..771272187316 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -105,7 +105,6 @@ struct mtd_oob_ops {
105struct nand_ecclayout { 105struct nand_ecclayout {
106 __u32 eccbytes; 106 __u32 eccbytes;
107 __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE]; 107 __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
108 __u32 oobavail;
109 struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE]; 108 struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
110}; 109};
111 110
@@ -265,6 +264,11 @@ static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
265 return mtd->dev.of_node; 264 return mtd->dev.of_node;
266} 265}
267 266
267static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
268{
269 return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
270}
271
268int mtd_erase(struct mtd_info *mtd, struct erase_info *instr); 272int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
269int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, 273int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
270 void **virt, resource_size_t *phys); 274 void **virt, resource_size_t *phys);
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index bdd68e22b5a5..56574ba36555 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -168,6 +168,12 @@ typedef enum {
168/* Device supports subpage reads */ 168/* Device supports subpage reads */
169#define NAND_SUBPAGE_READ 0x00001000 169#define NAND_SUBPAGE_READ 0x00001000
170 170
171/*
172 * Some MLC NANDs need data scrambling to limit bitflips caused by repeated
173 * patterns.
174 */
175#define NAND_NEED_SCRAMBLING 0x00002000
176
171/* Options valid for Samsung large page devices */ 177/* Options valid for Samsung large page devices */
172#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG 178#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
173 179
@@ -666,7 +672,7 @@ struct nand_chip {
666 void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len); 672 void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
667 void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len); 673 void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
668 void (*select_chip)(struct mtd_info *mtd, int chip); 674 void (*select_chip)(struct mtd_info *mtd, int chip);
669 int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip); 675 int (*block_bad)(struct mtd_info *mtd, loff_t ofs);
670 int (*block_markbad)(struct mtd_info *mtd, loff_t ofs); 676 int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
671 void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl); 677 void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
672 int (*dev_ready)(struct mtd_info *mtd); 678 int (*dev_ready)(struct mtd_info *mtd);
@@ -896,7 +902,6 @@ extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
896 * @chip_delay: R/B delay value in us 902 * @chip_delay: R/B delay value in us
897 * @options: Option flags, e.g. 16bit buswidth 903 * @options: Option flags, e.g. 16bit buswidth
898 * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH 904 * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
899 * @ecclayout: ECC layout info structure
900 * @part_probe_types: NULL-terminated array of probe types 905 * @part_probe_types: NULL-terminated array of probe types
901 */ 906 */
902struct platform_nand_chip { 907struct platform_nand_chip {
@@ -904,7 +909,6 @@ struct platform_nand_chip {
904 int chip_offset; 909 int chip_offset;
905 int nr_partitions; 910 int nr_partitions;
906 struct mtd_partition *partitions; 911 struct mtd_partition *partitions;
907 struct nand_ecclayout *ecclayout;
908 int chip_delay; 912 int chip_delay;
909 unsigned int options; 913 unsigned int options;
910 unsigned int bbt_options; 914 unsigned int bbt_options;
diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h
index fb0bc3420a10..98f20ef05d60 100644
--- a/include/linux/mtd/nand_bch.h
+++ b/include/linux/mtd/nand_bch.h
@@ -32,9 +32,7 @@ int nand_bch_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc,
32/* 32/*
33 * Initialize BCH encoder/decoder 33 * Initialize BCH encoder/decoder
34 */ 34 */
35struct nand_bch_control * 35struct nand_bch_control *nand_bch_init(struct mtd_info *mtd);
36nand_bch_init(struct mtd_info *mtd, unsigned int eccsize,
37 unsigned int eccbytes, struct nand_ecclayout **ecclayout);
38/* 36/*
39 * Release BCH encoder/decoder resources 37 * Release BCH encoder/decoder resources
40 */ 38 */
@@ -58,9 +56,7 @@ nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
58 return -ENOTSUPP; 56 return -ENOTSUPP;
59} 57}
60 58
61static inline struct nand_bch_control * 59static inline struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
62nand_bch_init(struct mtd_info *mtd, unsigned int eccsize,
63 unsigned int eccbytes, struct nand_ecclayout **ecclayout)
64{ 60{
65 return NULL; 61 return NULL;
66} 62}
diff --git a/include/linux/mtd/nftl.h b/include/linux/mtd/nftl.h
index b059629e22bc..044daa02b8ff 100644
--- a/include/linux/mtd/nftl.h
+++ b/include/linux/mtd/nftl.h
@@ -50,7 +50,6 @@ struct NFTLrecord {
50 unsigned int nb_blocks; /* number of physical blocks */ 50 unsigned int nb_blocks; /* number of physical blocks */
51 unsigned int nb_boot_blocks; /* number of blocks used by the bios */ 51 unsigned int nb_boot_blocks; /* number of blocks used by the bios */
52 struct erase_info instr; 52 struct erase_info instr;
53 struct nand_ecclayout oobinfo;
54}; 53};
55 54
56int NFTL_mount(struct NFTLrecord *s); 55int NFTL_mount(struct NFTLrecord *s);
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 62356d50815b..3c36113a88e1 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -85,6 +85,7 @@
85#define SR_BP0 BIT(2) /* Block protect 0 */ 85#define SR_BP0 BIT(2) /* Block protect 0 */
86#define SR_BP1 BIT(3) /* Block protect 1 */ 86#define SR_BP1 BIT(3) /* Block protect 1 */
87#define SR_BP2 BIT(4) /* Block protect 2 */ 87#define SR_BP2 BIT(4) /* Block protect 2 */
88#define SR_TB BIT(5) /* Top/Bottom protect */
88#define SR_SRWD BIT(7) /* SR write protect */ 89#define SR_SRWD BIT(7) /* SR write protect */
89 90
90#define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */ 91#define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */
@@ -116,6 +117,7 @@ enum spi_nor_ops {
116 117
117enum spi_nor_option_flags { 118enum spi_nor_option_flags {
118 SNOR_F_USE_FSR = BIT(0), 119 SNOR_F_USE_FSR = BIT(0),
120 SNOR_F_HAS_SR_TB = BIT(1),
119}; 121};
120 122
121/** 123/**
diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h
index 36bb92172f47..c55e42ee57fa 100644
--- a/include/linux/platform_data/mtd-nand-s3c2410.h
+++ b/include/linux/platform_data/mtd-nand-s3c2410.h
@@ -40,7 +40,6 @@ struct s3c2410_nand_set {
40 char *name; 40 char *name;
41 int *nr_map; 41 int *nr_map;
42 struct mtd_partition *partitions; 42 struct mtd_partition *partitions;
43 struct nand_ecclayout *ecc_layout;
44}; 43};
45 44
46struct s3c2410_platform_nand { 45struct s3c2410_platform_nand {