author     Linus Torvalds <torvalds@linux-foundation.org>    2014-06-11 11:35:34 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-06-11 11:35:34 -0400
commit     e413a19a8ef49ae3b76310bb569dabe66b22f5a3 (patch)
tree       f171d40fd0ec69296458173d7ec470339f93f53b /drivers/mtd
parent     8d0304e69dc960ae7683943ac5b9c4c685d409d7 (diff)
parent     f1900c79633e9ed757319e63aefb8e29443ea35e (diff)
Merge tag 'for-linus-20140610' of git://git.infradead.org/linux-mtd
Pull MTD updates from Brian Norris:

 - refactor m25p80.c driver for use as a general SPI NOR framework for
   other drivers which may speak to SPI NOR flash without providing full
   SPI support (i.e., not part of drivers/spi/)

 - new Freescale QuadSPI driver (utilizing new SPI NOR framework)

 - updates for the STMicro "FSM" SPI NOR driver

 - fix sync/flush behavior on mtd_blkdevs

 - fixup subpage write support on a few NAND drivers

 - correct the MTD OOB test for odd-sized OOB areas

 - add BCH-16 support for OMAP NAND

 - fix warnings and trivial refactoring

 - utilize new ECC DT bindings in pxa3xx NAND driver

 - new LPDDR NVM driver

 - address a few assorted bugs caught by Coverity

 - add new imx6sx support for GPMI NAND

 - use a bounce buffer for NAND when non-DMA-able buffers are used

* tag 'for-linus-20140610' of git://git.infradead.org/linux-mtd: (77 commits)
  mtd: gpmi: add gpmi support for imx6sx
  mtd: maps: remove check for CONFIG_MTD_SUPERH_RESERVE
  mtd: bf5xx_nand: use the managed version of kzalloc
  mtd: pxa3xx_nand: make the driver work on big-endian systems
  mtd: nand: omap: fix omap_calculate_ecc_bch() for-loop error
  mtd: nand: r852: correct write_buf loop bounds
  mtd: nand_bbt: handle error case for nand_create_badblock_pattern()
  mtd: nand_bbt: remove unused variable
  mtd: maps: sc520cdp: fix warnings
  mtd: slram: fix unused variable warning
  mtd: pfow: remove unused variable
  mtd: lpddr: fix Kconfig dependency, for I/O accessors
  mtd: nand: pxa3xx: Add supported ECC strength and step size to the DT binding
  mtd: nand: pxa3xx: Use ECC strength and step size devicetree binding
  mtd: nand: pxa3xx: Clean pxa_ecc_init() error handling
  mtd: nand: Warn the user if the selected ECC strength is too weak
  mtd: nand: omap: Documentation: How to select correct ECC scheme for your device ?
  mtd: nand: omap: add support for BCH16_ECC - NAND driver updates
  mtd: nand: omap: add support for BCH16_ECC - ELM driver updates
  mtd: nand: omap: add support for BCH16_ECC - GPMC driver updates
  ...
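The first bullet is the core of this pull: m25p80.c is split into a thin SPI-bus shim plus a reusable spi-nor core under drivers/mtd/spi-nor/. As a quick orientation before the diff, here is a heavily condensed, illustrative sketch of the resulting driver shape, modeled on the reworked m25p80.c shown further down; the my_* names are placeholders, and the actual bus transfers and error handling are elided.

/*
 * Illustrative sketch only (not a complete driver): how a driver built
 * on the new spi-nor framework wires up its transport hooks, condensed
 * from the SPI-based m25p80 shim in the diff below.
 */
#include <linux/device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>
#include <linux/spi/spi.h>

struct my_flash {
        struct spi_device *spi;
        struct spi_nor nor;
        struct mtd_info mtd;
};

/* register path: RDID, RDSR, WREN, ... issued by the spi-nor core */
static int my_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
        struct my_flash *flash = nor->priv;

        return spi_write_then_read(flash->spi, &opcode, 1, buf, len);
}

static int my_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len,
                        int wr_en)
{
        /* send opcode followed by len payload bytes on the bus */
        return 0;
}

/* data path: the core tells us which opcode/dummy cycles to use */
static int my_read(struct spi_nor *nor, loff_t from, size_t len,
                   size_t *retlen, u_char *buf)
{
        /* issue nor->read_opcode + address + nor->read_dummy, then read */
        *retlen = len;
        return 0;
}

static void my_write(struct spi_nor *nor, loff_t to, size_t len,
                     size_t *retlen, const u_char *buf)
{
        /* issue nor->program_opcode + address, then stream the data */
        *retlen = len;
}

static int my_probe(struct spi_device *spi)
{
        struct my_flash *flash;
        struct spi_nor *nor;
        enum read_mode mode = SPI_NOR_NORMAL;

        flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
        if (!flash)
                return -ENOMEM;

        nor = &flash->nor;
        flash->spi = spi;

        /* install the hooks the spi-nor core calls back into */
        nor->read = my_read;
        nor->write = my_write;
        nor->read_reg = my_read_reg;
        nor->write_reg = my_write_reg;

        nor->dev = &spi->dev;
        nor->mtd = &flash->mtd;
        nor->priv = flash;
        flash->mtd.priv = nor;

        if (spi->mode & SPI_RX_QUAD)
                mode = SPI_NOR_QUAD;
        else if (spi->mode & SPI_RX_DUAL)
                mode = SPI_NOR_DUAL;

        /* identify the chip, pick opcodes, and size the MTD */
        return spi_nor_scan(nor, spi_get_device_id(spi), mode);
}

The controller driver supplies only the transport hooks; spi_nor_scan() identifies the chip and fills in the opcodes, dummy cycles, address width and MTD geometry that the hooks consume, which is what allows the new fsl-quadspi.c to reuse the same core without going through drivers/spi/.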
Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/Kconfig | 2
-rw-r--r--  drivers/mtd/Makefile | 1
-rw-r--r--  drivers/mtd/chips/Kconfig | 16
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0020.c | 4
-rw-r--r--  drivers/mtd/chips/cfi_util.c | 2
-rw-r--r--  drivers/mtd/devices/Kconfig | 4
-rw-r--r--  drivers/mtd/devices/elm.c | 38
-rw-r--r--  drivers/mtd/devices/m25p80.c | 1305
-rw-r--r--  drivers/mtd/devices/serial_flash_cmds.h | 44
-rw-r--r--  drivers/mtd/devices/slram.c | 4
-rw-r--r--  drivers/mtd/devices/st_spi_fsm.c | 340
-rw-r--r--  drivers/mtd/lpddr/Kconfig | 13
-rw-r--r--  drivers/mtd/lpddr/Makefile | 1
-rw-r--r--  drivers/mtd/lpddr/lpddr2_nvm.c | 507
-rw-r--r--  drivers/mtd/maps/Kconfig | 4
-rw-r--r--  drivers/mtd/maps/sc520cdp.c | 6
-rw-r--r--  drivers/mtd/maps/solutionengine.c | 25
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 6
-rw-r--r--  drivers/mtd/mtdchar.c | 20
-rw-r--r--  drivers/mtd/nand/bf5xx_nand.c | 13
-rw-r--r--  drivers/mtd/nand/denali.c | 7
-rw-r--r--  drivers/mtd/nand/docg4.c | 6
-rw-r--r--  drivers/mtd/nand/fsl_elbc_nand.c | 14
-rw-r--r--  drivers/mtd/nand/fsl_ifc_nand.c | 21
-rw-r--r--  drivers/mtd/nand/gpmi-nand/bch-regs.h | 12
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-lib.c | 11
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-nand.c | 72
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-nand.h | 32
-rw-r--r--  drivers/mtd/nand/nand_base.c | 104
-rw-r--r--  drivers/mtd/nand/nand_bbt.c | 13
-rw-r--r--  drivers/mtd/nand/nand_ecc.c | 2
-rw-r--r--  drivers/mtd/nand/omap2.c | 108
-rw-r--r--  drivers/mtd/nand/orion_nand.c | 2
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 44
-rw-r--r--  drivers/mtd/nand/r852.c | 6
-rw-r--r--  drivers/mtd/onenand/samsung.c | 8
-rw-r--r--  drivers/mtd/spi-nor/Kconfig | 17
-rw-r--r--  drivers/mtd/spi-nor/Makefile | 2
-rw-r--r--  drivers/mtd/spi-nor/fsl-quadspi.c | 1009
-rw-r--r--  drivers/mtd/spi-nor/spi-nor.c | 1107
-rw-r--r--  drivers/mtd/tests/oobtest.c | 17
41 files changed, 3360 insertions, 1609 deletions
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 5d49a2129618..94b821042d9d 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -321,6 +321,8 @@ source "drivers/mtd/onenand/Kconfig"
 
 source "drivers/mtd/lpddr/Kconfig"
 
+source "drivers/mtd/spi-nor/Kconfig"
+
 source "drivers/mtd/ubi/Kconfig"
 
 endif # MTD
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 4cfb31e6c966..99bb9a1f6e16 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -32,4 +32,5 @@ inftl-objs := inftlcore.o inftlmount.o
 
 obj-y                           += chips/ lpddr/ maps/ devices/ nand/ onenand/ tests/
 
+obj-$(CONFIG_MTD_SPI_NOR)       += spi-nor/
 obj-$(CONFIG_MTD_UBI)           += ubi/
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index e4696b37f3de..9f02c28c0204 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -169,33 +169,33 @@ config MTD_OTP
           in the programming of OTP bits will waste them.
 
 config MTD_CFI_INTELEXT
-        tristate "Support for Intel/Sharp flash chips"
+        tristate "Support for CFI command set 0001 (Intel/Sharp chips)"
         depends on MTD_GEN_PROBE
         select MTD_CFI_UTIL
         help
           The Common Flash Interface defines a number of different command
           sets which a CFI-compliant chip may claim to implement. This code
-          provides support for one of those command sets, used on Intel
-          StrataFlash and other parts.
+          provides support for command set 0001, used on Intel StrataFlash
+          and other parts.
 
 config MTD_CFI_AMDSTD
-        tristate "Support for AMD/Fujitsu/Spansion flash chips"
+        tristate "Support for CFI command set 0002 (AMD/Fujitsu/Spansion chips)"
         depends on MTD_GEN_PROBE
         select MTD_CFI_UTIL
         help
           The Common Flash Interface defines a number of different command
           sets which a CFI-compliant chip may claim to implement. This code
-          provides support for one of those command sets, used on chips
-          including the AMD Am29LV320.
+          provides support for command set 0002, used on chips including
+          the AMD Am29LV320.
 
 config MTD_CFI_STAA
-        tristate "Support for ST (Advanced Architecture) flash chips"
+        tristate "Support for CFI command set 0020 (ST (Advanced Architecture) chips)"
         depends on MTD_GEN_PROBE
         select MTD_CFI_UTIL
         help
           The Common Flash Interface defines a number of different command
           sets which a CFI-compliant chip may claim to implement. This code
-          provides support for one of those command sets.
+          provides support for command set 0020.
 
 config MTD_CFI_UTIL
         tristate
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 6293855fb5ee..423666b51efb 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -961,7 +961,7 @@ static int cfi_staa_erase_varsize(struct mtd_info *mtd,
                         chipnum++;
 
                         if (chipnum >= cfi->numchips)
-                        break;
+                                break;
                 }
         }
 
@@ -1170,7 +1170,7 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
                         chipnum++;
 
                         if (chipnum >= cfi->numchips)
-                        break;
+                                break;
                 }
         }
         return 0;
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 08049f6eea60..09c79bd0b4f4 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -239,7 +239,7 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
                         chipnum++;
 
                         if (chipnum >= cfi->numchips)
-                        break;
+                                break;
                 }
         }
 
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 1210bc2923b7..c49d0b127fef 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -80,7 +80,7 @@ config MTD_DATAFLASH_OTP
 
 config MTD_M25P80
         tristate "Support most SPI Flash chips (AT26DF, M25P, W25X, ...)"
-        depends on SPI_MASTER
+        depends on SPI_MASTER && MTD_SPI_NOR
         help
           This enables access to most modern SPI flash chips, used for
           program and data storage. Series supported include Atmel AT26DF,
@@ -212,7 +212,7 @@ config MTD_DOCG3
 
 config MTD_ST_SPI_FSM
         tristate "ST Microelectronics SPI FSM Serial Flash Controller"
-        depends on ARM || SH
+        depends on ARCH_STI
         help
           This provides an MTD device driver for the ST Microelectronics
           SPI Fast Sequence Mode (FSM) Serial Flash Controller and support
diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
index 1fd4a0f77967..7df86948e6d4 100644
--- a/drivers/mtd/devices/elm.c
+++ b/drivers/mtd/devices/elm.c
@@ -213,6 +213,28 @@ static void elm_load_syndrome(struct elm_info *info,
                                 val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
                                 elm_write_reg(info, offset, val);
                                 break;
+                        case BCH16_ECC:
+                                val = cpu_to_be32(*(u32 *) &ecc[22]);
+                                elm_write_reg(info, offset, val);
+                                offset += 4;
+                                val = cpu_to_be32(*(u32 *) &ecc[18]);
+                                elm_write_reg(info, offset, val);
+                                offset += 4;
+                                val = cpu_to_be32(*(u32 *) &ecc[14]);
+                                elm_write_reg(info, offset, val);
+                                offset += 4;
+                                val = cpu_to_be32(*(u32 *) &ecc[10]);
+                                elm_write_reg(info, offset, val);
+                                offset += 4;
+                                val = cpu_to_be32(*(u32 *) &ecc[6]);
+                                elm_write_reg(info, offset, val);
+                                offset += 4;
+                                val = cpu_to_be32(*(u32 *) &ecc[2]);
+                                elm_write_reg(info, offset, val);
+                                offset += 4;
+                                val = cpu_to_be32(*(u32 *) &ecc[0]) >> 16;
+                                elm_write_reg(info, offset, val);
+                                break;
                         default:
                                 pr_err("invalid config bch_type\n");
                         }
@@ -418,6 +440,7 @@ static int elm_remove(struct platform_device *pdev)
         return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 /**
  * elm_context_save
  * saves ELM configurations to preserve them across Hardware powered-down
@@ -435,6 +458,13 @@ static int elm_context_save(struct elm_info *info)
         for (i = 0; i < ERROR_VECTOR_MAX; i++) {
                 offset = i * SYNDROME_FRAGMENT_REG_SIZE;
                 switch (bch_type) {
+                case BCH16_ECC:
+                        regs->elm_syndrome_fragment_6[i] = elm_read_reg(info,
+                                        ELM_SYNDROME_FRAGMENT_6 + offset);
+                        regs->elm_syndrome_fragment_5[i] = elm_read_reg(info,
+                                        ELM_SYNDROME_FRAGMENT_5 + offset);
+                        regs->elm_syndrome_fragment_4[i] = elm_read_reg(info,
+                                        ELM_SYNDROME_FRAGMENT_4 + offset);
                 case BCH8_ECC:
                         regs->elm_syndrome_fragment_3[i] = elm_read_reg(info,
                                         ELM_SYNDROME_FRAGMENT_3 + offset);
@@ -473,6 +503,13 @@ static int elm_context_restore(struct elm_info *info)
         for (i = 0; i < ERROR_VECTOR_MAX; i++) {
                 offset = i * SYNDROME_FRAGMENT_REG_SIZE;
                 switch (bch_type) {
+                case BCH16_ECC:
+                        elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset,
+                                        regs->elm_syndrome_fragment_6[i]);
+                        elm_write_reg(info, ELM_SYNDROME_FRAGMENT_5 + offset,
+                                        regs->elm_syndrome_fragment_5[i]);
+                        elm_write_reg(info, ELM_SYNDROME_FRAGMENT_4 + offset,
+                                        regs->elm_syndrome_fragment_4[i]);
                 case BCH8_ECC:
                         elm_write_reg(info, ELM_SYNDROME_FRAGMENT_3 + offset,
                                         regs->elm_syndrome_fragment_3[i]);
@@ -509,6 +546,7 @@ static int elm_resume(struct device *dev)
         elm_context_restore(info);
         return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(elm_pm_ops, elm_suspend, elm_resume);
 
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 524dab3ac938..ed7e0a1bed3c 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -19,485 +19,98 @@
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/device.h> 21#include <linux/device.h>
22#include <linux/interrupt.h>
23#include <linux/mutex.h>
24#include <linux/math64.h>
25#include <linux/slab.h>
26#include <linux/sched.h>
27#include <linux/mod_devicetable.h>
28 22
29#include <linux/mtd/cfi.h>
30#include <linux/mtd/mtd.h> 23#include <linux/mtd/mtd.h>
31#include <linux/mtd/partitions.h> 24#include <linux/mtd/partitions.h>
32#include <linux/of_platform.h>
33 25
34#include <linux/spi/spi.h> 26#include <linux/spi/spi.h>
35#include <linux/spi/flash.h> 27#include <linux/spi/flash.h>
28#include <linux/mtd/spi-nor.h>
36 29
37/* Flash opcodes. */
38#define OPCODE_WREN 0x06 /* Write enable */
39#define OPCODE_RDSR 0x05 /* Read status register */
40#define OPCODE_WRSR 0x01 /* Write status register 1 byte */
41#define OPCODE_NORM_READ 0x03 /* Read data bytes (low frequency) */
42#define OPCODE_FAST_READ 0x0b /* Read data bytes (high frequency) */
43#define OPCODE_DUAL_READ 0x3b /* Read data bytes (Dual SPI) */
44#define OPCODE_QUAD_READ 0x6b /* Read data bytes (Quad SPI) */
45#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */
46#define OPCODE_BE_4K 0x20 /* Erase 4KiB block */
47#define OPCODE_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */
48#define OPCODE_BE_32K 0x52 /* Erase 32KiB block */
49#define OPCODE_CHIP_ERASE 0xc7 /* Erase whole flash chip */
50#define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */
51#define OPCODE_RDID 0x9f /* Read JEDEC ID */
52#define OPCODE_RDCR 0x35 /* Read configuration register */
53
54/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
55#define OPCODE_NORM_READ_4B 0x13 /* Read data bytes (low frequency) */
56#define OPCODE_FAST_READ_4B 0x0c /* Read data bytes (high frequency) */
57#define OPCODE_DUAL_READ_4B 0x3c /* Read data bytes (Dual SPI) */
58#define OPCODE_QUAD_READ_4B 0x6c /* Read data bytes (Quad SPI) */
59#define OPCODE_PP_4B 0x12 /* Page program (up to 256 bytes) */
60#define OPCODE_SE_4B 0xdc /* Sector erase (usually 64KiB) */
61
62/* Used for SST flashes only. */
63#define OPCODE_BP 0x02 /* Byte program */
64#define OPCODE_WRDI 0x04 /* Write disable */
65#define OPCODE_AAI_WP 0xad /* Auto address increment word program */
66
67/* Used for Macronix and Winbond flashes. */
68#define OPCODE_EN4B 0xb7 /* Enter 4-byte mode */
69#define OPCODE_EX4B 0xe9 /* Exit 4-byte mode */
70
71/* Used for Spansion flashes only. */
72#define OPCODE_BRWR 0x17 /* Bank register write */
73
74/* Status Register bits. */
75#define SR_WIP 1 /* Write in progress */
76#define SR_WEL 2 /* Write enable latch */
77/* meaning of other SR_* bits may differ between vendors */
78#define SR_BP0 4 /* Block protect 0 */
79#define SR_BP1 8 /* Block protect 1 */
80#define SR_BP2 0x10 /* Block protect 2 */
81#define SR_SRWD 0x80 /* SR write protect */
82
83#define SR_QUAD_EN_MX 0x40 /* Macronix Quad I/O */
84
85/* Configuration Register bits. */
86#define CR_QUAD_EN_SPAN 0x2 /* Spansion Quad I/O */
87
88/* Define max times to check status register before we give up. */
89#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
90#define MAX_CMD_SIZE 6 30#define MAX_CMD_SIZE 6
91
92#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
93
94/****************************************************************************/
95
96enum read_type {
97 M25P80_NORMAL = 0,
98 M25P80_FAST,
99 M25P80_DUAL,
100 M25P80_QUAD,
101};
102
103struct m25p { 31struct m25p {
104 struct spi_device *spi; 32 struct spi_device *spi;
105 struct mutex lock; 33 struct spi_nor spi_nor;
106 struct mtd_info mtd; 34 struct mtd_info mtd;
107 u16 page_size; 35 u8 command[MAX_CMD_SIZE];
108 u16 addr_width;
109 u8 erase_opcode;
110 u8 read_opcode;
111 u8 program_opcode;
112 u8 *command;
113 enum read_type flash_read;
114}; 36};
115 37
116static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd) 38static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
117{
118 return container_of(mtd, struct m25p, mtd);
119}
120
121/****************************************************************************/
122
123/*
124 * Internal helper functions
125 */
126
127/*
128 * Read the status register, returning its value in the location
129 * Return the status register value.
130 * Returns negative if error occurred.
131 */
132static int read_sr(struct m25p *flash)
133{
134 ssize_t retval;
135 u8 code = OPCODE_RDSR;
136 u8 val;
137
138 retval = spi_write_then_read(flash->spi, &code, 1, &val, 1);
139
140 if (retval < 0) {
141 dev_err(&flash->spi->dev, "error %d reading SR\n",
142 (int) retval);
143 return retval;
144 }
145
146 return val;
147}
148
149/*
150 * Read configuration register, returning its value in the
151 * location. Return the configuration register value.
152 * Returns negative if error occured.
153 */
154static int read_cr(struct m25p *flash)
155{
156 u8 code = OPCODE_RDCR;
157 int ret;
158 u8 val;
159
160 ret = spi_write_then_read(flash->spi, &code, 1, &val, 1);
161 if (ret < 0) {
162 dev_err(&flash->spi->dev, "error %d reading CR\n", ret);
163 return ret;
164 }
165
166 return val;
167}
168
169/*
170 * Write status register 1 byte
171 * Returns negative if error occurred.
172 */
173static int write_sr(struct m25p *flash, u8 val)
174{
175 flash->command[0] = OPCODE_WRSR;
176 flash->command[1] = val;
177
178 return spi_write(flash->spi, flash->command, 2);
179}
180
181/*
182 * Set write enable latch with Write Enable command.
183 * Returns negative if error occurred.
184 */
185static inline int write_enable(struct m25p *flash)
186{
187 u8 code = OPCODE_WREN;
188
189 return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
190}
191
192/*
193 * Send write disble instruction to the chip.
194 */
195static inline int write_disable(struct m25p *flash)
196{
197 u8 code = OPCODE_WRDI;
198
199 return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
200}
201
202/*
203 * Enable/disable 4-byte addressing mode.
204 */
205static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
206{
207 int status;
208 bool need_wren = false;
209
210 switch (JEDEC_MFR(jedec_id)) {
211 case CFI_MFR_ST: /* Micron, actually */
212 /* Some Micron need WREN command; all will accept it */
213 need_wren = true;
214 case CFI_MFR_MACRONIX:
215 case 0xEF /* winbond */:
216 if (need_wren)
217 write_enable(flash);
218
219 flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
220 status = spi_write(flash->spi, flash->command, 1);
221
222 if (need_wren)
223 write_disable(flash);
224
225 return status;
226 default:
227 /* Spansion style */
228 flash->command[0] = OPCODE_BRWR;
229 flash->command[1] = enable << 7;
230 return spi_write(flash->spi, flash->command, 2);
231 }
232}
233
234/*
235 * Service routine to read status register until ready, or timeout occurs.
236 * Returns non-zero if error.
237 */
238static int wait_till_ready(struct m25p *flash)
239{
240 unsigned long deadline;
241 int sr;
242
243 deadline = jiffies + MAX_READY_WAIT_JIFFIES;
244
245 do {
246 if ((sr = read_sr(flash)) < 0)
247 break;
248 else if (!(sr & SR_WIP))
249 return 0;
250
251 cond_resched();
252
253 } while (!time_after_eq(jiffies, deadline));
254
255 return 1;
256}
257
258/*
259 * Write status Register and configuration register with 2 bytes
260 * The first byte will be written to the status register, while the
261 * second byte will be written to the configuration register.
262 * Return negative if error occured.
263 */
264static int write_sr_cr(struct m25p *flash, u16 val)
265{
266 flash->command[0] = OPCODE_WRSR;
267 flash->command[1] = val & 0xff;
268 flash->command[2] = (val >> 8);
269
270 return spi_write(flash->spi, flash->command, 3);
271}
272
273static int macronix_quad_enable(struct m25p *flash)
274{
275 int ret, val;
276 u8 cmd[2];
277 cmd[0] = OPCODE_WRSR;
278
279 val = read_sr(flash);
280 cmd[1] = val | SR_QUAD_EN_MX;
281 write_enable(flash);
282
283 spi_write(flash->spi, &cmd, 2);
284
285 if (wait_till_ready(flash))
286 return 1;
287
288 ret = read_sr(flash);
289 if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
290 dev_err(&flash->spi->dev, "Macronix Quad bit not set\n");
291 return -EINVAL;
292 }
293
294 return 0;
295}
296
297static int spansion_quad_enable(struct m25p *flash)
298{ 39{
40 struct m25p *flash = nor->priv;
41 struct spi_device *spi = flash->spi;
299 int ret; 42 int ret;
300 int quad_en = CR_QUAD_EN_SPAN << 8;
301
302 write_enable(flash);
303 43
304 ret = write_sr_cr(flash, quad_en); 44 ret = spi_write_then_read(spi, &code, 1, val, len);
305 if (ret < 0) { 45 if (ret < 0)
306 dev_err(&flash->spi->dev, 46 dev_err(&spi->dev, "error %d reading %x\n", ret, code);
307 "error while writing configuration register\n");
308 return -EINVAL;
309 }
310
311 /* read back and check it */
312 ret = read_cr(flash);
313 if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
314 dev_err(&flash->spi->dev, "Spansion Quad bit not set\n");
315 return -EINVAL;
316 }
317
318 return 0;
319}
320
321static int set_quad_mode(struct m25p *flash, u32 jedec_id)
322{
323 int status;
324
325 switch (JEDEC_MFR(jedec_id)) {
326 case CFI_MFR_MACRONIX:
327 status = macronix_quad_enable(flash);
328 if (status) {
329 dev_err(&flash->spi->dev,
330 "Macronix quad-read not enabled\n");
331 return -EINVAL;
332 }
333 return status;
334 default:
335 status = spansion_quad_enable(flash);
336 if (status) {
337 dev_err(&flash->spi->dev,
338 "Spansion quad-read not enabled\n");
339 return -EINVAL;
340 }
341 return status;
342 }
343}
344
345/*
346 * Erase the whole flash memory
347 *
348 * Returns 0 if successful, non-zero otherwise.
349 */
350static int erase_chip(struct m25p *flash)
351{
352 pr_debug("%s: %s %lldKiB\n", dev_name(&flash->spi->dev), __func__,
353 (long long)(flash->mtd.size >> 10));
354 47
355 /* Wait until finished previous write command. */ 48 return ret;
356 if (wait_till_ready(flash))
357 return 1;
358
359 /* Send write enable, then erase commands. */
360 write_enable(flash);
361
362 /* Set up command buffer. */
363 flash->command[0] = OPCODE_CHIP_ERASE;
364
365 spi_write(flash->spi, flash->command, 1);
366
367 return 0;
368} 49}
369 50
370static void m25p_addr2cmd(struct m25p *flash, unsigned int addr, u8 *cmd) 51static void m25p_addr2cmd(struct spi_nor *nor, unsigned int addr, u8 *cmd)
371{ 52{
372 /* opcode is in cmd[0] */ 53 /* opcode is in cmd[0] */
373 cmd[1] = addr >> (flash->addr_width * 8 - 8); 54 cmd[1] = addr >> (nor->addr_width * 8 - 8);
374 cmd[2] = addr >> (flash->addr_width * 8 - 16); 55 cmd[2] = addr >> (nor->addr_width * 8 - 16);
375 cmd[3] = addr >> (flash->addr_width * 8 - 24); 56 cmd[3] = addr >> (nor->addr_width * 8 - 24);
376 cmd[4] = addr >> (flash->addr_width * 8 - 32); 57 cmd[4] = addr >> (nor->addr_width * 8 - 32);
377} 58}
378 59
379static int m25p_cmdsz(struct m25p *flash) 60static int m25p_cmdsz(struct spi_nor *nor)
380{ 61{
381 return 1 + flash->addr_width; 62 return 1 + nor->addr_width;
382} 63}
383 64
384/* 65static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len,
385 * Erase one sector of flash memory at offset ``offset'' which is any 66 int wr_en)
386 * address within the sector which should be erased.
387 *
388 * Returns 0 if successful, non-zero otherwise.
389 */
390static int erase_sector(struct m25p *flash, u32 offset)
391{ 67{
392 pr_debug("%s: %s %dKiB at 0x%08x\n", dev_name(&flash->spi->dev), 68 struct m25p *flash = nor->priv;
393 __func__, flash->mtd.erasesize / 1024, offset); 69 struct spi_device *spi = flash->spi;
394
395 /* Wait until finished previous write command. */
396 if (wait_till_ready(flash))
397 return 1;
398 70
399 /* Send write enable, then erase commands. */ 71 flash->command[0] = opcode;
400 write_enable(flash); 72 if (buf)
401 73 memcpy(&flash->command[1], buf, len);
402 /* Set up command buffer. */
403 flash->command[0] = flash->erase_opcode;
404 m25p_addr2cmd(flash, offset, flash->command);
405
406 spi_write(flash->spi, flash->command, m25p_cmdsz(flash));
407 74
408 return 0; 75 return spi_write(spi, flash->command, len + 1);
409} 76}
410 77
411/****************************************************************************/ 78static void m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
412 79 size_t *retlen, const u_char *buf)
413/*
414 * MTD implementation
415 */
416
417/*
418 * Erase an address range on the flash chip. The address range may extend
419 * one or more erase sectors. Return an error is there is a problem erasing.
420 */
421static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
422{ 80{
423 struct m25p *flash = mtd_to_m25p(mtd); 81 struct m25p *flash = nor->priv;
424 u32 addr,len; 82 struct spi_device *spi = flash->spi;
425 uint32_t rem; 83 struct spi_transfer t[2] = {};
426 84 struct spi_message m;
427 pr_debug("%s: %s at 0x%llx, len %lld\n", dev_name(&flash->spi->dev), 85 int cmd_sz = m25p_cmdsz(nor);
428 __func__, (long long)instr->addr,
429 (long long)instr->len);
430
431 div_u64_rem(instr->len, mtd->erasesize, &rem);
432 if (rem)
433 return -EINVAL;
434
435 addr = instr->addr;
436 len = instr->len;
437
438 mutex_lock(&flash->lock);
439
440 /* whole-chip erase? */
441 if (len == flash->mtd.size) {
442 if (erase_chip(flash)) {
443 instr->state = MTD_ERASE_FAILED;
444 mutex_unlock(&flash->lock);
445 return -EIO;
446 }
447 86
448 /* REVISIT in some cases we could speed up erasing large regions 87 spi_message_init(&m);
449 * by using OPCODE_SE instead of OPCODE_BE_4K. We may have set up
450 * to use "small sector erase", but that's not always optimal.
451 */
452 88
453 /* "sector"-at-a-time erase */ 89 if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
454 } else { 90 cmd_sz = 1;
455 while (len) {
456 if (erase_sector(flash, addr)) {
457 instr->state = MTD_ERASE_FAILED;
458 mutex_unlock(&flash->lock);
459 return -EIO;
460 }
461 91
462 addr += mtd->erasesize; 92 flash->command[0] = nor->program_opcode;
463 len -= mtd->erasesize; 93 m25p_addr2cmd(nor, to, flash->command);
464 }
465 }
466 94
467 mutex_unlock(&flash->lock); 95 t[0].tx_buf = flash->command;
96 t[0].len = cmd_sz;
97 spi_message_add_tail(&t[0], &m);
468 98
469 instr->state = MTD_ERASE_DONE; 99 t[1].tx_buf = buf;
470 mtd_erase_callback(instr); 100 t[1].len = len;
101 spi_message_add_tail(&t[1], &m);
471 102
472 return 0; 103 spi_sync(spi, &m);
473}
474 104
475/* 105 *retlen += m.actual_length - cmd_sz;
476 * Dummy Cycle calculation for different type of read.
477 * It can be used to support more commands with
478 * different dummy cycle requirements.
479 */
480static inline int m25p80_dummy_cycles_read(struct m25p *flash)
481{
482 switch (flash->flash_read) {
483 case M25P80_FAST:
484 case M25P80_DUAL:
485 case M25P80_QUAD:
486 return 1;
487 case M25P80_NORMAL:
488 return 0;
489 default:
490 dev_err(&flash->spi->dev, "No valid read type supported\n");
491 return -1;
492 }
493} 106}
494 107
495static inline unsigned int m25p80_rx_nbits(const struct m25p *flash) 108static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor)
496{ 109{
497 switch (flash->flash_read) { 110 switch (nor->flash_read) {
498 case M25P80_DUAL: 111 case SPI_NOR_DUAL:
499 return 2; 112 return 2;
500 case M25P80_QUAD: 113 case SPI_NOR_QUAD:
501 return 4; 114 return 4;
502 default: 115 default:
503 return 0; 116 return 0;
@@ -505,590 +118,72 @@ static inline unsigned int m25p80_rx_nbits(const struct m25p *flash)
505} 118}
506 119
507/* 120/*
508 * Read an address range from the flash chip. The address range 121 * Read an address range from the nor chip. The address range
509 * may be any size provided it is within the physical boundaries. 122 * may be any size provided it is within the physical boundaries.
510 */ 123 */
511static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len, 124static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
512 size_t *retlen, u_char *buf) 125 size_t *retlen, u_char *buf)
513{ 126{
514 struct m25p *flash = mtd_to_m25p(mtd); 127 struct m25p *flash = nor->priv;
128 struct spi_device *spi = flash->spi;
515 struct spi_transfer t[2]; 129 struct spi_transfer t[2];
516 struct spi_message m; 130 struct spi_message m;
517 uint8_t opcode; 131 int dummy = nor->read_dummy;
518 int dummy; 132 int ret;
519 133
520 pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev), 134 /* Wait till previous write/erase is done. */
521 __func__, (u32)from, len); 135 ret = nor->wait_till_ready(nor);
136 if (ret)
137 return ret;
522 138
523 spi_message_init(&m); 139 spi_message_init(&m);
524 memset(t, 0, (sizeof t)); 140 memset(t, 0, (sizeof t));
525 141
526 dummy = m25p80_dummy_cycles_read(flash); 142 flash->command[0] = nor->read_opcode;
527 if (dummy < 0) { 143 m25p_addr2cmd(nor, from, flash->command);
528 dev_err(&flash->spi->dev, "No valid read command supported\n");
529 return -EINVAL;
530 }
531 144
532 t[0].tx_buf = flash->command; 145 t[0].tx_buf = flash->command;
533 t[0].len = m25p_cmdsz(flash) + dummy; 146 t[0].len = m25p_cmdsz(nor) + dummy;
534 spi_message_add_tail(&t[0], &m); 147 spi_message_add_tail(&t[0], &m);
535 148
536 t[1].rx_buf = buf; 149 t[1].rx_buf = buf;
537 t[1].rx_nbits = m25p80_rx_nbits(flash); 150 t[1].rx_nbits = m25p80_rx_nbits(nor);
538 t[1].len = len; 151 t[1].len = len;
539 spi_message_add_tail(&t[1], &m); 152 spi_message_add_tail(&t[1], &m);
540 153
541 mutex_lock(&flash->lock); 154 spi_sync(spi, &m);
542
543 /* Wait till previous write/erase is done. */
544 if (wait_till_ready(flash)) {
545 /* REVISIT status return?? */
546 mutex_unlock(&flash->lock);
547 return 1;
548 }
549
550 /* Set up the write data buffer. */
551 opcode = flash->read_opcode;
552 flash->command[0] = opcode;
553 m25p_addr2cmd(flash, from, flash->command);
554
555 spi_sync(flash->spi, &m);
556
557 *retlen = m.actual_length - m25p_cmdsz(flash) - dummy;
558
559 mutex_unlock(&flash->lock);
560 155
156 *retlen = m.actual_length - m25p_cmdsz(nor) - dummy;
561 return 0; 157 return 0;
562} 158}
563 159
564/* 160static int m25p80_erase(struct spi_nor *nor, loff_t offset)
565 * Write an address range to the flash chip. Data must be written in
566 * FLASH_PAGESIZE chunks. The address range may be any size provided
567 * it is within the physical boundaries.
568 */
569static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
570 size_t *retlen, const u_char *buf)
571{ 161{
572 struct m25p *flash = mtd_to_m25p(mtd); 162 struct m25p *flash = nor->priv;
573 u32 page_offset, page_size; 163 int ret;
574 struct spi_transfer t[2];
575 struct spi_message m;
576
577 pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
578 __func__, (u32)to, len);
579
580 spi_message_init(&m);
581 memset(t, 0, (sizeof t));
582
583 t[0].tx_buf = flash->command;
584 t[0].len = m25p_cmdsz(flash);
585 spi_message_add_tail(&t[0], &m);
586
587 t[1].tx_buf = buf;
588 spi_message_add_tail(&t[1], &m);
589
590 mutex_lock(&flash->lock);
591
592 /* Wait until finished previous write command. */
593 if (wait_till_ready(flash)) {
594 mutex_unlock(&flash->lock);
595 return 1;
596 }
597
598 write_enable(flash);
599
600 /* Set up the opcode in the write buffer. */
601 flash->command[0] = flash->program_opcode;
602 m25p_addr2cmd(flash, to, flash->command);
603
604 page_offset = to & (flash->page_size - 1);
605
606 /* do all the bytes fit onto one page? */
607 if (page_offset + len <= flash->page_size) {
608 t[1].len = len;
609
610 spi_sync(flash->spi, &m);
611
612 *retlen = m.actual_length - m25p_cmdsz(flash);
613 } else {
614 u32 i;
615
616 /* the size of data remaining on the first page */
617 page_size = flash->page_size - page_offset;
618
619 t[1].len = page_size;
620 spi_sync(flash->spi, &m);
621
622 *retlen = m.actual_length - m25p_cmdsz(flash);
623
624 /* write everything in flash->page_size chunks */
625 for (i = page_size; i < len; i += page_size) {
626 page_size = len - i;
627 if (page_size > flash->page_size)
628 page_size = flash->page_size;
629
630 /* write the next page to flash */
631 m25p_addr2cmd(flash, to + i, flash->command);
632
633 t[1].tx_buf = buf + i;
634 t[1].len = page_size;
635
636 wait_till_ready(flash);
637
638 write_enable(flash);
639
640 spi_sync(flash->spi, &m);
641
642 *retlen += m.actual_length - m25p_cmdsz(flash);
643 }
644 }
645
646 mutex_unlock(&flash->lock);
647
648 return 0;
649}
650
651static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
652 size_t *retlen, const u_char *buf)
653{
654 struct m25p *flash = mtd_to_m25p(mtd);
655 struct spi_transfer t[2];
656 struct spi_message m;
657 size_t actual;
658 int cmd_sz, ret;
659
660 pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
661 __func__, (u32)to, len);
662
663 spi_message_init(&m);
664 memset(t, 0, (sizeof t));
665
666 t[0].tx_buf = flash->command;
667 t[0].len = m25p_cmdsz(flash);
668 spi_message_add_tail(&t[0], &m);
669
670 t[1].tx_buf = buf;
671 spi_message_add_tail(&t[1], &m);
672 164
673 mutex_lock(&flash->lock); 165 dev_dbg(nor->dev, "%dKiB at 0x%08x\n",
166 flash->mtd.erasesize / 1024, (u32)offset);
674 167
675 /* Wait until finished previous write command. */ 168 /* Wait until finished previous write command. */
676 ret = wait_till_ready(flash); 169 ret = nor->wait_till_ready(nor);
677 if (ret) 170 if (ret)
678 goto time_out; 171 return ret;
679
680 write_enable(flash);
681
682 actual = to % 2;
683 /* Start write from odd address. */
684 if (actual) {
685 flash->command[0] = OPCODE_BP;
686 m25p_addr2cmd(flash, to, flash->command);
687
688 /* write one byte. */
689 t[1].len = 1;
690 spi_sync(flash->spi, &m);
691 ret = wait_till_ready(flash);
692 if (ret)
693 goto time_out;
694 *retlen += m.actual_length - m25p_cmdsz(flash);
695 }
696 to += actual;
697
698 flash->command[0] = OPCODE_AAI_WP;
699 m25p_addr2cmd(flash, to, flash->command);
700
701 /* Write out most of the data here. */
702 cmd_sz = m25p_cmdsz(flash);
703 for (; actual < len - 1; actual += 2) {
704 t[0].len = cmd_sz;
705 /* write two bytes. */
706 t[1].len = 2;
707 t[1].tx_buf = buf + actual;
708 172
709 spi_sync(flash->spi, &m); 173 /* Send write enable, then erase commands. */
710 ret = wait_till_ready(flash); 174 ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0, 0);
711 if (ret)
712 goto time_out;
713 *retlen += m.actual_length - cmd_sz;
714 cmd_sz = 1;
715 to += 2;
716 }
717 write_disable(flash);
718 ret = wait_till_ready(flash);
719 if (ret) 175 if (ret)
720 goto time_out; 176 return ret;
721
722 /* Write out trailing byte if it exists. */
723 if (actual != len) {
724 write_enable(flash);
725 flash->command[0] = OPCODE_BP;
726 m25p_addr2cmd(flash, to, flash->command);
727 t[0].len = m25p_cmdsz(flash);
728 t[1].len = 1;
729 t[1].tx_buf = buf + actual;
730
731 spi_sync(flash->spi, &m);
732 ret = wait_till_ready(flash);
733 if (ret)
734 goto time_out;
735 *retlen += m.actual_length - m25p_cmdsz(flash);
736 write_disable(flash);
737 }
738
739time_out:
740 mutex_unlock(&flash->lock);
741 return ret;
742}
743
744static int m25p80_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
745{
746 struct m25p *flash = mtd_to_m25p(mtd);
747 uint32_t offset = ofs;
748 uint8_t status_old, status_new;
749 int res = 0;
750
751 mutex_lock(&flash->lock);
752 /* Wait until finished previous command */
753 if (wait_till_ready(flash)) {
754 res = 1;
755 goto err;
756 }
757
758 status_old = read_sr(flash);
759
760 if (offset < flash->mtd.size-(flash->mtd.size/2))
761 status_new = status_old | SR_BP2 | SR_BP1 | SR_BP0;
762 else if (offset < flash->mtd.size-(flash->mtd.size/4))
763 status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
764 else if (offset < flash->mtd.size-(flash->mtd.size/8))
765 status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
766 else if (offset < flash->mtd.size-(flash->mtd.size/16))
767 status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2;
768 else if (offset < flash->mtd.size-(flash->mtd.size/32))
769 status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
770 else if (offset < flash->mtd.size-(flash->mtd.size/64))
771 status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1;
772 else
773 status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0;
774
775 /* Only modify protection if it will not unlock other areas */
776 if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) >
777 (status_old&(SR_BP2|SR_BP1|SR_BP0))) {
778 write_enable(flash);
779 if (write_sr(flash, status_new) < 0) {
780 res = 1;
781 goto err;
782 }
783 }
784
785err: mutex_unlock(&flash->lock);
786 return res;
787}
788
789static int m25p80_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
790{
791 struct m25p *flash = mtd_to_m25p(mtd);
792 uint32_t offset = ofs;
793 uint8_t status_old, status_new;
794 int res = 0;
795
796 mutex_lock(&flash->lock);
797 /* Wait until finished previous command */
798 if (wait_till_ready(flash)) {
799 res = 1;
800 goto err;
801 }
802
803 status_old = read_sr(flash);
804
805 if (offset+len > flash->mtd.size-(flash->mtd.size/64))
806 status_new = status_old & ~(SR_BP2|SR_BP1|SR_BP0);
807 else if (offset+len > flash->mtd.size-(flash->mtd.size/32))
808 status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0;
809 else if (offset+len > flash->mtd.size-(flash->mtd.size/16))
810 status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1;
811 else if (offset+len > flash->mtd.size-(flash->mtd.size/8))
812 status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
813 else if (offset+len > flash->mtd.size-(flash->mtd.size/4))
814 status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2;
815 else if (offset+len > flash->mtd.size-(flash->mtd.size/2))
816 status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
817 else
818 status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
819
820 /* Only modify protection if it will not lock other areas */
821 if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) <
822 (status_old&(SR_BP2|SR_BP1|SR_BP0))) {
823 write_enable(flash);
824 if (write_sr(flash, status_new) < 0) {
825 res = 1;
826 goto err;
827 }
828 }
829
830err: mutex_unlock(&flash->lock);
831 return res;
832}
833
834/****************************************************************************/
835
836/*
837 * SPI device driver setup and teardown
838 */
839
840struct flash_info {
841 /* JEDEC id zero means "no ID" (most older chips); otherwise it has
842 * a high byte of zero plus three data bytes: the manufacturer id,
843 * then a two byte device id.
844 */
845 u32 jedec_id;
846 u16 ext_id;
847
848 /* The size listed here is what works with OPCODE_SE, which isn't
849 * necessarily called a "sector" by the vendor.
850 */
851 unsigned sector_size;
852 u16 n_sectors;
853
854 u16 page_size;
855 u16 addr_width;
856
857 u16 flags;
858#define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */
859#define M25P_NO_ERASE 0x02 /* No erase command needed */
860#define SST_WRITE 0x04 /* use SST byte programming */
861#define M25P_NO_FR 0x08 /* Can't do fastread */
862#define SECT_4K_PMC 0x10 /* OPCODE_BE_4K_PMC works uniformly */
863#define M25P80_DUAL_READ 0x20 /* Flash supports Dual Read */
864#define M25P80_QUAD_READ 0x40 /* Flash supports Quad Read */
865};
866
867#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
868 ((kernel_ulong_t)&(struct flash_info) { \
869 .jedec_id = (_jedec_id), \
870 .ext_id = (_ext_id), \
871 .sector_size = (_sector_size), \
872 .n_sectors = (_n_sectors), \
873 .page_size = 256, \
874 .flags = (_flags), \
875 })
876
877#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
878 ((kernel_ulong_t)&(struct flash_info) { \
879 .sector_size = (_sector_size), \
880 .n_sectors = (_n_sectors), \
881 .page_size = (_page_size), \
882 .addr_width = (_addr_width), \
883 .flags = (_flags), \
884 })
885
886/* NOTE: double check command sets and memory organization when you add
887 * more flash chips. This current list focusses on newer chips, which
888 * have been converging on command sets which including JEDEC ID.
889 */
890static const struct spi_device_id m25p_ids[] = {
891 /* Atmel -- some are (confusingly) marketed as "DataFlash" */
892 { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
893 { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
894
895 { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
896 { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
897 { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
898
899 { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
900 { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
901 { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
902 { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
903
904 { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
905
906 /* EON -- en25xxx */
907 { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
908 { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
909 { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
910 { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
911 { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
912 { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
913
914 /* ESMT */
915 { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
916
917 /* Everspin */
918 { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, M25P_NO_ERASE | M25P_NO_FR) },
919 { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, M25P_NO_ERASE | M25P_NO_FR) },
920
921 /* GigaDevice */
922 { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
923 { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
924
925 /* Intel/Numonyx -- xxxs33b */
926 { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
927 { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
928 { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
929
930 /* Macronix */
931 { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
932 { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
933 { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
934 { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
935 { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
936 { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
937 { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
938 { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
939 { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
940 { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
941 { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
942 { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, M25P80_QUAD_READ) },
943 { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, M25P80_QUAD_READ) },
944
945 /* Micron */
946 { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
947 { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
948 { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
949 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
950 { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K) },
951
952 /* PMC */
953 { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
954 { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
955 { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
956
957 /* Spansion -- single (large) sector size only, at least
958 * for the chips listed here (without boot sectors).
959 */
960 { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, 0) },
961 { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) },
962 { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
963 { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, M25P80_DUAL_READ | M25P80_QUAD_READ) },
964 { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, M25P80_DUAL_READ | M25P80_QUAD_READ) },
965 { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
966 { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
967 { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
968 { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
969 { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
970 { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
971 { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
972 { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
973 { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
974 { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
975 { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
976 { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
977 { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
978
979 /* SST -- large erase sizes are "overlays", "sectors" are 4K */
980 { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
981 { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
982 { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
983 { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
984 { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
985 { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
986 { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
987 { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
988 { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
989
990 /* ST Microelectronics -- newer production may have feature updates */
991 { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
992 { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
993 { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
994 { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
995 { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
996 { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
997 { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
998 { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
999 { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
1000 { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) },
1001
1002 { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
1003 { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
1004 { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
1005 { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
1006 { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
1007 { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
1008 { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
1009 { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
1010 { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
1011
1012 { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
1013 { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
1014 { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
1015
1016 { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
1017 { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
1018 { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
1019
1020 { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
1021 { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
1022 { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
1023 { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
1024 { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
1025
1026 /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
1027 { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
1028 { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
1029 { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
1030 { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
1031 { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
1032 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
1033 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
1034 { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K) },
1035 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
1036 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
1037 { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
1038 { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
1039 { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
1040 { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
1041 { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
1042
1043 /* Catalyst / On Semiconductor -- non-JEDEC */
1044 { "cat25c11", CAT25_INFO( 16, 8, 16, 1, M25P_NO_ERASE | M25P_NO_FR) },
1045 { "cat25c03", CAT25_INFO( 32, 8, 16, 2, M25P_NO_ERASE | M25P_NO_FR) },
1046 { "cat25c09", CAT25_INFO( 128, 8, 32, 2, M25P_NO_ERASE | M25P_NO_FR) },
1047 { "cat25c17", CAT25_INFO( 256, 8, 32, 2, M25P_NO_ERASE | M25P_NO_FR) },
1048 { "cat25128", CAT25_INFO(2048, 8, 64, 2, M25P_NO_ERASE | M25P_NO_FR) },
1049 { },
1050};
1051MODULE_DEVICE_TABLE(spi, m25p_ids);
1052
1053static const struct spi_device_id *jedec_probe(struct spi_device *spi)
1054{
1055 int tmp;
1056 u8 code = OPCODE_RDID;
1057 u8 id[5];
1058 u32 jedec;
1059 u16 ext_jedec;
1060 struct flash_info *info;
1061 177
1062 /* JEDEC also defines an optional "extended device information" 178 /* Set up command buffer. */
1063 * string for after vendor-specific data, after the three bytes 179 flash->command[0] = nor->erase_opcode;
1064 * we use here. Supporting some chips might require using it. 180 m25p_addr2cmd(nor, offset, flash->command);
1065 */
1066 tmp = spi_write_then_read(spi, &code, 1, id, 5);
1067 if (tmp < 0) {
1068 pr_debug("%s: error %d reading JEDEC ID\n",
1069 dev_name(&spi->dev), tmp);
1070 return ERR_PTR(tmp);
1071 }
1072 jedec = id[0];
1073 jedec = jedec << 8;
1074 jedec |= id[1];
1075 jedec = jedec << 8;
1076 jedec |= id[2];
1077 181
1078 ext_jedec = id[3] << 8 | id[4]; 182 spi_write(flash->spi, flash->command, m25p_cmdsz(nor));
1079 183
1080 for (tmp = 0; tmp < ARRAY_SIZE(m25p_ids) - 1; tmp++) { 184 return 0;
1081 info = (void *)m25p_ids[tmp].driver_data;
1082 if (info->jedec_id == jedec) {
1083 if (info->ext_id == 0 || info->ext_id == ext_jedec)
1084 return &m25p_ids[tmp];
1085 }
1086 }
1087 dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
1088 return ERR_PTR(-ENODEV);
1089} 185}
1090 186
1091
1092/* 187/*
1093 * board specific setup should have ensured the SPI clock used here 188 * board specific setup should have ensured the SPI clock used here
1094 * matches what the READ command supports, at least until this driver 189 * matches what the READ command supports, at least until this driver
@@ -1096,231 +191,45 @@ static const struct spi_device_id *jedec_probe(struct spi_device *spi)
1096 */ 191 */
1097static int m25p_probe(struct spi_device *spi) 192static int m25p_probe(struct spi_device *spi)
1098{ 193{
1099 const struct spi_device_id *id = spi_get_device_id(spi);
1100 struct flash_platform_data *data;
1101 struct m25p *flash;
1102 struct flash_info *info;
1103 unsigned i;
1104 struct mtd_part_parser_data ppdata; 194 struct mtd_part_parser_data ppdata;
1105 struct device_node *np = spi->dev.of_node; 195 struct flash_platform_data *data;
196 struct m25p *flash;
197 struct spi_nor *nor;
198 enum read_mode mode = SPI_NOR_NORMAL;
1106 int ret; 199 int ret;
1107 200
1108 /* Platform data helps sort out which chip type we have, as
1109 * well as how this board partitions it. If we don't have
1110 * a chip ID, try the JEDEC id commands; they'll work for most
1111 * newer chips, even if we don't recognize the particular chip.
1112 */
1113 data = dev_get_platdata(&spi->dev);
1114 if (data && data->type) {
1115 const struct spi_device_id *plat_id;
1116
1117 for (i = 0; i < ARRAY_SIZE(m25p_ids) - 1; i++) {
1118 plat_id = &m25p_ids[i];
1119 if (strcmp(data->type, plat_id->name))
1120 continue;
1121 break;
1122 }
1123
1124 if (i < ARRAY_SIZE(m25p_ids) - 1)
1125 id = plat_id;
1126 else
1127 dev_warn(&spi->dev, "unrecognized id %s\n", data->type);
1128 }
1129
1130 info = (void *)id->driver_data;
1131
1132 if (info->jedec_id) {
1133 const struct spi_device_id *jid;
1134
1135 jid = jedec_probe(spi);
1136 if (IS_ERR(jid)) {
1137 return PTR_ERR(jid);
1138 } else if (jid != id) {
1139 /*
1140 * JEDEC knows better, so overwrite platform ID. We
1141 * can't trust partitions any longer, but we'll let
1142 * mtd apply them anyway, since some partitions may be
1143 * marked read-only, and we don't want to lose that
1144 * information, even if it's not 100% accurate.
1145 */
1146 dev_warn(&spi->dev, "found %s, expected %s\n",
1147 jid->name, id->name);
1148 id = jid;
1149 info = (void *)jid->driver_data;
1150 }
1151 }
1152
1153 flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL); 201 flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
1154 if (!flash) 202 if (!flash)
1155 return -ENOMEM; 203 return -ENOMEM;
1156 204
1157 flash->command = devm_kzalloc(&spi->dev, MAX_CMD_SIZE, GFP_KERNEL); 205 nor = &flash->spi_nor;
1158 if (!flash->command)
1159 return -ENOMEM;
1160
1161 flash->spi = spi;
1162 mutex_init(&flash->lock);
1163 spi_set_drvdata(spi, flash);
1164
1165 /*
1166 * Atmel, SST and Intel/Numonyx serial flash tend to power
1167 * up with the software protection bits set
1168 */
1169 206
1170 if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ATMEL || 207 /* install the hooks */
1171 JEDEC_MFR(info->jedec_id) == CFI_MFR_INTEL || 208 nor->read = m25p80_read;
1172 JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) { 209 nor->write = m25p80_write;
1173 write_enable(flash); 210 nor->erase = m25p80_erase;
1174 write_sr(flash, 0); 211 nor->write_reg = m25p80_write_reg;
1175 } 212 nor->read_reg = m25p80_read_reg;
1176
1177 if (data && data->name)
1178 flash->mtd.name = data->name;
1179 else
1180 flash->mtd.name = dev_name(&spi->dev);
1181
1182 flash->mtd.type = MTD_NORFLASH;
1183 flash->mtd.writesize = 1;
1184 flash->mtd.flags = MTD_CAP_NORFLASH;
1185 flash->mtd.size = info->sector_size * info->n_sectors;
1186 flash->mtd._erase = m25p80_erase;
1187 flash->mtd._read = m25p80_read;
1188
1189 /* flash protection support for STmicro chips */
1190 if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ST) {
1191 flash->mtd._lock = m25p80_lock;
1192 flash->mtd._unlock = m25p80_unlock;
1193 }
1194 213
1195 /* sst flash chips use AAI word program */ 214 nor->dev = &spi->dev;
1196 if (info->flags & SST_WRITE) 215 nor->mtd = &flash->mtd;
1197 flash->mtd._write = sst_write; 216 nor->priv = flash;
1198 else
1199 flash->mtd._write = m25p80_write;
1200 217
1201 /* prefer "small sector" erase if possible */ 218 spi_set_drvdata(spi, flash);
1202 if (info->flags & SECT_4K) { 219 flash->mtd.priv = nor;
1203 flash->erase_opcode = OPCODE_BE_4K; 220 flash->spi = spi;
1204 flash->mtd.erasesize = 4096;
1205 } else if (info->flags & SECT_4K_PMC) {
1206 flash->erase_opcode = OPCODE_BE_4K_PMC;
1207 flash->mtd.erasesize = 4096;
1208 } else {
1209 flash->erase_opcode = OPCODE_SE;
1210 flash->mtd.erasesize = info->sector_size;
1211 }
1212 221
1213 if (info->flags & M25P_NO_ERASE) 222 if (spi->mode & SPI_RX_QUAD)
1214 flash->mtd.flags |= MTD_NO_ERASE; 223 mode = SPI_NOR_QUAD;
224 else if (spi->mode & SPI_RX_DUAL)
225 mode = SPI_NOR_DUAL;
226 ret = spi_nor_scan(nor, spi_get_device_id(spi), mode);
227 if (ret)
228 return ret;
1215 229
230 data = dev_get_platdata(&spi->dev);
1216 ppdata.of_node = spi->dev.of_node; 231 ppdata.of_node = spi->dev.of_node;
1217 flash->mtd.dev.parent = &spi->dev;
1218 flash->page_size = info->page_size;
1219 flash->mtd.writebufsize = flash->page_size;
1220
1221 if (np) {
1222 /* If we were instantiated by DT, use it */
1223 if (of_property_read_bool(np, "m25p,fast-read"))
1224 flash->flash_read = M25P80_FAST;
1225 else
1226 flash->flash_read = M25P80_NORMAL;
1227 } else {
1228 /* If we weren't instantiated by DT, default to fast-read */
1229 flash->flash_read = M25P80_FAST;
1230 }
1231
1232 /* Some devices cannot do fast-read, no matter what DT tells us */
1233 if (info->flags & M25P_NO_FR)
1234 flash->flash_read = M25P80_NORMAL;
1235
1236 /* Quad/Dual-read mode takes precedence over fast/normal */
1237 if (spi->mode & SPI_RX_QUAD && info->flags & M25P80_QUAD_READ) {
1238 ret = set_quad_mode(flash, info->jedec_id);
1239 if (ret) {
1240 dev_err(&flash->spi->dev, "quad mode not supported\n");
1241 return ret;
1242 }
1243 flash->flash_read = M25P80_QUAD;
1244 } else if (spi->mode & SPI_RX_DUAL && info->flags & M25P80_DUAL_READ) {
1245 flash->flash_read = M25P80_DUAL;
1246 }
1247 232
1248 /* Default commands */
1249 switch (flash->flash_read) {
1250 case M25P80_QUAD:
1251 flash->read_opcode = OPCODE_QUAD_READ;
1252 break;
1253 case M25P80_DUAL:
1254 flash->read_opcode = OPCODE_DUAL_READ;
1255 break;
1256 case M25P80_FAST:
1257 flash->read_opcode = OPCODE_FAST_READ;
1258 break;
1259 case M25P80_NORMAL:
1260 flash->read_opcode = OPCODE_NORM_READ;
1261 break;
1262 default:
1263 dev_err(&flash->spi->dev, "No Read opcode defined\n");
1264 return -EINVAL;
1265 }
1266
1267 flash->program_opcode = OPCODE_PP;
1268
1269 if (info->addr_width)
1270 flash->addr_width = info->addr_width;
1271 else if (flash->mtd.size > 0x1000000) {
1272 /* enable 4-byte addressing if the device exceeds 16MiB */
1273 flash->addr_width = 4;
1274 if (JEDEC_MFR(info->jedec_id) == CFI_MFR_AMD) {
1275 /* Dedicated 4-byte command set */
1276 switch (flash->flash_read) {
1277 case M25P80_QUAD:
1278 flash->read_opcode = OPCODE_QUAD_READ_4B;
1279 break;
1280 case M25P80_DUAL:
1281 flash->read_opcode = OPCODE_DUAL_READ_4B;
1282 break;
1283 case M25P80_FAST:
1284 flash->read_opcode = OPCODE_FAST_READ_4B;
1285 break;
1286 case M25P80_NORMAL:
1287 flash->read_opcode = OPCODE_NORM_READ_4B;
1288 break;
1289 }
1290 flash->program_opcode = OPCODE_PP_4B;
1291 /* No small sector erase for 4-byte command set */
1292 flash->erase_opcode = OPCODE_SE_4B;
1293 flash->mtd.erasesize = info->sector_size;
1294 } else
1295 set_4byte(flash, info->jedec_id, 1);
1296 } else {
1297 flash->addr_width = 3;
1298 }
1299
1300 dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
1301 (long long)flash->mtd.size >> 10);
1302
1303 pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) "
1304 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
1305 flash->mtd.name,
1306 (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
1307 flash->mtd.erasesize, flash->mtd.erasesize / 1024,
1308 flash->mtd.numeraseregions);
1309
1310 if (flash->mtd.numeraseregions)
1311 for (i = 0; i < flash->mtd.numeraseregions; i++)
1312 pr_debug("mtd.eraseregions[%d] = { .offset = 0x%llx, "
1313 ".erasesize = 0x%.8x (%uKiB), "
1314 ".numblocks = %d }\n",
1315 i, (long long)flash->mtd.eraseregions[i].offset,
1316 flash->mtd.eraseregions[i].erasesize,
1317 flash->mtd.eraseregions[i].erasesize / 1024,
1318 flash->mtd.eraseregions[i].numblocks);
1319
1320
1321 /* partitions should match sector boundaries; and it may be good to
1322 * use readonly partitions for writeprotected sectors (BP2..BP0).
1323 */
1324 return mtd_device_parse_register(&flash->mtd, NULL, &ppdata, 233 return mtd_device_parse_register(&flash->mtd, NULL, &ppdata,
1325 data ? data->parts : NULL, 234 data ? data->parts : NULL,
1326 data ? data->nr_parts : 0); 235 data ? data->nr_parts : 0);
@@ -1341,7 +250,7 @@ static struct spi_driver m25p80_driver = {
1341 .name = "m25p80", 250 .name = "m25p80",
1342 .owner = THIS_MODULE, 251 .owner = THIS_MODULE,
1343 }, 252 },
1344 .id_table = m25p_ids, 253 .id_table = spi_nor_ids,
1345 .probe = m25p_probe, 254 .probe = m25p_probe,
1346 .remove = m25p_remove, 255 .remove = m25p_remove,
1347 256
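The rewritten probe above no longer decodes JEDEC IDs, opcodes or address widths itself: it only wires transport hooks into a struct spi_nor and lets spi_nor_scan() identify the chip and fill in the MTD parameters. A minimal sketch of that client pattern, condensed from the new-side lines of the hunk; the my_nor_* callbacks are hypothetical driver helpers whose prototypes must match the hooks declared in <linux/mtd/spi-nor.h>, and my_flash/my_flash_probe are illustrative names only.

#include <linux/device.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/spi-nor.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>

struct my_flash {
	struct spi_device	*spi;
	struct spi_nor		spi_nor;
	struct mtd_info		mtd;
};

static int my_flash_probe(struct spi_device *spi)
{
	struct mtd_part_parser_data ppdata = { };
	struct flash_platform_data *data;
	enum read_mode mode = SPI_NOR_NORMAL;
	struct my_flash *flash;
	struct spi_nor *nor;
	int ret;

	flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
	if (!flash)
		return -ENOMEM;

	nor = &flash->spi_nor;

	/* Install the transport hooks; the spi-nor core drives them. */
	nor->read = my_nor_read;		/* hypothetical helpers */
	nor->write = my_nor_write;
	nor->erase = my_nor_erase;
	nor->write_reg = my_nor_write_reg;
	nor->read_reg = my_nor_read_reg;

	nor->dev = &spi->dev;
	nor->mtd = &flash->mtd;
	nor->priv = flash;

	spi_set_drvdata(spi, flash);
	flash->mtd.priv = nor;
	flash->spi = spi;

	if (spi->mode & SPI_RX_QUAD)
		mode = SPI_NOR_QUAD;
	else if (spi->mode & SPI_RX_DUAL)
		mode = SPI_NOR_DUAL;

	/* Identify the chip and derive size, erase and read parameters. */
	ret = spi_nor_scan(nor, spi_get_device_id(spi), mode);
	if (ret)
		return ret;

	data = dev_get_platdata(&spi->dev);
	ppdata.of_node = spi->dev.of_node;

	return mtd_device_parse_register(&flash->mtd, NULL, &ppdata,
					 data ? data->parts : NULL,
					 data ? data->nr_parts : 0);
}

Everything the old probe derived by hand (erase opcode selection, 4-byte addressing, fast/dual/quad read setup, SST AAI writes) is now expected to happen inside spi_nor_scan().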
diff --git a/drivers/mtd/devices/serial_flash_cmds.h b/drivers/mtd/devices/serial_flash_cmds.h
index 4f0c2c7c898e..f59a125295d0 100644
--- a/drivers/mtd/devices/serial_flash_cmds.h
+++ b/drivers/mtd/devices/serial_flash_cmds.h
@@ -13,43 +13,23 @@
13#define _MTD_SERIAL_FLASH_CMDS_H 13#define _MTD_SERIAL_FLASH_CMDS_H
14 14
15/* Generic Flash Commands/OPCODEs */ 15/* Generic Flash Commands/OPCODEs */
16#define FLASH_CMD_WREN 0x06 16#define SPINOR_OP_RDSR2 0x35
17#define FLASH_CMD_WRDI 0x04 17#define SPINOR_OP_WRVCR 0x81
18#define FLASH_CMD_RDID 0x9f 18#define SPINOR_OP_RDVCR 0x85
19#define FLASH_CMD_RDSR 0x05
20#define FLASH_CMD_RDSR2 0x35
21#define FLASH_CMD_WRSR 0x01
22#define FLASH_CMD_SE_4K 0x20
23#define FLASH_CMD_SE_32K 0x52
24#define FLASH_CMD_SE 0xd8
25#define FLASH_CMD_CHIPERASE 0xc7
26#define FLASH_CMD_WRVCR 0x81
27#define FLASH_CMD_RDVCR 0x85
28 19
29/* JEDEC Standard - Serial Flash Discoverable Parameters (SFDP) Commands */ 20/* JEDEC Standard - Serial Flash Discoverable Parameters (SFDP) Commands */
30#define FLASH_CMD_READ 0x03 /* READ */ 21#define SPINOR_OP_READ_1_2_2 0xbb /* DUAL I/O READ */
31#define FLASH_CMD_READ_FAST 0x0b /* FAST READ */ 22#define SPINOR_OP_READ_1_4_4 0xeb /* QUAD I/O READ */
32#define FLASH_CMD_READ_1_1_2 0x3b /* DUAL OUTPUT READ */
33#define FLASH_CMD_READ_1_2_2 0xbb /* DUAL I/O READ */
34#define FLASH_CMD_READ_1_1_4 0x6b /* QUAD OUTPUT READ */
35#define FLASH_CMD_READ_1_4_4 0xeb /* QUAD I/O READ */
36 23
37#define FLASH_CMD_WRITE 0x02 /* PAGE PROGRAM */ 24#define SPINOR_OP_WRITE 0x02 /* PAGE PROGRAM */
38#define FLASH_CMD_WRITE_1_1_2 0xa2 /* DUAL INPUT PROGRAM */ 25#define SPINOR_OP_WRITE_1_1_2 0xa2 /* DUAL INPUT PROGRAM */
39#define FLASH_CMD_WRITE_1_2_2 0xd2 /* DUAL INPUT EXT PROGRAM */ 26#define SPINOR_OP_WRITE_1_2_2 0xd2 /* DUAL INPUT EXT PROGRAM */
40#define FLASH_CMD_WRITE_1_1_4 0x32 /* QUAD INPUT PROGRAM */ 27#define SPINOR_OP_WRITE_1_1_4 0x32 /* QUAD INPUT PROGRAM */
41#define FLASH_CMD_WRITE_1_4_4 0x12 /* QUAD INPUT EXT PROGRAM */ 28#define SPINOR_OP_WRITE_1_4_4 0x12 /* QUAD INPUT EXT PROGRAM */
42
43#define FLASH_CMD_EN4B_ADDR 0xb7 /* Enter 4-byte address mode */
44#define FLASH_CMD_EX4B_ADDR 0xe9 /* Exit 4-byte address mode */
45 29
46/* READ commands with 32-bit addressing */ 30/* READ commands with 32-bit addressing */
47#define FLASH_CMD_READ4 0x13 31#define SPINOR_OP_READ4_1_2_2 0xbc
48#define FLASH_CMD_READ4_FAST 0x0c 32#define SPINOR_OP_READ4_1_4_4 0xec
49#define FLASH_CMD_READ4_1_1_2 0x3c
50#define FLASH_CMD_READ4_1_2_2 0xbc
51#define FLASH_CMD_READ4_1_1_4 0x6c
52#define FLASH_CMD_READ4_1_4_4 0xec
53 33
54/* Configuration flags */ 34/* Configuration flags */
55#define FLASH_FLAG_SINGLE 0x000000ff 35#define FLASH_FLAG_SINGLE 0x000000ff
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 5a5cd2ace4a6..2fc4957cbe7f 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -280,14 +280,11 @@ __setup("slram=", mtd_slram_setup);
280static int __init init_slram(void) 280static int __init init_slram(void)
281{ 281{
282 char *devname; 282 char *devname;
283 int i;
284 283
285#ifndef MODULE 284#ifndef MODULE
286 char *devstart; 285 char *devstart;
287 char *devlength; 286 char *devlength;
288 287
289 i = 0;
290
291 if (!map) { 288 if (!map) {
292 E("slram: not enough parameters.\n"); 289 E("slram: not enough parameters.\n");
293 return(-EINVAL); 290 return(-EINVAL);
@@ -314,6 +311,7 @@ static int __init init_slram(void)
314 } 311 }
315#else 312#else
316 int count; 313 int count;
314 int i;
317 315
318 for (count = 0; count < SLRAM_MAX_DEVICES_PARAMS && map[count]; 316 for (count = 0; count < SLRAM_MAX_DEVICES_PARAMS && map[count];
319 count++) { 317 count++) {
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index 1957d7c8e185..d252514d3e98 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -19,6 +19,7 @@
19#include <linux/mfd/syscon.h> 19#include <linux/mfd/syscon.h>
20#include <linux/mtd/mtd.h> 20#include <linux/mtd/mtd.h>
21#include <linux/mtd/partitions.h> 21#include <linux/mtd/partitions.h>
22#include <linux/mtd/spi-nor.h>
22#include <linux/sched.h> 23#include <linux/sched.h>
23#include <linux/delay.h> 24#include <linux/delay.h>
24#include <linux/io.h> 25#include <linux/io.h>
@@ -201,44 +202,6 @@
201 202
202#define STFSM_MAX_WAIT_SEQ_MS 1000 /* FSM execution time */ 203#define STFSM_MAX_WAIT_SEQ_MS 1000 /* FSM execution time */
203 204
204/* Flash Commands */
205#define FLASH_CMD_WREN 0x06
206#define FLASH_CMD_WRDI 0x04
207#define FLASH_CMD_RDID 0x9f
208#define FLASH_CMD_RDSR 0x05
209#define FLASH_CMD_RDSR2 0x35
210#define FLASH_CMD_WRSR 0x01
211#define FLASH_CMD_SE_4K 0x20
212#define FLASH_CMD_SE_32K 0x52
213#define FLASH_CMD_SE 0xd8
214#define FLASH_CMD_CHIPERASE 0xc7
215#define FLASH_CMD_WRVCR 0x81
216#define FLASH_CMD_RDVCR 0x85
217
218#define FLASH_CMD_READ 0x03 /* READ */
219#define FLASH_CMD_READ_FAST 0x0b /* FAST READ */
220#define FLASH_CMD_READ_1_1_2 0x3b /* DUAL OUTPUT READ */
221#define FLASH_CMD_READ_1_2_2 0xbb /* DUAL I/O READ */
222#define FLASH_CMD_READ_1_1_4 0x6b /* QUAD OUTPUT READ */
223#define FLASH_CMD_READ_1_4_4 0xeb /* QUAD I/O READ */
224
225#define FLASH_CMD_WRITE 0x02 /* PAGE PROGRAM */
226#define FLASH_CMD_WRITE_1_1_2 0xa2 /* DUAL INPUT PROGRAM */
227#define FLASH_CMD_WRITE_1_2_2 0xd2 /* DUAL INPUT EXT PROGRAM */
228#define FLASH_CMD_WRITE_1_1_4 0x32 /* QUAD INPUT PROGRAM */
229#define FLASH_CMD_WRITE_1_4_4 0x12 /* QUAD INPUT EXT PROGRAM */
230
231#define FLASH_CMD_EN4B_ADDR 0xb7 /* Enter 4-byte address mode */
232#define FLASH_CMD_EX4B_ADDR 0xe9 /* Exit 4-byte address mode */
233
234/* READ commands with 32-bit addressing (N25Q256 and S25FLxxxS) */
235#define FLASH_CMD_READ4 0x13
236#define FLASH_CMD_READ4_FAST 0x0c
237#define FLASH_CMD_READ4_1_1_2 0x3c
238#define FLASH_CMD_READ4_1_2_2 0xbc
239#define FLASH_CMD_READ4_1_1_4 0x6c
240#define FLASH_CMD_READ4_1_4_4 0xec
241
242/* S25FLxxxS commands */ 205/* S25FLxxxS commands */
243#define S25FL_CMD_WRITE4_1_1_4 0x34 206#define S25FL_CMD_WRITE4_1_1_4 0x34
244#define S25FL_CMD_SE4 0xdc 207#define S25FL_CMD_SE4 0xdc
@@ -246,7 +209,7 @@
246#define S25FL_CMD_DYBWR 0xe1 209#define S25FL_CMD_DYBWR 0xe1
247#define S25FL_CMD_DYBRD 0xe0 210#define S25FL_CMD_DYBRD 0xe0
248#define S25FL_CMD_WRITE4 0x12 /* Note, opcode clashes with 211#define S25FL_CMD_WRITE4 0x12 /* Note, opcode clashes with
249 * 'FLASH_CMD_WRITE_1_4_4' 212 * 'SPINOR_OP_WRITE_1_4_4'
250 * as found on N25Qxxx devices! */ 213 * as found on N25Qxxx devices! */
251 214
252/* Status register */ 215/* Status register */
@@ -261,6 +224,12 @@
261#define S25FL_STATUS_E_ERR 0x20 224#define S25FL_STATUS_E_ERR 0x20
262#define S25FL_STATUS_P_ERR 0x40 225#define S25FL_STATUS_P_ERR 0x40
263 226
227#define N25Q_CMD_WRVCR 0x81
228#define N25Q_CMD_RDVCR 0x85
229#define N25Q_CMD_RDVECR 0x65
230#define N25Q_CMD_RDNVCR 0xb5
231#define N25Q_CMD_WRNVCR 0xb1
232
264#define FLASH_PAGESIZE 256 /* In Bytes */ 233#define FLASH_PAGESIZE 256 /* In Bytes */
265#define FLASH_PAGESIZE_32 (FLASH_PAGESIZE / 4) /* In uint32_t */ 234#define FLASH_PAGESIZE_32 (FLASH_PAGESIZE / 4) /* In uint32_t */
266#define FLASH_MAX_BUSY_WAIT (300 * HZ) /* Maximum 'CHIPERASE' time */ 235#define FLASH_MAX_BUSY_WAIT (300 * HZ) /* Maximum 'CHIPERASE' time */
@@ -270,7 +239,6 @@
270 */ 239 */
271#define CFG_READ_TOGGLE_32BIT_ADDR 0x00000001 240#define CFG_READ_TOGGLE_32BIT_ADDR 0x00000001
272#define CFG_WRITE_TOGGLE_32BIT_ADDR 0x00000002 241#define CFG_WRITE_TOGGLE_32BIT_ADDR 0x00000002
273#define CFG_WRITE_EX_32BIT_ADDR_DELAY 0x00000004
274#define CFG_ERASESEC_TOGGLE_32BIT_ADDR 0x00000008 242#define CFG_ERASESEC_TOGGLE_32BIT_ADDR 0x00000008
275#define CFG_S25FL_CHECK_ERROR_FLAGS 0x00000010 243#define CFG_S25FL_CHECK_ERROR_FLAGS 0x00000010
276 244
@@ -329,7 +297,7 @@ struct flash_info {
329 u32 jedec_id; 297 u32 jedec_id;
330 u16 ext_id; 298 u16 ext_id;
331 /* 299 /*
332 * The size listed here is what works with FLASH_CMD_SE, which isn't 300 * The size listed here is what works with SPINOR_OP_SE, which isn't
333 * necessarily called a "sector" by the vendor. 301 * necessarily called a "sector" by the vendor.
334 */ 302 */
335 unsigned sector_size; 303 unsigned sector_size;
@@ -369,17 +337,26 @@ static struct flash_info flash_types[] = {
369 { "m25px32", 0x207116, 0, 64 * 1024, 64, M25PX_FLAG, 75, NULL }, 337 { "m25px32", 0x207116, 0, 64 * 1024, 64, M25PX_FLAG, 75, NULL },
370 { "m25px64", 0x207117, 0, 64 * 1024, 128, M25PX_FLAG, 75, NULL }, 338 { "m25px64", 0x207117, 0, 64 * 1024, 128, M25PX_FLAG, 75, NULL },
371 339
340 /* Macronix MX25xxx
341 * - Support for 'FLASH_FLAG_WRITE_1_4_4' is omitted for devices
342 * where operating frequency must be reduced.
343 */
372#define MX25_FLAG (FLASH_FLAG_READ_WRITE | \ 344#define MX25_FLAG (FLASH_FLAG_READ_WRITE | \
373 FLASH_FLAG_READ_FAST | \ 345 FLASH_FLAG_READ_FAST | \
374 FLASH_FLAG_READ_1_1_2 | \ 346 FLASH_FLAG_READ_1_1_2 | \
375 FLASH_FLAG_READ_1_2_2 | \ 347 FLASH_FLAG_READ_1_2_2 | \
376 FLASH_FLAG_READ_1_1_4 | \ 348 FLASH_FLAG_READ_1_1_4 | \
377 FLASH_FLAG_READ_1_4_4 | \
378 FLASH_FLAG_SE_4K | \ 349 FLASH_FLAG_SE_4K | \
379 FLASH_FLAG_SE_32K) 350 FLASH_FLAG_SE_32K)
351 { "mx25l3255e", 0xc29e16, 0, 64 * 1024, 64,
352 (MX25_FLAG | FLASH_FLAG_WRITE_1_4_4), 86,
353 stfsm_mx25_config},
380 { "mx25l25635e", 0xc22019, 0, 64*1024, 512, 354 { "mx25l25635e", 0xc22019, 0, 64*1024, 512,
381 (MX25_FLAG | FLASH_FLAG_32BIT_ADDR | FLASH_FLAG_RESET), 70, 355 (MX25_FLAG | FLASH_FLAG_32BIT_ADDR | FLASH_FLAG_RESET), 70,
382 stfsm_mx25_config }, 356 stfsm_mx25_config },
357 { "mx25l25655e", 0xc22619, 0, 64*1024, 512,
358 (MX25_FLAG | FLASH_FLAG_32BIT_ADDR | FLASH_FLAG_RESET), 70,
359 stfsm_mx25_config},
383 360
384#define N25Q_FLAG (FLASH_FLAG_READ_WRITE | \ 361#define N25Q_FLAG (FLASH_FLAG_READ_WRITE | \
385 FLASH_FLAG_READ_FAST | \ 362 FLASH_FLAG_READ_FAST | \
@@ -407,6 +384,8 @@ static struct flash_info flash_types[] = {
407 FLASH_FLAG_READ_1_4_4 | \ 384 FLASH_FLAG_READ_1_4_4 | \
408 FLASH_FLAG_WRITE_1_1_4 | \ 385 FLASH_FLAG_WRITE_1_1_4 | \
409 FLASH_FLAG_READ_FAST) 386 FLASH_FLAG_READ_FAST)
387 { "s25fl032p", 0x010215, 0x4d00, 64 * 1024, 64, S25FLXXXP_FLAG, 80,
388 stfsm_s25fl_config},
410 { "s25fl129p0", 0x012018, 0x4d00, 256 * 1024, 64, S25FLXXXP_FLAG, 80, 389 { "s25fl129p0", 0x012018, 0x4d00, 256 * 1024, 64, S25FLXXXP_FLAG, 80,
411 stfsm_s25fl_config }, 390 stfsm_s25fl_config },
412 { "s25fl129p1", 0x012018, 0x4d01, 64 * 1024, 256, S25FLXXXP_FLAG, 80, 391 { "s25fl129p1", 0x012018, 0x4d01, 64 * 1024, 256, S25FLXXXP_FLAG, 80,
@@ -473,22 +452,22 @@ static struct flash_info flash_types[] = {
473 452
474/* Default READ configurations, in order of preference */ 453/* Default READ configurations, in order of preference */
475static struct seq_rw_config default_read_configs[] = { 454static struct seq_rw_config default_read_configs[] = {
476 {FLASH_FLAG_READ_1_4_4, FLASH_CMD_READ_1_4_4, 0, 4, 4, 0x00, 2, 4}, 455 {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4, 0, 4, 4, 0x00, 2, 4},
477 {FLASH_FLAG_READ_1_1_4, FLASH_CMD_READ_1_1_4, 0, 1, 4, 0x00, 4, 0}, 456 {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4, 0, 1, 4, 0x00, 4, 0},
478 {FLASH_FLAG_READ_1_2_2, FLASH_CMD_READ_1_2_2, 0, 2, 2, 0x00, 4, 0}, 457 {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2, 0, 2, 2, 0x00, 4, 0},
479 {FLASH_FLAG_READ_1_1_2, FLASH_CMD_READ_1_1_2, 0, 1, 2, 0x00, 0, 8}, 458 {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2, 0, 1, 2, 0x00, 0, 8},
480 {FLASH_FLAG_READ_FAST, FLASH_CMD_READ_FAST, 0, 1, 1, 0x00, 0, 8}, 459 {FLASH_FLAG_READ_FAST, SPINOR_OP_READ_FAST, 0, 1, 1, 0x00, 0, 8},
481 {FLASH_FLAG_READ_WRITE, FLASH_CMD_READ, 0, 1, 1, 0x00, 0, 0}, 460 {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ, 0, 1, 1, 0x00, 0, 0},
482 {0x00, 0, 0, 0, 0, 0x00, 0, 0}, 461 {0x00, 0, 0, 0, 0, 0x00, 0, 0},
483}; 462};
484 463
485/* Default WRITE configurations */ 464/* Default WRITE configurations */
486static struct seq_rw_config default_write_configs[] = { 465static struct seq_rw_config default_write_configs[] = {
487 {FLASH_FLAG_WRITE_1_4_4, FLASH_CMD_WRITE_1_4_4, 1, 4, 4, 0x00, 0, 0}, 466 {FLASH_FLAG_WRITE_1_4_4, SPINOR_OP_WRITE_1_4_4, 1, 4, 4, 0x00, 0, 0},
488 {FLASH_FLAG_WRITE_1_1_4, FLASH_CMD_WRITE_1_1_4, 1, 1, 4, 0x00, 0, 0}, 467 {FLASH_FLAG_WRITE_1_1_4, SPINOR_OP_WRITE_1_1_4, 1, 1, 4, 0x00, 0, 0},
489 {FLASH_FLAG_WRITE_1_2_2, FLASH_CMD_WRITE_1_2_2, 1, 2, 2, 0x00, 0, 0}, 468 {FLASH_FLAG_WRITE_1_2_2, SPINOR_OP_WRITE_1_2_2, 1, 2, 2, 0x00, 0, 0},
490 {FLASH_FLAG_WRITE_1_1_2, FLASH_CMD_WRITE_1_1_2, 1, 1, 2, 0x00, 0, 0}, 469 {FLASH_FLAG_WRITE_1_1_2, SPINOR_OP_WRITE_1_1_2, 1, 1, 2, 0x00, 0, 0},
491 {FLASH_FLAG_READ_WRITE, FLASH_CMD_WRITE, 1, 1, 1, 0x00, 0, 0}, 470 {FLASH_FLAG_READ_WRITE, SPINOR_OP_WRITE, 1, 1, 1, 0x00, 0, 0},
492 {0x00, 0, 0, 0, 0, 0x00, 0, 0}, 471 {0x00, 0, 0, 0, 0, 0x00, 0, 0},
493}; 472};
494 473
@@ -511,12 +490,12 @@ static struct seq_rw_config default_write_configs[] = {
511 * cycles. 490 * cycles.
512 */ 491 */
513static struct seq_rw_config n25q_read3_configs[] = { 492static struct seq_rw_config n25q_read3_configs[] = {
514 {FLASH_FLAG_READ_1_4_4, FLASH_CMD_READ_1_4_4, 0, 4, 4, 0x00, 0, 8}, 493 {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4, 0, 4, 4, 0x00, 0, 8},
515 {FLASH_FLAG_READ_1_1_4, FLASH_CMD_READ_1_1_4, 0, 1, 4, 0x00, 0, 8}, 494 {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4, 0, 1, 4, 0x00, 0, 8},
516 {FLASH_FLAG_READ_1_2_2, FLASH_CMD_READ_1_2_2, 0, 2, 2, 0x00, 0, 8}, 495 {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2, 0, 2, 2, 0x00, 0, 8},
517 {FLASH_FLAG_READ_1_1_2, FLASH_CMD_READ_1_1_2, 0, 1, 2, 0x00, 0, 8}, 496 {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2, 0, 1, 2, 0x00, 0, 8},
518 {FLASH_FLAG_READ_FAST, FLASH_CMD_READ_FAST, 0, 1, 1, 0x00, 0, 8}, 497 {FLASH_FLAG_READ_FAST, SPINOR_OP_READ_FAST, 0, 1, 1, 0x00, 0, 8},
519 {FLASH_FLAG_READ_WRITE, FLASH_CMD_READ, 0, 1, 1, 0x00, 0, 0}, 498 {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ, 0, 1, 1, 0x00, 0, 0},
520 {0x00, 0, 0, 0, 0, 0x00, 0, 0}, 499 {0x00, 0, 0, 0, 0, 0x00, 0, 0},
521}; 500};
522 501
@@ -526,12 +505,12 @@ static struct seq_rw_config n25q_read3_configs[] = {
526 * - 'FAST' variants configured for 8 dummy cycles (see note above.) 505 * - 'FAST' variants configured for 8 dummy cycles (see note above.)
527 */ 506 */
528static struct seq_rw_config n25q_read4_configs[] = { 507static struct seq_rw_config n25q_read4_configs[] = {
529 {FLASH_FLAG_READ_1_4_4, FLASH_CMD_READ4_1_4_4, 0, 4, 4, 0x00, 0, 8}, 508 {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ4_1_4_4, 0, 4, 4, 0x00, 0, 8},
530 {FLASH_FLAG_READ_1_1_4, FLASH_CMD_READ4_1_1_4, 0, 1, 4, 0x00, 0, 8}, 509 {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ4_1_1_4, 0, 1, 4, 0x00, 0, 8},
531 {FLASH_FLAG_READ_1_2_2, FLASH_CMD_READ4_1_2_2, 0, 2, 2, 0x00, 0, 8}, 510 {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ4_1_2_2, 0, 2, 2, 0x00, 0, 8},
532 {FLASH_FLAG_READ_1_1_2, FLASH_CMD_READ4_1_1_2, 0, 1, 2, 0x00, 0, 8}, 511 {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ4_1_1_2, 0, 1, 2, 0x00, 0, 8},
533 {FLASH_FLAG_READ_FAST, FLASH_CMD_READ4_FAST, 0, 1, 1, 0x00, 0, 8}, 512 {FLASH_FLAG_READ_FAST, SPINOR_OP_READ4_FAST, 0, 1, 1, 0x00, 0, 8},
534 {FLASH_FLAG_READ_WRITE, FLASH_CMD_READ4, 0, 1, 1, 0x00, 0, 0}, 513 {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ4, 0, 1, 1, 0x00, 0, 0},
535 {0x00, 0, 0, 0, 0, 0x00, 0, 0}, 514 {0x00, 0, 0, 0, 0, 0x00, 0, 0},
536}; 515};
537 516
@@ -544,7 +523,7 @@ static int stfsm_mx25_en_32bit_addr_seq(struct stfsm_seq *seq)
544{ 523{
545 seq->seq_opc[0] = (SEQ_OPC_PADS_1 | 524 seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
546 SEQ_OPC_CYCLES(8) | 525 SEQ_OPC_CYCLES(8) |
547 SEQ_OPC_OPCODE(FLASH_CMD_EN4B_ADDR) | 526 SEQ_OPC_OPCODE(SPINOR_OP_EN4B) |
548 SEQ_OPC_CSDEASSERT); 527 SEQ_OPC_CSDEASSERT);
549 528
550 seq->seq[0] = STFSM_INST_CMD1; 529 seq->seq[0] = STFSM_INST_CMD1;
@@ -572,12 +551,12 @@ static int stfsm_mx25_en_32bit_addr_seq(struct stfsm_seq *seq)
572 * entering a state that is incompatible with the SPIBoot Controller. 551 * entering a state that is incompatible with the SPIBoot Controller.
573 */ 552 */
574static struct seq_rw_config stfsm_s25fl_read4_configs[] = { 553static struct seq_rw_config stfsm_s25fl_read4_configs[] = {
575 {FLASH_FLAG_READ_1_4_4, FLASH_CMD_READ4_1_4_4, 0, 4, 4, 0x00, 2, 4}, 554 {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ4_1_4_4, 0, 4, 4, 0x00, 2, 4},
576 {FLASH_FLAG_READ_1_1_4, FLASH_CMD_READ4_1_1_4, 0, 1, 4, 0x00, 0, 8}, 555 {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ4_1_1_4, 0, 1, 4, 0x00, 0, 8},
577 {FLASH_FLAG_READ_1_2_2, FLASH_CMD_READ4_1_2_2, 0, 2, 2, 0x00, 4, 0}, 556 {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ4_1_2_2, 0, 2, 2, 0x00, 4, 0},
578 {FLASH_FLAG_READ_1_1_2, FLASH_CMD_READ4_1_1_2, 0, 1, 2, 0x00, 0, 8}, 557 {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ4_1_1_2, 0, 1, 2, 0x00, 0, 8},
579 {FLASH_FLAG_READ_FAST, FLASH_CMD_READ4_FAST, 0, 1, 1, 0x00, 0, 8}, 558 {FLASH_FLAG_READ_FAST, SPINOR_OP_READ4_FAST, 0, 1, 1, 0x00, 0, 8},
580 {FLASH_FLAG_READ_WRITE, FLASH_CMD_READ4, 0, 1, 1, 0x00, 0, 0}, 559 {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ4, 0, 1, 1, 0x00, 0, 0},
581 {0x00, 0, 0, 0, 0, 0x00, 0, 0}, 560 {0x00, 0, 0, 0, 0, 0x00, 0, 0},
582}; 561};
583 562
@@ -590,13 +569,13 @@ static struct seq_rw_config stfsm_s25fl_write4_configs[] = {
590/* 569/*
591 * [W25Qxxx] Configuration 570 * [W25Qxxx] Configuration
592 */ 571 */
593#define W25Q_STATUS_QE (0x1 << 9) 572#define W25Q_STATUS_QE (0x1 << 1)
594 573
595static struct stfsm_seq stfsm_seq_read_jedec = { 574static struct stfsm_seq stfsm_seq_read_jedec = {
596 .data_size = TRANSFER_SIZE(8), 575 .data_size = TRANSFER_SIZE(8),
597 .seq_opc[0] = (SEQ_OPC_PADS_1 | 576 .seq_opc[0] = (SEQ_OPC_PADS_1 |
598 SEQ_OPC_CYCLES(8) | 577 SEQ_OPC_CYCLES(8) |
599 SEQ_OPC_OPCODE(FLASH_CMD_RDID)), 578 SEQ_OPC_OPCODE(SPINOR_OP_RDID)),
600 .seq = { 579 .seq = {
601 STFSM_INST_CMD1, 580 STFSM_INST_CMD1,
602 STFSM_INST_DATA_READ, 581 STFSM_INST_DATA_READ,
@@ -612,7 +591,7 @@ static struct stfsm_seq stfsm_seq_read_status_fifo = {
612 .data_size = TRANSFER_SIZE(4), 591 .data_size = TRANSFER_SIZE(4),
613 .seq_opc[0] = (SEQ_OPC_PADS_1 | 592 .seq_opc[0] = (SEQ_OPC_PADS_1 |
614 SEQ_OPC_CYCLES(8) | 593 SEQ_OPC_CYCLES(8) |
615 SEQ_OPC_OPCODE(FLASH_CMD_RDSR)), 594 SEQ_OPC_OPCODE(SPINOR_OP_RDSR)),
616 .seq = { 595 .seq = {
617 STFSM_INST_CMD1, 596 STFSM_INST_CMD1,
618 STFSM_INST_DATA_READ, 597 STFSM_INST_DATA_READ,
@@ -628,10 +607,10 @@ static struct stfsm_seq stfsm_seq_erase_sector = {
628 /* 'addr_cfg' configured during initialisation */ 607 /* 'addr_cfg' configured during initialisation */
629 .seq_opc = { 608 .seq_opc = {
630 (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | 609 (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
631 SEQ_OPC_OPCODE(FLASH_CMD_WREN) | SEQ_OPC_CSDEASSERT), 610 SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT),
632 611
633 (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | 612 (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
634 SEQ_OPC_OPCODE(FLASH_CMD_SE)), 613 SEQ_OPC_OPCODE(SPINOR_OP_SE)),
635 }, 614 },
636 .seq = { 615 .seq = {
637 STFSM_INST_CMD1, 616 STFSM_INST_CMD1,
@@ -649,10 +628,10 @@ static struct stfsm_seq stfsm_seq_erase_sector = {
649static struct stfsm_seq stfsm_seq_erase_chip = { 628static struct stfsm_seq stfsm_seq_erase_chip = {
650 .seq_opc = { 629 .seq_opc = {
651 (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | 630 (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
652 SEQ_OPC_OPCODE(FLASH_CMD_WREN) | SEQ_OPC_CSDEASSERT), 631 SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT),
653 632
654 (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | 633 (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
655 SEQ_OPC_OPCODE(FLASH_CMD_CHIPERASE) | SEQ_OPC_CSDEASSERT), 634 SEQ_OPC_OPCODE(SPINOR_OP_CHIP_ERASE) | SEQ_OPC_CSDEASSERT),
656 }, 635 },
657 .seq = { 636 .seq = {
658 STFSM_INST_CMD1, 637 STFSM_INST_CMD1,
@@ -669,26 +648,9 @@ static struct stfsm_seq stfsm_seq_erase_chip = {
669 648
670static struct stfsm_seq stfsm_seq_write_status = { 649static struct stfsm_seq stfsm_seq_write_status = {
671 .seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | 650 .seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
672 SEQ_OPC_OPCODE(FLASH_CMD_WREN) | SEQ_OPC_CSDEASSERT), 651 SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT),
673 .seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
674 SEQ_OPC_OPCODE(FLASH_CMD_WRSR)),
675 .seq = {
676 STFSM_INST_CMD1,
677 STFSM_INST_CMD2,
678 STFSM_INST_STA_WR1,
679 STFSM_INST_STOP,
680 },
681 .seq_cfg = (SEQ_CFG_PADS_1 |
682 SEQ_CFG_READNOTWRITE |
683 SEQ_CFG_CSDEASSERT |
684 SEQ_CFG_STARTSEQ),
685};
686
687static struct stfsm_seq stfsm_seq_wrvcr = {
688 .seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
689 SEQ_OPC_OPCODE(FLASH_CMD_WREN) | SEQ_OPC_CSDEASSERT),
690 .seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | 652 .seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
691 SEQ_OPC_OPCODE(FLASH_CMD_WRVCR)), 653 SEQ_OPC_OPCODE(SPINOR_OP_WRSR)),
692 .seq = { 654 .seq = {
693 STFSM_INST_CMD1, 655 STFSM_INST_CMD1,
694 STFSM_INST_CMD2, 656 STFSM_INST_CMD2,
@@ -704,9 +666,9 @@ static struct stfsm_seq stfsm_seq_wrvcr = {
704static int stfsm_n25q_en_32bit_addr_seq(struct stfsm_seq *seq) 666static int stfsm_n25q_en_32bit_addr_seq(struct stfsm_seq *seq)
705{ 667{
706 seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | 668 seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
707 SEQ_OPC_OPCODE(FLASH_CMD_EN4B_ADDR)); 669 SEQ_OPC_OPCODE(SPINOR_OP_EN4B));
708 seq->seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | 670 seq->seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
709 SEQ_OPC_OPCODE(FLASH_CMD_WREN) | 671 SEQ_OPC_OPCODE(SPINOR_OP_WREN) |
710 SEQ_OPC_CSDEASSERT); 672 SEQ_OPC_CSDEASSERT);
711 673
712 seq->seq[0] = STFSM_INST_CMD2; 674 seq->seq[0] = STFSM_INST_CMD2;
@@ -793,7 +755,7 @@ static void stfsm_read_fifo(struct stfsm *fsm, uint32_t *buf, uint32_t size)
793 755
794 dev_dbg(fsm->dev, "Reading %d bytes from FIFO\n", size); 756 dev_dbg(fsm->dev, "Reading %d bytes from FIFO\n", size);
795 757
796 BUG_ON((((uint32_t)buf) & 0x3) || (size & 0x3)); 758 BUG_ON((((uintptr_t)buf) & 0x3) || (size & 0x3));
797 759
798 while (remaining) { 760 while (remaining) {
799 for (;;) { 761 for (;;) {
@@ -817,7 +779,7 @@ static int stfsm_write_fifo(struct stfsm *fsm, const uint32_t *buf,
817 779
818 dev_dbg(fsm->dev, "writing %d bytes to FIFO\n", size); 780 dev_dbg(fsm->dev, "writing %d bytes to FIFO\n", size);
819 781
820 BUG_ON((((uint32_t)buf) & 0x3) || (size & 0x3)); 782 BUG_ON((((uintptr_t)buf) & 0x3) || (size & 0x3));
821 783
822 writesl(fsm->base + SPI_FAST_SEQ_DATA_REG, buf, words); 784 writesl(fsm->base + SPI_FAST_SEQ_DATA_REG, buf, words);
823 785
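The two BUG_ON() alignment checks above now cast the buffer through uintptr_t rather than uint32_t, so the test stays correct on 64-bit builds, where a 32-bit cast would truncate the pointer and draw a compiler warning. The same check, pulled out as a standalone helper purely for illustration:

#include <linux/types.h>

/* True when both the buffer address and the length are 32-bit word aligned. */
static inline bool fifo_access_is_aligned(const void *buf, u32 size)
{
	return !(((uintptr_t)buf & 0x3) || (size & 0x3));
}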
@@ -827,7 +789,7 @@ static int stfsm_write_fifo(struct stfsm *fsm, const uint32_t *buf,
827static int stfsm_enter_32bit_addr(struct stfsm *fsm, int enter) 789static int stfsm_enter_32bit_addr(struct stfsm *fsm, int enter)
828{ 790{
829 struct stfsm_seq *seq = &fsm->stfsm_seq_en_32bit_addr; 791 struct stfsm_seq *seq = &fsm->stfsm_seq_en_32bit_addr;
830 uint32_t cmd = enter ? FLASH_CMD_EN4B_ADDR : FLASH_CMD_EX4B_ADDR; 792 uint32_t cmd = enter ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
831 793
832 seq->seq_opc[0] = (SEQ_OPC_PADS_1 | 794 seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
833 SEQ_OPC_CYCLES(8) | 795 SEQ_OPC_CYCLES(8) |
@@ -851,7 +813,7 @@ static uint8_t stfsm_wait_busy(struct stfsm *fsm)
851 /* Use RDRS1 */ 813 /* Use RDRS1 */
852 seq->seq_opc[0] = (SEQ_OPC_PADS_1 | 814 seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
853 SEQ_OPC_CYCLES(8) | 815 SEQ_OPC_CYCLES(8) |
854 SEQ_OPC_OPCODE(FLASH_CMD_RDSR)); 816 SEQ_OPC_OPCODE(SPINOR_OP_RDSR));
855 817
856 /* Load read_status sequence */ 818 /* Load read_status sequence */
857 stfsm_load_seq(fsm, seq); 819 stfsm_load_seq(fsm, seq);
@@ -889,60 +851,57 @@ static uint8_t stfsm_wait_busy(struct stfsm *fsm)
889} 851}
890 852
891static int stfsm_read_status(struct stfsm *fsm, uint8_t cmd, 853static int stfsm_read_status(struct stfsm *fsm, uint8_t cmd,
892 uint8_t *status) 854 uint8_t *data, int bytes)
893{ 855{
894 struct stfsm_seq *seq = &stfsm_seq_read_status_fifo; 856 struct stfsm_seq *seq = &stfsm_seq_read_status_fifo;
895 uint32_t tmp; 857 uint32_t tmp;
858 uint8_t *t = (uint8_t *)&tmp;
859 int i;
896 860
897 dev_dbg(fsm->dev, "reading STA[%s]\n", 861 dev_dbg(fsm->dev, "read 'status' register [0x%02x], %d byte(s)\n",
898 (cmd == FLASH_CMD_RDSR) ? "1" : "2"); 862 cmd, bytes);
899 863
900 seq->seq_opc[0] = (SEQ_OPC_PADS_1 | 864 BUG_ON(bytes != 1 && bytes != 2);
901 SEQ_OPC_CYCLES(8) | 865
866 seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
902 SEQ_OPC_OPCODE(cmd)), 867 SEQ_OPC_OPCODE(cmd)),
903 868
904 stfsm_load_seq(fsm, seq); 869 stfsm_load_seq(fsm, seq);
905 870
906 stfsm_read_fifo(fsm, &tmp, 4); 871 stfsm_read_fifo(fsm, &tmp, 4);
907 872
908 *status = (uint8_t)(tmp >> 24); 873 for (i = 0; i < bytes; i++)
874 data[i] = t[i];
909 875
910 stfsm_wait_seq(fsm); 876 stfsm_wait_seq(fsm);
911 877
912 return 0; 878 return 0;
913} 879}
914 880
915static int stfsm_write_status(struct stfsm *fsm, uint16_t status, 881static int stfsm_write_status(struct stfsm *fsm, uint8_t cmd,
916 int sta_bytes) 882 uint16_t data, int bytes, int wait_busy)
917{ 883{
918 struct stfsm_seq *seq = &stfsm_seq_write_status; 884 struct stfsm_seq *seq = &stfsm_seq_write_status;
919 885
920 dev_dbg(fsm->dev, "writing STA[%s] 0x%04x\n", 886 dev_dbg(fsm->dev,
921 (sta_bytes == 1) ? "1" : "1+2", status); 887 "write 'status' register [0x%02x], %d byte(s), 0x%04x\n"
922 888 " %s wait-busy\n", cmd, bytes, data, wait_busy ? "with" : "no");
923 seq->status = (uint32_t)status | STA_PADS_1 | STA_CSDEASSERT;
924 seq->seq[2] = (sta_bytes == 1) ?
925 STFSM_INST_STA_WR1 : STFSM_INST_STA_WR1_2;
926
927 stfsm_load_seq(fsm, seq);
928
929 stfsm_wait_seq(fsm);
930 889
931 return 0; 890 BUG_ON(bytes != 1 && bytes != 2);
932};
933 891
934static int stfsm_wrvcr(struct stfsm *fsm, uint8_t data) 892 seq->seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
935{ 893 SEQ_OPC_OPCODE(cmd));
936 struct stfsm_seq *seq = &stfsm_seq_wrvcr;
937
938 dev_dbg(fsm->dev, "writing VCR 0x%02x\n", data);
939 894
940 seq->status = (STA_DATA_BYTE1(data) | STA_PADS_1 | STA_CSDEASSERT); 895 seq->status = (uint32_t)data | STA_PADS_1 | STA_CSDEASSERT;
896 seq->seq[2] = (bytes == 1) ? STFSM_INST_STA_WR1 : STFSM_INST_STA_WR1_2;
941 897
942 stfsm_load_seq(fsm, seq); 898 stfsm_load_seq(fsm, seq);
943 899
944 stfsm_wait_seq(fsm); 900 stfsm_wait_seq(fsm);
945 901
902 if (wait_busy)
903 stfsm_wait_busy(fsm);
904
946 return 0; 905 return 0;
947} 906}
948 907
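With this hunk, stfsm_wrvcr() disappears and the status accessors become generic: stfsm_read_status() now takes the opcode and a byte count, and stfsm_write_status() takes the opcode, the value, a byte count and a wait-busy flag. A hypothetical caller, written as it would look inside st_spi_fsm.c; the BP0..BP2 mask in bits 2..4 is an assumed, device-dependent status-register layout, not something this driver actually does:

/* Clear the block-protect bits using the generalised status helpers. */
static int example_clear_bp_bits(struct stfsm *fsm)
{
	uint8_t sr1;

	stfsm_read_status(fsm, SPINOR_OP_RDSR, &sr1, 1);

	if (!(sr1 & 0x1c))
		return 0;			/* nothing to clear */

	sr1 &= ~0x1c;				/* assumed BP0..BP2 mask */

	/* Write one byte back and wait for the flash to go idle. */
	return stfsm_write_status(fsm, SPINOR_OP_WRSR, sr1, 1, 1);
}

The N25Q volatile configuration register write later in the patch goes through the same helper (stfsm_write_status(fsm, N25Q_CMD_WRVCR, vcr, 1, 0)).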
@@ -1027,7 +986,7 @@ static void stfsm_prepare_rw_seq(struct stfsm *fsm,
1027 if (cfg->write) 986 if (cfg->write)
1028 seq->seq_opc[i++] = (SEQ_OPC_PADS_1 | 987 seq->seq_opc[i++] = (SEQ_OPC_PADS_1 |
1029 SEQ_OPC_CYCLES(8) | 988 SEQ_OPC_CYCLES(8) |
1030 SEQ_OPC_OPCODE(FLASH_CMD_WREN) | 989 SEQ_OPC_OPCODE(SPINOR_OP_WREN) |
1031 SEQ_OPC_CSDEASSERT); 990 SEQ_OPC_CSDEASSERT);
1032 991
1033 /* Address configuration (24 or 32-bit addresses) */ 992 /* Address configuration (24 or 32-bit addresses) */
@@ -1149,31 +1108,36 @@ static int stfsm_mx25_config(struct stfsm *fsm)
1149 stfsm_mx25_en_32bit_addr_seq(&fsm->stfsm_seq_en_32bit_addr); 1108 stfsm_mx25_en_32bit_addr_seq(&fsm->stfsm_seq_en_32bit_addr);
1150 1109
1151 soc_reset = stfsm_can_handle_soc_reset(fsm); 1110 soc_reset = stfsm_can_handle_soc_reset(fsm);
1152 if (soc_reset || !fsm->booted_from_spi) { 1111 if (soc_reset || !fsm->booted_from_spi)
1153 /* If we can handle SoC resets, we enable 32-bit address 1112 /* If we can handle SoC resets, we enable 32-bit address
1154 * mode pervasively */ 1113 * mode pervasively */
1155 stfsm_enter_32bit_addr(fsm, 1); 1114 stfsm_enter_32bit_addr(fsm, 1);
1156 1115
1157 } else { 1116 else
1158 /* Else, enable/disable 32-bit addressing before/after 1117 /* Else, enable/disable 32-bit addressing before/after
1159 * each operation */ 1118 * each operation */
1160 fsm->configuration = (CFG_READ_TOGGLE_32BIT_ADDR | 1119 fsm->configuration = (CFG_READ_TOGGLE_32BIT_ADDR |
1161 CFG_WRITE_TOGGLE_32BIT_ADDR | 1120 CFG_WRITE_TOGGLE_32BIT_ADDR |
1162 CFG_ERASESEC_TOGGLE_32BIT_ADDR); 1121 CFG_ERASESEC_TOGGLE_32BIT_ADDR);
1163 /* It seems a small delay is required after exiting
1164 * 32-bit mode following a write operation. The issue
1165 * is under investigation.
1166 */
1167 fsm->configuration |= CFG_WRITE_EX_32BIT_ADDR_DELAY;
1168 }
1169 } 1122 }
1170 1123
1171 /* For QUAD mode, set 'QE' STATUS bit */ 1124 /* Check status of 'QE' bit, update if required. */
1125 stfsm_read_status(fsm, SPINOR_OP_RDSR, &sta, 1);
1172 data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1; 1126 data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
1173 if (data_pads == 4) { 1127 if (data_pads == 4) {
1174 stfsm_read_status(fsm, FLASH_CMD_RDSR, &sta); 1128 if (!(sta & MX25_STATUS_QE)) {
1175 sta |= MX25_STATUS_QE; 1129 /* Set 'QE' */
1176 stfsm_write_status(fsm, sta, 1); 1130 sta |= MX25_STATUS_QE;
1131
1132 stfsm_write_status(fsm, SPINOR_OP_WRSR, sta, 1, 1);
1133 }
1134 } else {
1135 if (sta & MX25_STATUS_QE) {
1136 /* Clear 'QE' */
1137 sta &= ~MX25_STATUS_QE;
1138
1139 stfsm_write_status(fsm, SPINOR_OP_WRSR, sta, 1, 1);
1140 }
1177 } 1141 }
1178 1142
1179 return 0; 1143 return 0;
@@ -1239,7 +1203,7 @@ static int stfsm_n25q_config(struct stfsm *fsm)
1239 */ 1203 */
1240 vcr = (N25Q_VCR_DUMMY_CYCLES(8) | N25Q_VCR_XIP_DISABLED | 1204 vcr = (N25Q_VCR_DUMMY_CYCLES(8) | N25Q_VCR_XIP_DISABLED |
1241 N25Q_VCR_WRAP_CONT); 1205 N25Q_VCR_WRAP_CONT);
1242 stfsm_wrvcr(fsm, vcr); 1206 stfsm_write_status(fsm, N25Q_CMD_WRVCR, vcr, 1, 0);
1243 1207
1244 return 0; 1208 return 0;
1245} 1209}
@@ -1297,7 +1261,7 @@ static void stfsm_s25fl_write_dyb(struct stfsm *fsm, uint32_t offs, uint8_t dby)
1297{ 1261{
1298 struct stfsm_seq seq = { 1262 struct stfsm_seq seq = {
1299 .seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | 1263 .seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
1300 SEQ_OPC_OPCODE(FLASH_CMD_WREN) | 1264 SEQ_OPC_OPCODE(SPINOR_OP_WREN) |
1301 SEQ_OPC_CSDEASSERT), 1265 SEQ_OPC_CSDEASSERT),
1302 .seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | 1266 .seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
1303 SEQ_OPC_OPCODE(S25FL_CMD_DYBWR)), 1267 SEQ_OPC_OPCODE(S25FL_CMD_DYBWR)),
@@ -1337,7 +1301,7 @@ static int stfsm_s25fl_clear_status_reg(struct stfsm *fsm)
1337 SEQ_OPC_CSDEASSERT), 1301 SEQ_OPC_CSDEASSERT),
1338 .seq_opc[1] = (SEQ_OPC_PADS_1 | 1302 .seq_opc[1] = (SEQ_OPC_PADS_1 |
1339 SEQ_OPC_CYCLES(8) | 1303 SEQ_OPC_CYCLES(8) |
1340 SEQ_OPC_OPCODE(FLASH_CMD_WRDI) | 1304 SEQ_OPC_OPCODE(SPINOR_OP_WRDI) |
1341 SEQ_OPC_CSDEASSERT), 1305 SEQ_OPC_CSDEASSERT),
1342 .seq = { 1306 .seq = {
1343 STFSM_INST_CMD1, 1307 STFSM_INST_CMD1,
@@ -1367,6 +1331,7 @@ static int stfsm_s25fl_config(struct stfsm *fsm)
1367 uint32_t offs; 1331 uint32_t offs;
1368 uint16_t sta_wr; 1332 uint16_t sta_wr;
1369 uint8_t sr1, cr1, dyb; 1333 uint8_t sr1, cr1, dyb;
1334 int update_sr = 0;
1370 int ret; 1335 int ret;
1371 1336
1372 if (flags & FLASH_FLAG_32BIT_ADDR) { 1337 if (flags & FLASH_FLAG_32BIT_ADDR) {
@@ -1414,34 +1379,28 @@ static int stfsm_s25fl_config(struct stfsm *fsm)
1414 } 1379 }
1415 } 1380 }
1416 1381
1417 /* Check status of 'QE' bit */ 1382 /* Check status of 'QE' bit, update if required. */
1383 stfsm_read_status(fsm, SPINOR_OP_RDSR2, &cr1, 1);
1418 data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1; 1384 data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
1419 stfsm_read_status(fsm, FLASH_CMD_RDSR2, &cr1);
1420 if (data_pads == 4) { 1385 if (data_pads == 4) {
1421 if (!(cr1 & STFSM_S25FL_CONFIG_QE)) { 1386 if (!(cr1 & STFSM_S25FL_CONFIG_QE)) {
1422 /* Set 'QE' */ 1387 /* Set 'QE' */
1423 cr1 |= STFSM_S25FL_CONFIG_QE; 1388 cr1 |= STFSM_S25FL_CONFIG_QE;
1424 1389
1425 stfsm_read_status(fsm, FLASH_CMD_RDSR, &sr1); 1390 update_sr = 1;
1426 sta_wr = ((uint16_t)cr1 << 8) | sr1;
1427
1428 stfsm_write_status(fsm, sta_wr, 2);
1429
1430 stfsm_wait_busy(fsm);
1431 } 1391 }
1432 } else { 1392 } else {
1433 if ((cr1 & STFSM_S25FL_CONFIG_QE)) { 1393 if (cr1 & STFSM_S25FL_CONFIG_QE) {
1434 /* Clear 'QE' */ 1394 /* Clear 'QE' */
1435 cr1 &= ~STFSM_S25FL_CONFIG_QE; 1395 cr1 &= ~STFSM_S25FL_CONFIG_QE;
1436 1396
1437 stfsm_read_status(fsm, FLASH_CMD_RDSR, &sr1); 1397 update_sr = 1;
1438 sta_wr = ((uint16_t)cr1 << 8) | sr1;
1439
1440 stfsm_write_status(fsm, sta_wr, 2);
1441
1442 stfsm_wait_busy(fsm);
1443 } 1398 }
1444 1399 }
1400 if (update_sr) {
1401 stfsm_read_status(fsm, SPINOR_OP_RDSR, &sr1, 1);
1402 sta_wr = ((uint16_t)cr1 << 8) | sr1;
1403 stfsm_write_status(fsm, SPINOR_OP_WRSR, sta_wr, 2, 1);
1445 } 1404 }
1446 1405
1447 /* 1406 /*
@@ -1456,27 +1415,36 @@ static int stfsm_s25fl_config(struct stfsm *fsm)
1456static int stfsm_w25q_config(struct stfsm *fsm) 1415static int stfsm_w25q_config(struct stfsm *fsm)
1457{ 1416{
1458 uint32_t data_pads; 1417 uint32_t data_pads;
1459 uint16_t sta_wr; 1418 uint8_t sr1, sr2;
1460 uint8_t sta1, sta2; 1419 uint16_t sr_wr;
1420 int update_sr = 0;
1461 int ret; 1421 int ret;
1462 1422
1463 ret = stfsm_prepare_rwe_seqs_default(fsm); 1423 ret = stfsm_prepare_rwe_seqs_default(fsm);
1464 if (ret) 1424 if (ret)
1465 return ret; 1425 return ret;
1466 1426
1467 /* If using QUAD mode, set QE STATUS bit */ 1427 /* Check status of 'QE' bit, update if required. */
1428 stfsm_read_status(fsm, SPINOR_OP_RDSR2, &sr2, 1);
1468 data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1; 1429 data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
1469 if (data_pads == 4) { 1430 if (data_pads == 4) {
1470 stfsm_read_status(fsm, FLASH_CMD_RDSR, &sta1); 1431 if (!(sr2 & W25Q_STATUS_QE)) {
1471 stfsm_read_status(fsm, FLASH_CMD_RDSR2, &sta2); 1432 /* Set 'QE' */
1472 1433 sr2 |= W25Q_STATUS_QE;
1473 sta_wr = ((uint16_t)sta2 << 8) | sta1; 1434 update_sr = 1;
1474 1435 }
1475 sta_wr |= W25Q_STATUS_QE; 1436 } else {
1476 1437 if (sr2 & W25Q_STATUS_QE) {
1477 stfsm_write_status(fsm, sta_wr, 2); 1438 /* Clear 'QE' */
1478 1439 sr2 &= ~W25Q_STATUS_QE;
1479 stfsm_wait_busy(fsm); 1440 update_sr = 1;
1441 }
1442 }
1443 if (update_sr) {
1444 /* Write status register */
1445 stfsm_read_status(fsm, SPINOR_OP_RDSR, &sr1, 1);
1446 sr_wr = ((uint16_t)sr2 << 8) | sr1;
1447 stfsm_write_status(fsm, SPINOR_OP_WRSR, sr_wr, 2, 1);
1480 } 1448 }
1481 1449
1482 return 0; 1450 return 0;
@@ -1506,7 +1474,7 @@ static int stfsm_read(struct stfsm *fsm, uint8_t *buf, uint32_t size,
1506 read_mask = (data_pads << 2) - 1; 1474 read_mask = (data_pads << 2) - 1;
1507 1475
1508 /* Handle non-aligned buf */ 1476 /* Handle non-aligned buf */
1509 p = ((uint32_t)buf & 0x3) ? (uint8_t *)page_buf : buf; 1477 p = ((uintptr_t)buf & 0x3) ? (uint8_t *)page_buf : buf;
1510 1478
1511 /* Handle non-aligned size */ 1479 /* Handle non-aligned size */
1512 size_ub = (size + read_mask) & ~read_mask; 1480 size_ub = (size + read_mask) & ~read_mask;
@@ -1528,7 +1496,7 @@ static int stfsm_read(struct stfsm *fsm, uint8_t *buf, uint32_t size,
1528 } 1496 }
1529 1497
1530 /* Handle non-aligned buf */ 1498 /* Handle non-aligned buf */
1531 if ((uint32_t)buf & 0x3) 1499 if ((uintptr_t)buf & 0x3)
1532 memcpy(buf, page_buf, size); 1500 memcpy(buf, page_buf, size);
1533 1501
1534 /* Wait for sequence to finish */ 1502 /* Wait for sequence to finish */
@@ -1570,7 +1538,7 @@ static int stfsm_write(struct stfsm *fsm, const uint8_t *buf,
1570 write_mask = (data_pads << 2) - 1; 1538 write_mask = (data_pads << 2) - 1;
1571 1539
1572 /* Handle non-aligned buf */ 1540 /* Handle non-aligned buf */
1573 if ((uint32_t)buf & 0x3) { 1541 if ((uintptr_t)buf & 0x3) {
1574 memcpy(page_buf, buf, size); 1542 memcpy(page_buf, buf, size);
1575 p = (uint8_t *)page_buf; 1543 p = (uint8_t *)page_buf;
1576 } else { 1544 } else {
@@ -1628,11 +1596,8 @@ static int stfsm_write(struct stfsm *fsm, const uint8_t *buf,
1628 stfsm_s25fl_clear_status_reg(fsm); 1596 stfsm_s25fl_clear_status_reg(fsm);
1629 1597
1630 /* Exit 32-bit address mode, if required */ 1598 /* Exit 32-bit address mode, if required */
1631 if (fsm->configuration & CFG_WRITE_TOGGLE_32BIT_ADDR) { 1599 if (fsm->configuration & CFG_WRITE_TOGGLE_32BIT_ADDR)
1632 stfsm_enter_32bit_addr(fsm, 0); 1600 stfsm_enter_32bit_addr(fsm, 0);
1633 if (fsm->configuration & CFG_WRITE_EX_32BIT_ADDR_DELAY)
1634 udelay(1);
1635 }
1636 1601
1637 return 0; 1602 return 0;
1638} 1603}
@@ -1736,7 +1701,7 @@ static int stfsm_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
1736 1701
1737 while (len) { 1702 while (len) {
1738 /* Write up to page boundary */ 1703 /* Write up to page boundary */
1739 bytes = min(FLASH_PAGESIZE - page_offs, len); 1704 bytes = min_t(size_t, FLASH_PAGESIZE - page_offs, len);
1740 1705
1741 ret = stfsm_write(fsm, b, bytes, to); 1706 ret = stfsm_write(fsm, b, bytes, to);
1742 if (ret) 1707 if (ret)
@@ -1935,6 +1900,13 @@ static int stfsm_init(struct stfsm *fsm)
1935 fsm->base + SPI_CONFIGDATA); 1900 fsm->base + SPI_CONFIGDATA);
1936 writel(STFSM_DEFAULT_WR_TIME, fsm->base + SPI_STATUS_WR_TIME_REG); 1901 writel(STFSM_DEFAULT_WR_TIME, fsm->base + SPI_STATUS_WR_TIME_REG);
1937 1902
1903 /*
1904 * Set the FSM 'WAIT' delay to the minimum workable value. Note, for
1905 * our purposes, the WAIT instruction is used purely to achieve
1906 * "sequence validity" rather than actually implement a delay.
1907 */
1908 writel(0x00000001, fsm->base + SPI_PROGRAM_ERASE_TIME);
1909
1938 /* Clear FIFO, just in case */ 1910 /* Clear FIFO, just in case */
1939 stfsm_clear_fifo(fsm); 1911 stfsm_clear_fifo(fsm);
1940 1912
@@ -2086,7 +2058,7 @@ static int stfsm_remove(struct platform_device *pdev)
2086 return mtd_device_unregister(&fsm->mtd); 2058 return mtd_device_unregister(&fsm->mtd);
2087} 2059}
2088 2060
2089static struct of_device_id stfsm_match[] = { 2061static const struct of_device_id stfsm_match[] = {
2090 { .compatible = "st,spi-fsm", }, 2062 { .compatible = "st,spi-fsm", },
2091 {}, 2063 {},
2092}; 2064};
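Across the MX25, S25FL and W25Q configuration paths the patch converges on the same quad-enable handling: read the register that holds 'QE', change the bit only if it disagrees with the number of data pads in use, and write it back (for S25FL and W25Q together with SR1, since 'QE' lives in the second status byte). Roughly, in W25Q terms, as it would read inside st_spi_fsm.c; 'wants_quad' stands in for the driver's data_pads == 4 check:

static int example_update_quad_enable(struct stfsm *fsm, bool wants_quad)
{
	uint8_t sr1, sr2;
	uint16_t sr_wr;
	int update_sr = 0;

	stfsm_read_status(fsm, SPINOR_OP_RDSR2, &sr2, 1);

	if (wants_quad && !(sr2 & W25Q_STATUS_QE)) {
		sr2 |= W25Q_STATUS_QE;		/* set 'QE' */
		update_sr = 1;
	} else if (!wants_quad && (sr2 & W25Q_STATUS_QE)) {
		sr2 &= ~W25Q_STATUS_QE;		/* clear 'QE' */
		update_sr = 1;
	}

	if (update_sr) {
		/* SR1 and SR2 are written in a single WRSR; wait until done. */
		stfsm_read_status(fsm, SPINOR_OP_RDSR, &sr1, 1);
		sr_wr = ((uint16_t)sr2 << 8) | sr1;
		stfsm_write_status(fsm, SPINOR_OP_WRSR, sr_wr, 2, 1);
	}

	return 0;
}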
diff --git a/drivers/mtd/lpddr/Kconfig b/drivers/mtd/lpddr/Kconfig
index 265f969817e3..3a19cbee24d7 100644
--- a/drivers/mtd/lpddr/Kconfig
+++ b/drivers/mtd/lpddr/Kconfig
@@ -1,5 +1,5 @@
1menu "LPDDR flash memory drivers" 1menu "LPDDR & LPDDR2 PCM memory drivers"
2 depends on MTD!=n 2 depends on MTD
3 3
4config MTD_LPDDR 4config MTD_LPDDR
5 tristate "Support for LPDDR flash chips" 5 tristate "Support for LPDDR flash chips"
@@ -17,4 +17,13 @@ config MTD_QINFO_PROBE
17 Window QINFO interface, permits software to be used for entire 17 Window QINFO interface, permits software to be used for entire
18 families of devices. This serves similar purpose of CFI on legacy 18 families of devices. This serves similar purpose of CFI on legacy
19 Flash products 19 Flash products
20
21config MTD_LPDDR2_NVM
22 # ARM dependency is only for writel_relaxed()
23 depends on MTD && ARM
24 tristate "Support for LPDDR2-NVM flash chips"
25 help
 26	  This option enables support for PCM memories with an LPDDR2-NVM
 27	  (Low Power Double Data Rate 2) interface.
28
20endmenu 29endmenu
diff --git a/drivers/mtd/lpddr/Makefile b/drivers/mtd/lpddr/Makefile
index da48e46b5812..881d440d483e 100644
--- a/drivers/mtd/lpddr/Makefile
+++ b/drivers/mtd/lpddr/Makefile
@@ -4,3 +4,4 @@
4 4
5obj-$(CONFIG_MTD_QINFO_PROBE) += qinfo_probe.o 5obj-$(CONFIG_MTD_QINFO_PROBE) += qinfo_probe.o
6obj-$(CONFIG_MTD_LPDDR) += lpddr_cmds.o 6obj-$(CONFIG_MTD_LPDDR) += lpddr_cmds.o
7obj-$(CONFIG_MTD_LPDDR2_NVM) += lpddr2_nvm.o
diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
new file mode 100644
index 000000000000..063cec40d0ae
--- /dev/null
+++ b/drivers/mtd/lpddr/lpddr2_nvm.c
@@ -0,0 +1,507 @@
1/*
2 * LPDDR2-NVM MTD driver. This module provides read, write, erase, lock/unlock
3 * support for LPDDR2-NVM PCM memories
4 *
5 * Copyright © 2012 Micron Technology, Inc.
6 *
7 * Vincenzo Aliberti <vincenzo.aliberti@gmail.com>
8 * Domenico Manna <domenico.manna@gmail.com>
9 * Many thanks to Andrea Vigilante for initial enabling
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 */
21
22#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
23
24#include <linux/init.h>
25#include <linux/io.h>
26#include <linux/module.h>
27#include <linux/kernel.h>
28#include <linux/mtd/map.h>
29#include <linux/mtd/mtd.h>
30#include <linux/mtd/partitions.h>
31#include <linux/slab.h>
32#include <linux/platform_device.h>
33#include <linux/ioport.h>
34#include <linux/err.h>
35
36/* Parameters */
37#define ERASE_BLOCKSIZE (0x00020000/2) /* in Word */
38#define WRITE_BUFFSIZE (0x00000400/2) /* in Word */
39#define OW_BASE_ADDRESS 0x00000000 /* OW offset */
40#define BUS_WIDTH 0x00000020 /* x32 devices */
41
42/* PFOW symbols address offset */
43#define PFOW_QUERY_STRING_P (0x0000/2) /* in Word */
44#define PFOW_QUERY_STRING_F (0x0002/2) /* in Word */
45#define PFOW_QUERY_STRING_O (0x0004/2) /* in Word */
46#define PFOW_QUERY_STRING_W (0x0006/2) /* in Word */
47
48/* OW registers address */
49#define CMD_CODE_OFS (0x0080/2) /* in Word */
50#define CMD_DATA_OFS (0x0084/2) /* in Word */
51#define CMD_ADD_L_OFS (0x0088/2) /* in Word */
52#define CMD_ADD_H_OFS (0x008A/2) /* in Word */
53#define MPR_L_OFS (0x0090/2) /* in Word */
54#define MPR_H_OFS (0x0092/2) /* in Word */
55#define CMD_EXEC_OFS (0x00C0/2) /* in Word */
56#define STATUS_REG_OFS (0x00CC/2) /* in Word */
57#define PRG_BUFFER_OFS (0x0010/2) /* in Word */
58
59/* Datamask */
60#define MR_CFGMASK 0x8000
61#define SR_OK_DATAMASK 0x0080
62
63/* LPDDR2-NVM Commands */
64#define LPDDR2_NVM_LOCK 0x0061
65#define LPDDR2_NVM_UNLOCK 0x0062
66#define LPDDR2_NVM_SW_PROGRAM 0x0041
67#define LPDDR2_NVM_SW_OVERWRITE 0x0042
68#define LPDDR2_NVM_BUF_PROGRAM 0x00E9
69#define LPDDR2_NVM_BUF_OVERWRITE 0x00EA
70#define LPDDR2_NVM_ERASE 0x0020
71
72/* LPDDR2-NVM Registers offset */
73#define LPDDR2_MODE_REG_DATA 0x0040
74#define LPDDR2_MODE_REG_CFG 0x0050
75
76/*
77 * Internal Type Definitions
78 * pcm_int_data contains memory controller details:
 79 * @ctl_regs : base address of the memory controller registers, after
 80 *             remapping (gives access to LPDDR2_MODE_REG_DATA/_CFG)
 81 * @bus_width: memory bus-width (eg: x16 2 Bytes, x32 4 Bytes)
82 */
83struct pcm_int_data {
84 void __iomem *ctl_regs;
85 int bus_width;
86};
87
88static DEFINE_MUTEX(lpdd2_nvm_mutex);
89
90/*
91 * Build a map_word starting from an u_long
92 */
93static inline map_word build_map_word(u_long myword)
94{
95 map_word val = { {0} };
96 val.x[0] = myword;
97 return val;
98}
99
100/*
101 * Build Mode Register Configuration DataMask based on device bus-width
102 */
103static inline u_int build_mr_cfgmask(u_int bus_width)
104{
105 u_int val = MR_CFGMASK;
106
107 if (bus_width == 0x0004) /* x32 device */
108 val = val << 16;
109
110 return val;
111}
112
113/*
114 * Build Status Register OK DataMask based on device bus-width
115 */
116static inline u_int build_sr_ok_datamask(u_int bus_width)
117{
118 u_int val = SR_OK_DATAMASK;
119
120 if (bus_width == 0x0004) /* x32 device */
121 val = (val << 16)+val;
122
123 return val;
124}
125
126/*
127 * Evaluates Overlay Window Control Registers address
128 */
129static inline u_long ow_reg_add(struct map_info *map, u_long offset)
130{
131 u_long val = 0;
132 struct pcm_int_data *pcm_data = map->fldrv_priv;
133
134 val = map->pfow_base + offset*pcm_data->bus_width;
135
136 return val;
137}
138
139/*
140 * Enable lpddr2-nvm Overlay Window
 141 * Overlay Window is a memory-mapped area containing all LPDDR2-NVM registers
 142 * used by device commands as well as user-visible resources like the Device
 143 * Status Register, Device ID, etc.
144 */
145static inline void ow_enable(struct map_info *map)
146{
147 struct pcm_int_data *pcm_data = map->fldrv_priv;
148
149 writel_relaxed(build_mr_cfgmask(pcm_data->bus_width) | 0x18,
150 pcm_data->ctl_regs + LPDDR2_MODE_REG_CFG);
151 writel_relaxed(0x01, pcm_data->ctl_regs + LPDDR2_MODE_REG_DATA);
152}
153
154/*
155 * Disable lpddr2-nvm Overlay Window
 156 * Overlay Window is a memory-mapped area containing all LPDDR2-NVM registers
 157 * used by device commands as well as user-visible resources like the Device
 158 * Status Register, Device ID, etc.
159 */
160static inline void ow_disable(struct map_info *map)
161{
162 struct pcm_int_data *pcm_data = map->fldrv_priv;
163
164 writel_relaxed(build_mr_cfgmask(pcm_data->bus_width) | 0x18,
165 pcm_data->ctl_regs + LPDDR2_MODE_REG_CFG);
166 writel_relaxed(0x02, pcm_data->ctl_regs + LPDDR2_MODE_REG_DATA);
167}
168
169/*
170 * Execute lpddr2-nvm operations
171 */
172static int lpddr2_nvm_do_op(struct map_info *map, u_long cmd_code,
173 u_long cmd_data, u_long cmd_add, u_long cmd_mpr, u_char *buf)
174{
175 map_word add_l = { {0} }, add_h = { {0} }, mpr_l = { {0} },
176 mpr_h = { {0} }, data_l = { {0} }, cmd = { {0} },
177 exec_cmd = { {0} }, sr;
178 map_word data_h = { {0} }; /* only for 2x x16 devices stacked */
179 u_long i, status_reg, prg_buff_ofs;
180 struct pcm_int_data *pcm_data = map->fldrv_priv;
181 u_int sr_ok_datamask = build_sr_ok_datamask(pcm_data->bus_width);
182
183 /* Builds low and high words for OW Control Registers */
184 add_l.x[0] = cmd_add & 0x0000FFFF;
185 add_h.x[0] = (cmd_add >> 16) & 0x0000FFFF;
186 mpr_l.x[0] = cmd_mpr & 0x0000FFFF;
187 mpr_h.x[0] = (cmd_mpr >> 16) & 0x0000FFFF;
188 cmd.x[0] = cmd_code & 0x0000FFFF;
189 exec_cmd.x[0] = 0x0001;
190 data_l.x[0] = cmd_data & 0x0000FFFF;
191 data_h.x[0] = (cmd_data >> 16) & 0x0000FFFF; /* only for 2x x16 */
192
193 /* Set Overlay Window Control Registers */
194 map_write(map, cmd, ow_reg_add(map, CMD_CODE_OFS));
195 map_write(map, data_l, ow_reg_add(map, CMD_DATA_OFS));
196 map_write(map, add_l, ow_reg_add(map, CMD_ADD_L_OFS));
197 map_write(map, add_h, ow_reg_add(map, CMD_ADD_H_OFS));
198 map_write(map, mpr_l, ow_reg_add(map, MPR_L_OFS));
199 map_write(map, mpr_h, ow_reg_add(map, MPR_H_OFS));
200 if (pcm_data->bus_width == 0x0004) { /* 2x16 devices stacked */
201 map_write(map, cmd, ow_reg_add(map, CMD_CODE_OFS) + 2);
202 map_write(map, data_h, ow_reg_add(map, CMD_DATA_OFS) + 2);
203 map_write(map, add_l, ow_reg_add(map, CMD_ADD_L_OFS) + 2);
204 map_write(map, add_h, ow_reg_add(map, CMD_ADD_H_OFS) + 2);
205 map_write(map, mpr_l, ow_reg_add(map, MPR_L_OFS) + 2);
206 map_write(map, mpr_h, ow_reg_add(map, MPR_H_OFS) + 2);
207 }
208
209 /* Fill Program Buffer */
210 if ((cmd_code == LPDDR2_NVM_BUF_PROGRAM) ||
211 (cmd_code == LPDDR2_NVM_BUF_OVERWRITE)) {
212 prg_buff_ofs = (map_read(map,
213 ow_reg_add(map, PRG_BUFFER_OFS))).x[0];
214 for (i = 0; i < cmd_mpr; i++) {
215 map_write(map, build_map_word(buf[i]), map->pfow_base +
216 prg_buff_ofs + i);
217 }
218 }
219
220 /* Command Execute */
221 map_write(map, exec_cmd, ow_reg_add(map, CMD_EXEC_OFS));
222 if (pcm_data->bus_width == 0x0004) /* 2x16 devices stacked */
223 map_write(map, exec_cmd, ow_reg_add(map, CMD_EXEC_OFS) + 2);
224
225 /* Status Register Check */
226 do {
227 sr = map_read(map, ow_reg_add(map, STATUS_REG_OFS));
228 status_reg = sr.x[0];
229 if (pcm_data->bus_width == 0x0004) {/* 2x16 devices stacked */
230 sr = map_read(map, ow_reg_add(map,
231 STATUS_REG_OFS) + 2);
232 status_reg += sr.x[0] << 16;
233 }
234 } while ((status_reg & sr_ok_datamask) != sr_ok_datamask);
235
236 return (((status_reg & sr_ok_datamask) == sr_ok_datamask) ? 0 : -EIO);
237}
238
239/*
240 * Execute lpddr2-nvm operations @ block level
241 */
242static int lpddr2_nvm_do_block_op(struct mtd_info *mtd, loff_t start_add,
243 uint64_t len, u_char block_op)
244{
245 struct map_info *map = mtd->priv;
246 u_long add, end_add;
247 int ret = 0;
248
249 mutex_lock(&lpdd2_nvm_mutex);
250
251 ow_enable(map);
252
253 add = start_add;
254 end_add = add + len;
255
256 do {
257 ret = lpddr2_nvm_do_op(map, block_op, 0x00, add, add, NULL);
258 if (ret)
259 goto out;
260 add += mtd->erasesize;
261 } while (add < end_add);
262
263out:
264 ow_disable(map);
265 mutex_unlock(&lpdd2_nvm_mutex);
266 return ret;
267}
268
269/*
270 * verify presence of PFOW string
271 */
272static int lpddr2_nvm_pfow_present(struct map_info *map)
273{
274 map_word pfow_val[4];
275 unsigned int found = 1;
276
277 mutex_lock(&lpdd2_nvm_mutex);
278
279 ow_enable(map);
280
281 /* Load string from array */
282 pfow_val[0] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_P));
283 pfow_val[1] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_F));
284 pfow_val[2] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_O));
285 pfow_val[3] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_W));
286
287 /* Verify the string loaded vs expected */
288 if (!map_word_equal(map, build_map_word('P'), pfow_val[0]))
289 found = 0;
290 if (!map_word_equal(map, build_map_word('F'), pfow_val[1]))
291 found = 0;
292 if (!map_word_equal(map, build_map_word('O'), pfow_val[2]))
293 found = 0;
294 if (!map_word_equal(map, build_map_word('W'), pfow_val[3]))
295 found = 0;
296
297 ow_disable(map);
298
299 mutex_unlock(&lpdd2_nvm_mutex);
300
301 return found;
302}
303
304/*
305 * lpddr2_nvm driver read method
306 */
307static int lpddr2_nvm_read(struct mtd_info *mtd, loff_t start_add,
308 size_t len, size_t *retlen, u_char *buf)
309{
310 struct map_info *map = mtd->priv;
311
312 mutex_lock(&lpdd2_nvm_mutex);
313
314 *retlen = len;
315
316 map_copy_from(map, buf, start_add, *retlen);
317
318 mutex_unlock(&lpdd2_nvm_mutex);
319 return 0;
320}
321
322/*
323 * lpddr2_nvm driver write method
324 */
325static int lpddr2_nvm_write(struct mtd_info *mtd, loff_t start_add,
326 size_t len, size_t *retlen, const u_char *buf)
327{
328 struct map_info *map = mtd->priv;
329 struct pcm_int_data *pcm_data = map->fldrv_priv;
330 u_long add, current_len, tot_len, target_len, my_data;
331 u_char *write_buf = (u_char *)buf;
332 int ret = 0;
333
334 mutex_lock(&lpdd2_nvm_mutex);
335
336 ow_enable(map);
337
338 /* Set start value for the variables */
339 add = start_add;
340 target_len = len;
341 tot_len = 0;
342
343 while (tot_len < target_len) {
344 if (!(IS_ALIGNED(add, mtd->writesize))) { /* do sw program */
345 my_data = write_buf[tot_len];
346 my_data += (write_buf[tot_len+1]) << 8;
347 if (pcm_data->bus_width == 0x0004) {/* 2x16 devices */
348 my_data += (write_buf[tot_len+2]) << 16;
349 my_data += (write_buf[tot_len+3]) << 24;
350 }
351 ret = lpddr2_nvm_do_op(map, LPDDR2_NVM_SW_OVERWRITE,
352 my_data, add, 0x00, NULL);
353 if (ret)
354 goto out;
355
356 add += pcm_data->bus_width;
357 tot_len += pcm_data->bus_width;
358 } else { /* do buffer program */
359 current_len = min(target_len - tot_len,
360 (u_long) mtd->writesize);
361 ret = lpddr2_nvm_do_op(map, LPDDR2_NVM_BUF_OVERWRITE,
362 0x00, add, current_len, write_buf + tot_len);
363 if (ret)
364 goto out;
365
366 add += current_len;
367 tot_len += current_len;
368 }
369 }
370
371out:
372 *retlen = tot_len;
373 ow_disable(map);
374 mutex_unlock(&lpdd2_nvm_mutex);
375 return ret;
376}
377
378/*
379 * lpddr2_nvm driver erase method
380 */
381static int lpddr2_nvm_erase(struct mtd_info *mtd, struct erase_info *instr)
382{
383 int ret = lpddr2_nvm_do_block_op(mtd, instr->addr, instr->len,
384 LPDDR2_NVM_ERASE);
385 if (!ret) {
386 instr->state = MTD_ERASE_DONE;
387 mtd_erase_callback(instr);
388 }
389
390 return ret;
391}
392
393/*
394 * lpddr2_nvm driver unlock method
395 */
396static int lpddr2_nvm_unlock(struct mtd_info *mtd, loff_t start_add,
397 uint64_t len)
398{
399 return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_UNLOCK);
400}
401
402/*
403 * lpddr2_nvm driver lock method
404 */
405static int lpddr2_nvm_lock(struct mtd_info *mtd, loff_t start_add,
406 uint64_t len)
407{
408 return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK);
409}
410
411/*
412 * lpddr2_nvm driver probe method
413 */
414static int lpddr2_nvm_probe(struct platform_device *pdev)
415{
416 struct map_info *map;
417 struct mtd_info *mtd;
418 struct resource *add_range;
419 struct resource *control_regs;
420 struct pcm_int_data *pcm_data;
421
 422	/* Allocate memory for the control_regs data structure */
423 pcm_data = devm_kzalloc(&pdev->dev, sizeof(*pcm_data), GFP_KERNEL);
424 if (!pcm_data)
425 return -ENOMEM;
426
427 pcm_data->bus_width = BUS_WIDTH;
428
429 /* Allocate memory for map_info & mtd_info data structures */
430 map = devm_kzalloc(&pdev->dev, sizeof(*map), GFP_KERNEL);
431 if (!map)
432 return -ENOMEM;
433
434 mtd = devm_kzalloc(&pdev->dev, sizeof(*mtd), GFP_KERNEL);
435 if (!mtd)
436 return -ENOMEM;
437
438 /* lpddr2_nvm address range */
439 add_range = platform_get_resource(pdev, IORESOURCE_MEM, 0);
440
441 /* Populate map_info data structure */
442 *map = (struct map_info) {
443 .virt = devm_ioremap_resource(&pdev->dev, add_range),
444 .name = pdev->dev.init_name,
445 .phys = add_range->start,
446 .size = resource_size(add_range),
447 .bankwidth = pcm_data->bus_width / 2,
448 .pfow_base = OW_BASE_ADDRESS,
449 .fldrv_priv = pcm_data,
450 };
451 if (IS_ERR(map->virt))
452 return PTR_ERR(map->virt);
453
454 simple_map_init(map); /* fill with default methods */
455
456 control_regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
457 pcm_data->ctl_regs = devm_ioremap_resource(&pdev->dev, control_regs);
458 if (IS_ERR(pcm_data->ctl_regs))
459 return PTR_ERR(pcm_data->ctl_regs);
460
461 /* Populate mtd_info data structure */
462 *mtd = (struct mtd_info) {
463 .name = pdev->dev.init_name,
464 .type = MTD_RAM,
465 .priv = map,
466 .size = resource_size(add_range),
467 .erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width,
468 .writesize = 1,
469 .writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width,
470 .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
471 ._read = lpddr2_nvm_read,
472 ._write = lpddr2_nvm_write,
473 ._erase = lpddr2_nvm_erase,
474 ._unlock = lpddr2_nvm_unlock,
475 ._lock = lpddr2_nvm_lock,
476 };
477
478	/* Verify the presence of the device by looking for the PFOW string */
479 if (!lpddr2_nvm_pfow_present(map)) {
480 pr_err("device not recognized\n");
481 return -EINVAL;
482 }
483 /* Parse partitions and register the MTD device */
484 return mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
485}
486
487/*
488 * lpddr2_nvm driver remove method
489 */
490static int lpddr2_nvm_remove(struct platform_device *pdev)
491{
492 return mtd_device_unregister(dev_get_drvdata(&pdev->dev));
493}
494
495/* Initialize platform_driver data structure for lpddr2_nvm */
496static struct platform_driver lpddr2_nvm_drv = {
497 .driver = {
498 .name = "lpddr2_nvm",
499 },
500 .probe = lpddr2_nvm_probe,
501 .remove = lpddr2_nvm_remove,
502};
503
504module_platform_driver(lpddr2_nvm_drv);
505MODULE_LICENSE("GPL");
506MODULE_AUTHOR("Vincenzo Aliberti <vincenzo.aliberti@gmail.com>");
507MODULE_DESCRIPTION("MTD driver for LPDDR2-NVM PCM memories");
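
As a point of reference for the write path above: when the target address is not aligned to mtd->writesize, lpddr2_nvm_write() programs one bus word at a time, packing two bytes little-endian for a single x16 device and four bytes when bus_width is 0x0004 (the 2x16 configuration) before issuing LPDDR2_NVM_SW_OVERWRITE. A minimal user-space sketch of just that packing step (the pack_word() helper and the sample buffer are illustrative, not part of the driver):

#include <stdio.h>

/* Pack bus_width bytes (2 or 4) from buf into one little-endian program
 * word, mirroring the unaligned branch of lpddr2_nvm_write(). */
static unsigned long pack_word(const unsigned char *buf, unsigned int bus_width)
{
	unsigned long word = buf[0] | (buf[1] << 8);

	if (bus_width == 4)	/* 2x16 devices */
		word |= ((unsigned long)buf[2] << 16) |
			((unsigned long)buf[3] << 24);
	return word;
}

int main(void)
{
	const unsigned char buf[4] = { 0x11, 0x22, 0x33, 0x44 };

	printf("x16 word:  0x%lx\n", pack_word(buf, 2));	/* 0x2211 */
	printf("2x16 word: 0x%lx\n", pack_word(buf, 4));	/* 0x44332211 */
	return 0;
}
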
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index fce23fe043f7..21b2874a303b 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -108,7 +108,7 @@ config MTD_SUN_UFLASH
108 108
109config MTD_SC520CDP 109config MTD_SC520CDP
110 tristate "CFI Flash device mapped on AMD SC520 CDP" 110 tristate "CFI Flash device mapped on AMD SC520 CDP"
111 depends on X86 && MTD_CFI 111 depends on (MELAN || COMPILE_TEST) && MTD_CFI
112 help 112 help
113 The SC520 CDP board has two banks of CFI-compliant chips and one 113 The SC520 CDP board has two banks of CFI-compliant chips and one
114 Dual-in-line JEDEC chip. This 'mapping' driver supports that 114 Dual-in-line JEDEC chip. This 'mapping' driver supports that
@@ -116,7 +116,7 @@ config MTD_SC520CDP
116 116
117config MTD_NETSC520 117config MTD_NETSC520
118 tristate "CFI Flash device mapped on AMD NetSc520" 118 tristate "CFI Flash device mapped on AMD NetSc520"
119 depends on X86 && MTD_CFI 119 depends on (MELAN || COMPILE_TEST) && MTD_CFI
120 help 120 help
121 This enables access routines for the flash chips on the AMD NetSc520 121 This enables access routines for the flash chips on the AMD NetSc520
122 demonstration board. If you have one of these boards and would like 122 demonstration board. If you have one of these boards and would like
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
index 8fead8e46bce..093edd51bdc7 100644
--- a/drivers/mtd/maps/sc520cdp.c
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -183,7 +183,7 @@ static const struct sc520_par_table par_table[NUM_FLASH_BANKS] =
183 183
184static void sc520cdp_setup_par(void) 184static void sc520cdp_setup_par(void)
185{ 185{
186 volatile unsigned long __iomem *mmcr; 186 unsigned long __iomem *mmcr;
187 unsigned long mmcr_val; 187 unsigned long mmcr_val;
188 int i, j; 188 int i, j;
189 189
@@ -203,11 +203,11 @@ static void sc520cdp_setup_par(void)
203 */ 203 */
204 for(i = 0; i < NUM_FLASH_BANKS; i++) { /* for each par_table entry */ 204 for(i = 0; i < NUM_FLASH_BANKS; i++) { /* for each par_table entry */
205 for(j = 0; j < NUM_SC520_PAR; j++) { /* for each PAR register */ 205 for(j = 0; j < NUM_SC520_PAR; j++) { /* for each PAR register */
206 mmcr_val = mmcr[SC520_PAR(j)]; 206 mmcr_val = readl(&mmcr[SC520_PAR(j)]);
207 /* if target device field matches, reprogram the PAR */ 207 /* if target device field matches, reprogram the PAR */
208 if((mmcr_val & SC520_PAR_TRGDEV) == par_table[i].trgdev) 208 if((mmcr_val & SC520_PAR_TRGDEV) == par_table[i].trgdev)
209 { 209 {
210 mmcr[SC520_PAR(j)] = par_table[i].new_par; 210 writel(par_table[i].new_par, &mmcr[SC520_PAR(j)]);
211 break; 211 break;
212 } 212 }
213 } 213 }
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index 83a7a7091562..bb580bc16445 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -33,28 +33,6 @@ struct map_info soleng_flash_map = {
33 33
34static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL }; 34static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
35 35
36#ifdef CONFIG_MTD_SUPERH_RESERVE
37static struct mtd_partition superh_se_partitions[] = {
38 /* Reserved for boot code, read-only */
39 {
40 .name = "flash_boot",
41 .offset = 0x00000000,
42 .size = CONFIG_MTD_SUPERH_RESERVE,
43 .mask_flags = MTD_WRITEABLE,
44 },
45 /* All else is writable (e.g. JFFS) */
46 {
47 .name = "Flash FS",
48 .offset = MTDPART_OFS_NXTBLK,
49 .size = MTDPART_SIZ_FULL,
50 }
51};
52#define NUM_PARTITIONS ARRAY_SIZE(superh_se_partitions)
53#else
54#define superh_se_partitions NULL
55#define NUM_PARTITIONS 0
56#endif /* CONFIG_MTD_SUPERH_RESERVE */
57
58static int __init init_soleng_maps(void) 36static int __init init_soleng_maps(void)
59{ 37{
60 /* First probe at offset 0 */ 38 /* First probe at offset 0 */
@@ -92,8 +70,7 @@ static int __init init_soleng_maps(void)
92 mtd_device_register(eprom_mtd, NULL, 0); 70 mtd_device_register(eprom_mtd, NULL, 0);
93 } 71 }
94 72
95 mtd_device_parse_register(flash_mtd, probes, NULL, 73 mtd_device_parse_register(flash_mtd, probes, NULL, NULL, 0);
96 superh_se_partitions, NUM_PARTITIONS);
97 74
98 return 0; 75 return 0;
99} 76}
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 4dbfaee9aa95..43e30992a369 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -87,6 +87,9 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
87 if (req->cmd_type != REQ_TYPE_FS) 87 if (req->cmd_type != REQ_TYPE_FS)
88 return -EIO; 88 return -EIO;
89 89
90 if (req->cmd_flags & REQ_FLUSH)
91 return tr->flush(dev);
92
90 if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > 93 if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
91 get_capacity(req->rq_disk)) 94 get_capacity(req->rq_disk))
92 return -EIO; 95 return -EIO;
@@ -407,6 +410,9 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
407 if (!new->rq) 410 if (!new->rq)
408 goto error3; 411 goto error3;
409 412
413 if (tr->flush)
414 blk_queue_flush(new->rq, REQ_FLUSH);
415
410 new->rq->queuedata = new; 416 new->rq->queuedata = new;
411 blk_queue_logical_block_size(new->rq, tr->blksize); 417 blk_queue_logical_block_size(new->rq, tr->blksize);
412 418
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 7d4e7b9da3a1..a0f54e80670c 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -568,13 +568,18 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
568{ 568{
569 struct mtd_write_req req; 569 struct mtd_write_req req;
570 struct mtd_oob_ops ops; 570 struct mtd_oob_ops ops;
571 void __user *usr_data, *usr_oob; 571 const void __user *usr_data, *usr_oob;
572 int ret; 572 int ret;
573 573
574 if (copy_from_user(&req, argp, sizeof(req)) || 574 if (copy_from_user(&req, argp, sizeof(req)))
575 !access_ok(VERIFY_READ, req.usr_data, req.len) ||
576 !access_ok(VERIFY_READ, req.usr_oob, req.ooblen))
577 return -EFAULT; 575 return -EFAULT;
576
577 usr_data = (const void __user *)(uintptr_t)req.usr_data;
578 usr_oob = (const void __user *)(uintptr_t)req.usr_oob;
579 if (!access_ok(VERIFY_READ, usr_data, req.len) ||
580 !access_ok(VERIFY_READ, usr_oob, req.ooblen))
581 return -EFAULT;
582
578 if (!mtd->_write_oob) 583 if (!mtd->_write_oob)
579 return -EOPNOTSUPP; 584 return -EOPNOTSUPP;
580 585
@@ -583,10 +588,7 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
583 ops.ooblen = (size_t)req.ooblen; 588 ops.ooblen = (size_t)req.ooblen;
584 ops.ooboffs = 0; 589 ops.ooboffs = 0;
585 590
586 usr_data = (void __user *)(uintptr_t)req.usr_data; 591 if (usr_data) {
587 usr_oob = (void __user *)(uintptr_t)req.usr_oob;
588
589 if (req.usr_data) {
590 ops.datbuf = memdup_user(usr_data, ops.len); 592 ops.datbuf = memdup_user(usr_data, ops.len);
591 if (IS_ERR(ops.datbuf)) 593 if (IS_ERR(ops.datbuf))
592 return PTR_ERR(ops.datbuf); 594 return PTR_ERR(ops.datbuf);
@@ -594,7 +596,7 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
594 ops.datbuf = NULL; 596 ops.datbuf = NULL;
595 } 597 }
596 598
597 if (req.usr_oob) { 599 if (usr_oob) {
598 ops.oobbuf = memdup_user(usr_oob, ops.ooblen); 600 ops.oobbuf = memdup_user(usr_oob, ops.ooblen);
599 if (IS_ERR(ops.oobbuf)) { 601 if (IS_ERR(ops.oobbuf)) {
600 kfree(ops.datbuf); 602 kfree(ops.datbuf);
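
The reworked MEMWRITE path above validates usr_data and usr_oob only after copying the request, and either pointer may be left NULL for a data-only or OOB-only write. For context, a user-space caller reaches this code through the MEMWRITE ioctl roughly as sketched below; the device node, offset and buffer size are placeholders, and the target range is assumed to have been erased first:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/mtd-user.h>	/* struct mtd_write_req, MEMWRITE */

int main(void)
{
	unsigned char data[2048];
	struct mtd_write_req req;
	int fd = open("/dev/mtd0", O_RDWR);	/* placeholder device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(data, 0xa5, sizeof(data));
	memset(&req, 0, sizeof(req));
	req.start = 0;			/* placeholder offset, assumed erased */
	req.len = sizeof(data);
	req.usr_data = (uintptr_t)data;	/* main-area buffer */
	req.usr_oob = 0;		/* NULL: data-only write */
	req.ooblen = 0;
	req.mode = MTD_OPS_PLACE_OOB;

	if (ioctl(fd, MEMWRITE, &req) < 0)
		perror("MEMWRITE");

	close(fd);
	return 0;
}
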
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index b7a24946ca26..722898aea7a6 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -679,9 +679,6 @@ static int bf5xx_nand_remove(struct platform_device *pdev)
679 peripheral_free_list(bfin_nfc_pin_req); 679 peripheral_free_list(bfin_nfc_pin_req);
680 bf5xx_nand_dma_remove(info); 680 bf5xx_nand_dma_remove(info);
681 681
682 /* free the common resources */
683 kfree(info);
684
685 return 0; 682 return 0;
686} 683}
687 684
@@ -742,10 +739,10 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
742 return -EFAULT; 739 return -EFAULT;
743 } 740 }
744 741
745 info = kzalloc(sizeof(*info), GFP_KERNEL); 742 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
746 if (info == NULL) { 743 if (info == NULL) {
747 err = -ENOMEM; 744 err = -ENOMEM;
748 goto out_err_kzalloc; 745 goto out_err;
749 } 746 }
750 747
751 platform_set_drvdata(pdev, info); 748 platform_set_drvdata(pdev, info);
@@ -790,7 +787,7 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
790 /* initialise the hardware */ 787 /* initialise the hardware */
791 err = bf5xx_nand_hw_init(info); 788 err = bf5xx_nand_hw_init(info);
792 if (err) 789 if (err)
793 goto out_err_hw_init; 790 goto out_err;
794 791
795 /* setup hardware ECC data struct */ 792 /* setup hardware ECC data struct */
796 if (hardware_ecc) { 793 if (hardware_ecc) {
@@ -827,9 +824,7 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
827 824
828out_err_nand_scan: 825out_err_nand_scan:
829 bf5xx_nand_dma_remove(info); 826 bf5xx_nand_dma_remove(info);
830out_err_hw_init: 827out_err:
831 kfree(info);
832out_err_kzalloc:
833 peripheral_free_list(bfin_nfc_pin_req); 828 peripheral_free_list(bfin_nfc_pin_req);
834 829
835 return err; 830 return err;
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index c07cd573ad3a..9f2012a3e764 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1233,7 +1233,7 @@ static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
1233 return status; 1233 return status;
1234} 1234}
1235 1235
1236static void denali_erase(struct mtd_info *mtd, int page) 1236static int denali_erase(struct mtd_info *mtd, int page)
1237{ 1237{
1238 struct denali_nand_info *denali = mtd_to_denali(mtd); 1238 struct denali_nand_info *denali = mtd_to_denali(mtd);
1239 1239
@@ -1250,8 +1250,7 @@ static void denali_erase(struct mtd_info *mtd, int page)
1250 irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP | 1250 irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
1251 INTR_STATUS__ERASE_FAIL); 1251 INTR_STATUS__ERASE_FAIL);
1252 1252
1253 denali->status = (irq_status & INTR_STATUS__ERASE_FAIL) ? 1253 return (irq_status & INTR_STATUS__ERASE_FAIL) ? NAND_STATUS_FAIL : PASS;
1254 NAND_STATUS_FAIL : PASS;
1255} 1254}
1256 1255
1257static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col, 1256static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
@@ -1584,7 +1583,7 @@ int denali_init(struct denali_nand_info *denali)
1584 denali->nand.ecc.write_page_raw = denali_write_page_raw; 1583 denali->nand.ecc.write_page_raw = denali_write_page_raw;
1585 denali->nand.ecc.read_oob = denali_read_oob; 1584 denali->nand.ecc.read_oob = denali_read_oob;
1586 denali->nand.ecc.write_oob = denali_write_oob; 1585 denali->nand.ecc.write_oob = denali_write_oob;
1587 denali->nand.erase_cmd = denali_erase; 1586 denali->nand.erase = denali_erase;
1588 1587
1589 if (nand_scan_tail(&denali->mtd)) { 1588 if (nand_scan_tail(&denali->mtd)) {
1590 ret = -ENXIO; 1589 ret = -ENXIO;
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index 1b0265e85a06..ce24637e14f1 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -872,7 +872,7 @@ static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
872 return 0; 872 return 0;
873} 873}
874 874
875static void docg4_erase_block(struct mtd_info *mtd, int page) 875static int docg4_erase_block(struct mtd_info *mtd, int page)
876{ 876{
877 struct nand_chip *nand = mtd->priv; 877 struct nand_chip *nand = mtd->priv;
878 struct docg4_priv *doc = nand->priv; 878 struct docg4_priv *doc = nand->priv;
@@ -916,6 +916,8 @@ static void docg4_erase_block(struct mtd_info *mtd, int page)
916 write_nop(docptr); 916 write_nop(docptr);
917 poll_status(doc); 917 poll_status(doc);
918 write_nop(docptr); 918 write_nop(docptr);
919
920 return nand->waitfunc(mtd, nand);
919} 921}
920 922
921static int write_page(struct mtd_info *mtd, struct nand_chip *nand, 923static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
@@ -1236,7 +1238,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
1236 nand->block_markbad = docg4_block_markbad; 1238 nand->block_markbad = docg4_block_markbad;
1237 nand->read_buf = docg4_read_buf; 1239 nand->read_buf = docg4_read_buf;
1238 nand->write_buf = docg4_write_buf16; 1240 nand->write_buf = docg4_write_buf16;
1239 nand->erase_cmd = docg4_erase_block; 1241 nand->erase = docg4_erase_block;
1240 nand->ecc.read_page = docg4_read_page; 1242 nand->ecc.read_page = docg4_read_page;
1241 nand->ecc.write_page = docg4_write_page; 1243 nand->ecc.write_page = docg4_write_page;
1242 nand->ecc.read_page_raw = docg4_read_page_raw; 1244 nand->ecc.read_page_raw = docg4_read_page_raw;
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index ec549cd9849f..545a5c002f09 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -723,6 +723,19 @@ static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
723 return 0; 723 return 0;
724} 724}
725 725
726/* ECC will be calculated automatically, and errors will be detected in
727 * waitfunc.
728 */
729static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip,
730 uint32_t offset, uint32_t data_len,
731 const uint8_t *buf, int oob_required)
732{
733 fsl_elbc_write_buf(mtd, buf, mtd->writesize);
734 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
735
736 return 0;
737}
738
726static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) 739static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
727{ 740{
728 struct fsl_lbc_ctrl *ctrl = priv->ctrl; 741 struct fsl_lbc_ctrl *ctrl = priv->ctrl;
@@ -761,6 +774,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
761 774
762 chip->ecc.read_page = fsl_elbc_read_page; 775 chip->ecc.read_page = fsl_elbc_read_page;
763 chip->ecc.write_page = fsl_elbc_write_page; 776 chip->ecc.write_page = fsl_elbc_write_page;
777 chip->ecc.write_subpage = fsl_elbc_write_subpage;
764 778
765 /* If CS Base Register selects full hardware ECC then use it */ 779 /* If CS Base Register selects full hardware ECC then use it */
766 if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) == 780 if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index cb45d2f8e208..2338124dd05f 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -56,7 +56,7 @@ struct fsl_ifc_nand_ctrl {
56 struct nand_hw_control controller; 56 struct nand_hw_control controller;
57 struct fsl_ifc_mtd *chips[FSL_IFC_BANK_COUNT]; 57 struct fsl_ifc_mtd *chips[FSL_IFC_BANK_COUNT];
58 58
59 u8 __iomem *addr; /* Address of assigned IFC buffer */ 59 void __iomem *addr; /* Address of assigned IFC buffer */
60 unsigned int page; /* Last page written to / read from */ 60 unsigned int page; /* Last page written to / read from */
61 unsigned int read_bytes;/* Number of bytes read during command */ 61 unsigned int read_bytes;/* Number of bytes read during command */
62 unsigned int column; /* Saved column from SEQIN */ 62 unsigned int column; /* Saved column from SEQIN */
@@ -591,7 +591,10 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
591 * The chip always seems to report that it is 591 * The chip always seems to report that it is
592 * write-protected, even when it is not. 592 * write-protected, even when it is not.
593 */ 593 */
594 setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP); 594 if (chip->options & NAND_BUSWIDTH_16)
595 setbits16(ifc_nand_ctrl->addr, NAND_STATUS_WP);
596 else
597 setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP);
595 return; 598 return;
596 599
597 case NAND_CMD_RESET: 600 case NAND_CMD_RESET:
@@ -636,7 +639,7 @@ static void fsl_ifc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
636 len = bufsize - ifc_nand_ctrl->index; 639 len = bufsize - ifc_nand_ctrl->index;
637 } 640 }
638 641
639 memcpy_toio(&ifc_nand_ctrl->addr[ifc_nand_ctrl->index], buf, len); 642 memcpy_toio(ifc_nand_ctrl->addr + ifc_nand_ctrl->index, buf, len);
640 ifc_nand_ctrl->index += len; 643 ifc_nand_ctrl->index += len;
641} 644}
642 645
@@ -648,13 +651,16 @@ static uint8_t fsl_ifc_read_byte(struct mtd_info *mtd)
648{ 651{
649 struct nand_chip *chip = mtd->priv; 652 struct nand_chip *chip = mtd->priv;
650 struct fsl_ifc_mtd *priv = chip->priv; 653 struct fsl_ifc_mtd *priv = chip->priv;
654 unsigned int offset;
651 655
652 /* 656 /*
653 * If there are still bytes in the IFC buffer, then use the 657 * If there are still bytes in the IFC buffer, then use the
654 * next byte. 658 * next byte.
655 */ 659 */
656 if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) 660 if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
657 return in_8(&ifc_nand_ctrl->addr[ifc_nand_ctrl->index++]); 661 offset = ifc_nand_ctrl->index++;
662 return in_8(ifc_nand_ctrl->addr + offset);
663 }
658 664
659 dev_err(priv->dev, "%s: beyond end of buffer\n", __func__); 665 dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
660 return ERR_BYTE; 666 return ERR_BYTE;
@@ -675,8 +681,7 @@ static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd)
675 * next byte. 681 * next byte.
676 */ 682 */
677 if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) { 683 if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
678 data = in_be16((uint16_t __iomem *)&ifc_nand_ctrl-> 684 data = in_be16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
679 addr[ifc_nand_ctrl->index]);
680 ifc_nand_ctrl->index += 2; 685 ifc_nand_ctrl->index += 2;
681 return (uint8_t) data; 686 return (uint8_t) data;
682 } 687 }
@@ -701,7 +706,7 @@ static void fsl_ifc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
701 706
702 avail = min((unsigned int)len, 707 avail = min((unsigned int)len,
703 ifc_nand_ctrl->read_bytes - ifc_nand_ctrl->index); 708 ifc_nand_ctrl->read_bytes - ifc_nand_ctrl->index);
704 memcpy_fromio(buf, &ifc_nand_ctrl->addr[ifc_nand_ctrl->index], avail); 709 memcpy_fromio(buf, ifc_nand_ctrl->addr + ifc_nand_ctrl->index, avail);
705 ifc_nand_ctrl->index += avail; 710 ifc_nand_ctrl->index += avail;
706 711
707 if (len > avail) 712 if (len > avail)
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h
index 588f5374047c..05bb91f2f4c4 100644
--- a/drivers/mtd/nand/gpmi-nand/bch-regs.h
+++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h
@@ -54,7 +54,7 @@
54#define MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0 11 54#define MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0 11
55#define MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0 (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0) 55#define MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0 (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)
56#define BF_BCH_FLASH0LAYOUT0_ECC0(v, x) \ 56#define BF_BCH_FLASH0LAYOUT0_ECC0(v, x) \
57 (GPMI_IS_MX6Q(x) \ 57 (GPMI_IS_MX6(x) \
58 ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0) \ 58 ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0) \
59 & MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0) \ 59 & MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0) \
60 : (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) \ 60 : (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) \
@@ -65,7 +65,7 @@
65#define MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14 \ 65#define MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14 \
66 (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14) 66 (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14)
67#define BF_BCH_FLASH0LAYOUT0_GF(v, x) \ 67#define BF_BCH_FLASH0LAYOUT0_GF(v, x) \
68 ((GPMI_IS_MX6Q(x) && ((v) == 14)) \ 68 ((GPMI_IS_MX6(x) && ((v) == 14)) \
69 ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14) \ 69 ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14) \
70 & MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14) \ 70 & MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14) \
71 : 0 \ 71 : 0 \
@@ -77,7 +77,7 @@
77#define MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \ 77#define MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
78 (0x3ff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE) 78 (0x3ff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
79#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v, x) \ 79#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v, x) \
80 (GPMI_IS_MX6Q(x) \ 80 (GPMI_IS_MX6(x) \
81 ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \ 81 ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
82 : ((v) & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \ 82 : ((v) & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
83 ) 83 )
@@ -96,7 +96,7 @@
96#define MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN 11 96#define MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN 11
97#define MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN) 97#define MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)
98#define BF_BCH_FLASH0LAYOUT1_ECCN(v, x) \ 98#define BF_BCH_FLASH0LAYOUT1_ECCN(v, x) \
99 (GPMI_IS_MX6Q(x) \ 99 (GPMI_IS_MX6(x) \
100 ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN) \ 100 ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN) \
101 & MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN) \ 101 & MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN) \
102 : (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) \ 102 : (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) \
@@ -107,7 +107,7 @@
107#define MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14 \ 107#define MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14 \
108 (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14) 108 (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14)
109#define BF_BCH_FLASH0LAYOUT1_GF(v, x) \ 109#define BF_BCH_FLASH0LAYOUT1_GF(v, x) \
110 ((GPMI_IS_MX6Q(x) && ((v) == 14)) \ 110 ((GPMI_IS_MX6(x) && ((v) == 14)) \
111 ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14) \ 111 ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14) \
112 & MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14) \ 112 & MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14) \
113 : 0 \ 113 : 0 \
@@ -119,7 +119,7 @@
119#define MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \ 119#define MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
120 (0x3ff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) 120 (0x3ff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
121#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v, x) \ 121#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v, x) \
122 (GPMI_IS_MX6Q(x) \ 122 (GPMI_IS_MX6(x) \
123 ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \ 123 ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
124 : ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \ 124 : ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
125 ) 125 )
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index dd1df605a1d6..87e658ce23ef 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -861,7 +861,7 @@ static void gpmi_compute_edo_timing(struct gpmi_nand_data *this,
861 struct resources *r = &this->resources; 861 struct resources *r = &this->resources;
862 unsigned long rate = clk_get_rate(r->clock[0]); 862 unsigned long rate = clk_get_rate(r->clock[0]);
863 int mode = this->timing_mode; 863 int mode = this->timing_mode;
864 int dll_threshold = 16; /* in ns */ 864 int dll_threshold = this->devdata->max_chain_delay;
865 unsigned long delay; 865 unsigned long delay;
866 unsigned long clk_period; 866 unsigned long clk_period;
867 int t_rea; 867 int t_rea;
@@ -886,9 +886,6 @@ static void gpmi_compute_edo_timing(struct gpmi_nand_data *this,
886 /* [3] for GPMI_HW_GPMI_CTRL1 */ 886 /* [3] for GPMI_HW_GPMI_CTRL1 */
887 hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY; 887 hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
888 888
889 if (GPMI_IS_MX6Q(this))
890 dll_threshold = 12;
891
892 /* 889 /*
893 * Enlarge 10 times for the numerator and denominator in {3}. 890 * Enlarge 10 times for the numerator and denominator in {3}.
894 * This make us to get more accurate result. 891 * This make us to get more accurate result.
@@ -974,7 +971,7 @@ int gpmi_extra_init(struct gpmi_nand_data *this)
974 struct nand_chip *chip = &this->nand; 971 struct nand_chip *chip = &this->nand;
975 972
976 /* Enable the asynchronous EDO feature. */ 973 /* Enable the asynchronous EDO feature. */
977 if (GPMI_IS_MX6Q(this) && chip->onfi_version) { 974 if (GPMI_IS_MX6(this) && chip->onfi_version) {
978 int mode = onfi_get_async_timing_mode(chip); 975 int mode = onfi_get_async_timing_mode(chip);
979 976
980 /* We only support the timing mode 4 and mode 5. */ 977 /* We only support the timing mode 4 and mode 5. */
@@ -1096,12 +1093,12 @@ int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
1096 if (GPMI_IS_MX23(this)) { 1093 if (GPMI_IS_MX23(this)) {
1097 mask = MX23_BM_GPMI_DEBUG_READY0 << chip; 1094 mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
1098 reg = readl(r->gpmi_regs + HW_GPMI_DEBUG); 1095 reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
1099 } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6Q(this)) { 1096 } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6(this)) {
1100 /* 1097 /*
1101 * In the imx6, all the ready/busy pins are bound 1098 * In the imx6, all the ready/busy pins are bound
1102 * together. So we only need to check chip 0. 1099 * together. So we only need to check chip 0.
1103 */ 1100 */
1104 if (GPMI_IS_MX6Q(this)) 1101 if (GPMI_IS_MX6(this))
1105 chip = 0; 1102 chip = 0;
1106 1103
1107 /* MX28 shares the same R/B register as MX6Q. */ 1104 /* MX28 shares the same R/B register as MX6Q. */
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index bb77f750e75a..f638cd8077ca 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -53,6 +53,30 @@ static struct nand_ecclayout gpmi_hw_ecclayout = {
53 .oobfree = { {.offset = 0, .length = 0} } 53 .oobfree = { {.offset = 0, .length = 0} }
54}; 54};
55 55
56static const struct gpmi_devdata gpmi_devdata_imx23 = {
57 .type = IS_MX23,
58 .bch_max_ecc_strength = 20,
59 .max_chain_delay = 16,
60};
61
62static const struct gpmi_devdata gpmi_devdata_imx28 = {
63 .type = IS_MX28,
64 .bch_max_ecc_strength = 20,
65 .max_chain_delay = 16,
66};
67
68static const struct gpmi_devdata gpmi_devdata_imx6q = {
69 .type = IS_MX6Q,
70 .bch_max_ecc_strength = 40,
71 .max_chain_delay = 12,
72};
73
74static const struct gpmi_devdata gpmi_devdata_imx6sx = {
75 .type = IS_MX6SX,
76 .bch_max_ecc_strength = 62,
77 .max_chain_delay = 12,
78};
79
56static irqreturn_t bch_irq(int irq, void *cookie) 80static irqreturn_t bch_irq(int irq, void *cookie)
57{ 81{
58 struct gpmi_nand_data *this = cookie; 82 struct gpmi_nand_data *this = cookie;
@@ -102,14 +126,8 @@ static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
102 /* The mx23/mx28 only support the GF13. */ 126 /* The mx23/mx28 only support the GF13. */
103 if (geo->gf_len == 14) 127 if (geo->gf_len == 14)
104 return false; 128 return false;
105
106 if (geo->ecc_strength > MXS_ECC_STRENGTH_MAX)
107 return false;
108 } else if (GPMI_IS_MX6Q(this)) {
109 if (geo->ecc_strength > MX6_ECC_STRENGTH_MAX)
110 return false;
111 } 129 }
112 return true; 130 return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;
113} 131}
114 132
115/* 133/*
@@ -270,8 +288,7 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
270 "We can not support this nand chip." 288 "We can not support this nand chip."
271 " Its required ecc strength(%d) is beyond our" 289 " Its required ecc strength(%d) is beyond our"
272 " capability(%d).\n", geo->ecc_strength, 290 " capability(%d).\n", geo->ecc_strength,
273 (GPMI_IS_MX6Q(this) ? MX6_ECC_STRENGTH_MAX 291 this->devdata->bch_max_ecc_strength);
274 : MXS_ECC_STRENGTH_MAX));
275 return -EINVAL; 292 return -EINVAL;
276 } 293 }
277 294
@@ -572,7 +589,7 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
572 } 589 }
573 590
574 /* Get extra clocks */ 591 /* Get extra clocks */
575 if (GPMI_IS_MX6Q(this)) 592 if (GPMI_IS_MX6(this))
576 extra_clks = extra_clks_for_mx6q; 593 extra_clks = extra_clks_for_mx6q;
577 if (!extra_clks) 594 if (!extra_clks)
578 return 0; 595 return 0;
@@ -590,9 +607,9 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
590 r->clock[i] = clk; 607 r->clock[i] = clk;
591 } 608 }
592 609
593 if (GPMI_IS_MX6Q(this)) 610 if (GPMI_IS_MX6(this))
594 /* 611 /*
595 * Set the default value for the gpmi clock in mx6q: 612 * Set the default value for the gpmi clock.
596 * 613 *
597 * If you want to use the ONFI nand which is in the 614 * If you want to use the ONFI nand which is in the
598 * Synchronous Mode, you should change the clock as you need. 615 * Synchronous Mode, you should change the clock as you need.
@@ -1655,7 +1672,7 @@ static int gpmi_init_last(struct gpmi_nand_data *this)
1655 * (1) the chip is imx6, and 1672 * (1) the chip is imx6, and
1656 * (2) the size of the ECC parity is byte aligned. 1673 * (2) the size of the ECC parity is byte aligned.
1657 */ 1674 */
1658 if (GPMI_IS_MX6Q(this) && 1675 if (GPMI_IS_MX6(this) &&
1659 ((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) { 1676 ((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
1660 ecc->read_subpage = gpmi_ecc_read_subpage; 1677 ecc->read_subpage = gpmi_ecc_read_subpage;
1661 chip->options |= NAND_SUBPAGE_READ; 1678 chip->options |= NAND_SUBPAGE_READ;
@@ -1711,7 +1728,7 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
1711 if (ret) 1728 if (ret)
1712 goto err_out; 1729 goto err_out;
1713 1730
1714 ret = nand_scan_ident(mtd, GPMI_IS_MX6Q(this) ? 2 : 1, NULL); 1731 ret = nand_scan_ident(mtd, GPMI_IS_MX6(this) ? 2 : 1, NULL);
1715 if (ret) 1732 if (ret)
1716 goto err_out; 1733 goto err_out;
1717 1734
@@ -1740,23 +1757,19 @@ err_out:
1740 return ret; 1757 return ret;
1741} 1758}
1742 1759
1743static const struct platform_device_id gpmi_ids[] = {
1744 { .name = "imx23-gpmi-nand", .driver_data = IS_MX23, },
1745 { .name = "imx28-gpmi-nand", .driver_data = IS_MX28, },
1746 { .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, },
1747 {}
1748};
1749
1750static const struct of_device_id gpmi_nand_id_table[] = { 1760static const struct of_device_id gpmi_nand_id_table[] = {
1751 { 1761 {
1752 .compatible = "fsl,imx23-gpmi-nand", 1762 .compatible = "fsl,imx23-gpmi-nand",
1753 .data = (void *)&gpmi_ids[IS_MX23], 1763 .data = (void *)&gpmi_devdata_imx23,
1754 }, { 1764 }, {
1755 .compatible = "fsl,imx28-gpmi-nand", 1765 .compatible = "fsl,imx28-gpmi-nand",
1756 .data = (void *)&gpmi_ids[IS_MX28], 1766 .data = (void *)&gpmi_devdata_imx28,
1757 }, { 1767 }, {
1758 .compatible = "fsl,imx6q-gpmi-nand", 1768 .compatible = "fsl,imx6q-gpmi-nand",
1759 .data = (void *)&gpmi_ids[IS_MX6Q], 1769 .data = (void *)&gpmi_devdata_imx6q,
1770 }, {
1771 .compatible = "fsl,imx6sx-gpmi-nand",
1772 .data = (void *)&gpmi_devdata_imx6sx,
1760 }, {} 1773 }, {}
1761}; 1774};
1762MODULE_DEVICE_TABLE(of, gpmi_nand_id_table); 1775MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
@@ -1767,18 +1780,18 @@ static int gpmi_nand_probe(struct platform_device *pdev)
1767 const struct of_device_id *of_id; 1780 const struct of_device_id *of_id;
1768 int ret; 1781 int ret;
1769 1782
1783 this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
1784 if (!this)
1785 return -ENOMEM;
1786
1770 of_id = of_match_device(gpmi_nand_id_table, &pdev->dev); 1787 of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
1771 if (of_id) { 1788 if (of_id) {
1772 pdev->id_entry = of_id->data; 1789 this->devdata = of_id->data;
1773 } else { 1790 } else {
1774 dev_err(&pdev->dev, "Failed to find the right device id.\n"); 1791 dev_err(&pdev->dev, "Failed to find the right device id.\n");
1775 return -ENODEV; 1792 return -ENODEV;
1776 } 1793 }
1777 1794
1778 this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
1779 if (!this)
1780 return -ENOMEM;
1781
1782 platform_set_drvdata(pdev, this); 1795 platform_set_drvdata(pdev, this);
1783 this->pdev = pdev; 1796 this->pdev = pdev;
1784 this->dev = &pdev->dev; 1797 this->dev = &pdev->dev;
@@ -1823,7 +1836,6 @@ static struct platform_driver gpmi_nand_driver = {
1823 }, 1836 },
1824 .probe = gpmi_nand_probe, 1837 .probe = gpmi_nand_probe,
1825 .remove = gpmi_nand_remove, 1838 .remove = gpmi_nand_remove,
1826 .id_table = gpmi_ids,
1827}; 1839};
1828module_platform_driver(gpmi_nand_driver); 1840module_platform_driver(gpmi_nand_driver);
1829 1841
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index 4c801fa18725..32c6ba49f986 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -119,11 +119,25 @@ struct nand_timing {
119 int8_t tRHOH_in_ns; 119 int8_t tRHOH_in_ns;
120}; 120};
121 121
122enum gpmi_type {
123 IS_MX23,
124 IS_MX28,
125 IS_MX6Q,
126 IS_MX6SX
127};
128
129struct gpmi_devdata {
130 enum gpmi_type type;
131 int bch_max_ecc_strength;
132 int max_chain_delay; /* See the async EDO mode */
133};
134
122struct gpmi_nand_data { 135struct gpmi_nand_data {
123 /* flags */ 136 /* flags */
124#define GPMI_ASYNC_EDO_ENABLED (1 << 0) 137#define GPMI_ASYNC_EDO_ENABLED (1 << 0)
125#define GPMI_TIMING_INIT_OK (1 << 1) 138#define GPMI_TIMING_INIT_OK (1 << 1)
126 int flags; 139 int flags;
140 const struct gpmi_devdata *devdata;
127 141
128 /* System Interface */ 142 /* System Interface */
129 struct device *dev; 143 struct device *dev;
@@ -281,15 +295,11 @@ extern int gpmi_read_page(struct gpmi_nand_data *,
281#define STATUS_ERASED 0xff 295#define STATUS_ERASED 0xff
282#define STATUS_UNCORRECTABLE 0xfe 296#define STATUS_UNCORRECTABLE 0xfe
283 297
284/* BCH's bit correction capability. */ 298/* Use the devdata to distinguish different Archs. */
285#define MXS_ECC_STRENGTH_MAX 20 /* mx23 and mx28 */ 299#define GPMI_IS_MX23(x) ((x)->devdata->type == IS_MX23)
286#define MX6_ECC_STRENGTH_MAX 40 300#define GPMI_IS_MX28(x) ((x)->devdata->type == IS_MX28)
287 301#define GPMI_IS_MX6Q(x) ((x)->devdata->type == IS_MX6Q)
288/* Use the platform_id to distinguish different Archs. */ 302#define GPMI_IS_MX6SX(x) ((x)->devdata->type == IS_MX6SX)
289#define IS_MX23 0x0 303
290#define IS_MX28 0x1 304#define GPMI_IS_MX6(x) (GPMI_IS_MX6Q(x) || GPMI_IS_MX6SX(x))
291#define IS_MX6Q 0x2
292#define GPMI_IS_MX23(x) ((x)->pdev->id_entry->driver_data == IS_MX23)
293#define GPMI_IS_MX28(x) ((x)->pdev->id_entry->driver_data == IS_MX28)
294#define GPMI_IS_MX6Q(x) ((x)->pdev->id_entry->driver_data == IS_MX6Q)
295#endif 305#endif
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 9d01c4df838c..41167e9e991e 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -37,6 +37,7 @@
37#include <linux/err.h> 37#include <linux/err.h>
38#include <linux/sched.h> 38#include <linux/sched.h>
39#include <linux/slab.h> 39#include <linux/slab.h>
40#include <linux/mm.h>
40#include <linux/types.h> 41#include <linux/types.h>
41#include <linux/mtd/mtd.h> 42#include <linux/mtd/mtd.h>
42#include <linux/mtd/nand.h> 43#include <linux/mtd/nand.h>
@@ -1204,8 +1205,7 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1204 * ecc.pos. Let's make sure that there are no gaps in ECC positions. 1205 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
1205 */ 1206 */
1206 for (i = 0; i < eccfrag_len - 1; i++) { 1207 for (i = 0; i < eccfrag_len - 1; i++) {
1207 if (eccpos[i + start_step * chip->ecc.bytes] + 1 != 1208 if (eccpos[i + index] + 1 != eccpos[i + index + 1]) {
1208 eccpos[i + start_step * chip->ecc.bytes + 1]) {
1209 gaps = 1; 1209 gaps = 1;
1210 break; 1210 break;
1211 } 1211 }
@@ -1501,6 +1501,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1501 mtd->oobavail : mtd->oobsize; 1501 mtd->oobavail : mtd->oobsize;
1502 1502
1503 uint8_t *bufpoi, *oob, *buf; 1503 uint8_t *bufpoi, *oob, *buf;
1504 int use_bufpoi;
1504 unsigned int max_bitflips = 0; 1505 unsigned int max_bitflips = 0;
1505 int retry_mode = 0; 1506 int retry_mode = 0;
1506 bool ecc_fail = false; 1507 bool ecc_fail = false;
@@ -1523,9 +1524,20 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1523 bytes = min(mtd->writesize - col, readlen); 1524 bytes = min(mtd->writesize - col, readlen);
1524 aligned = (bytes == mtd->writesize); 1525 aligned = (bytes == mtd->writesize);
1525 1526
1527 if (!aligned)
1528 use_bufpoi = 1;
1529 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
1530 use_bufpoi = !virt_addr_valid(buf);
1531 else
1532 use_bufpoi = 0;
1533
1526 /* Is the current page in the buffer? */ 1534 /* Is the current page in the buffer? */
1527 if (realpage != chip->pagebuf || oob) { 1535 if (realpage != chip->pagebuf || oob) {
1528 bufpoi = aligned ? buf : chip->buffers->databuf; 1536 bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
1537
1538 if (use_bufpoi && aligned)
1539 pr_debug("%s: using read bounce buffer for buf@%p\n",
1540 __func__, buf);
1529 1541
1530read_retry: 1542read_retry:
1531 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page); 1543 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
@@ -1547,7 +1559,7 @@ read_retry:
1547 ret = chip->ecc.read_page(mtd, chip, bufpoi, 1559 ret = chip->ecc.read_page(mtd, chip, bufpoi,
1548 oob_required, page); 1560 oob_required, page);
1549 if (ret < 0) { 1561 if (ret < 0) {
1550 if (!aligned) 1562 if (use_bufpoi)
1551 /* Invalidate page cache */ 1563 /* Invalidate page cache */
1552 chip->pagebuf = -1; 1564 chip->pagebuf = -1;
1553 break; 1565 break;
@@ -1556,7 +1568,7 @@ read_retry:
1556 max_bitflips = max_t(unsigned int, max_bitflips, ret); 1568 max_bitflips = max_t(unsigned int, max_bitflips, ret);
1557 1569
1558 /* Transfer not aligned data */ 1570 /* Transfer not aligned data */
1559 if (!aligned) { 1571 if (use_bufpoi) {
1560 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob && 1572 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
1561 !(mtd->ecc_stats.failed - ecc_failures) && 1573 !(mtd->ecc_stats.failed - ecc_failures) &&
1562 (ops->mode != MTD_OPS_RAW)) { 1574 (ops->mode != MTD_OPS_RAW)) {
@@ -2376,11 +2388,23 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2376 int bytes = mtd->writesize; 2388 int bytes = mtd->writesize;
2377 int cached = writelen > bytes && page != blockmask; 2389 int cached = writelen > bytes && page != blockmask;
2378 uint8_t *wbuf = buf; 2390 uint8_t *wbuf = buf;
2391 int use_bufpoi;
2392 int part_pagewr = (column || writelen < (mtd->writesize - 1));
2393
2394 if (part_pagewr)
2395 use_bufpoi = 1;
2396 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
2397 use_bufpoi = !virt_addr_valid(buf);
2398 else
2399 use_bufpoi = 0;
2379 2400
2380 /* Partial page write? */ 2401 /* Partial page write?, or need to use bounce buffer */
2381 if (unlikely(column || writelen < (mtd->writesize - 1))) { 2402 if (use_bufpoi) {
2403 pr_debug("%s: using write bounce buffer for buf@%p\n",
2404 __func__, buf);
2382 cached = 0; 2405 cached = 0;
2383 bytes = min_t(int, bytes - column, (int) writelen); 2406 if (part_pagewr)
2407 bytes = min_t(int, bytes - column, writelen);
2384 chip->pagebuf = -1; 2408 chip->pagebuf = -1;
2385 memset(chip->buffers->databuf, 0xff, mtd->writesize); 2409 memset(chip->buffers->databuf, 0xff, mtd->writesize);
2386 memcpy(&chip->buffers->databuf[column], buf, bytes); 2410 memcpy(&chip->buffers->databuf[column], buf, bytes);
@@ -2618,18 +2642,20 @@ out:
2618} 2642}
2619 2643
2620/** 2644/**
2621 * single_erase_cmd - [GENERIC] NAND standard block erase command function 2645 * single_erase - [GENERIC] NAND standard block erase command function
2622 * @mtd: MTD device structure 2646 * @mtd: MTD device structure
2623 * @page: the page address of the block which will be erased 2647 * @page: the page address of the block which will be erased
2624 * 2648 *
2625 * Standard erase command for NAND chips. 2649 * Standard erase command for NAND chips. Returns NAND status.
2626 */ 2650 */
2627static void single_erase_cmd(struct mtd_info *mtd, int page) 2651static int single_erase(struct mtd_info *mtd, int page)
2628{ 2652{
2629 struct nand_chip *chip = mtd->priv; 2653 struct nand_chip *chip = mtd->priv;
2630 /* Send commands to erase a block */ 2654 /* Send commands to erase a block */
2631 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page); 2655 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
2632 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1); 2656 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
2657
2658 return chip->waitfunc(mtd, chip);
2633} 2659}
2634 2660
2635/** 2661/**
@@ -2710,9 +2736,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2710 (page + pages_per_block)) 2736 (page + pages_per_block))
2711 chip->pagebuf = -1; 2737 chip->pagebuf = -1;
2712 2738
2713 chip->erase_cmd(mtd, page & chip->pagemask); 2739 status = chip->erase(mtd, page & chip->pagemask);
2714
2715 status = chip->waitfunc(mtd, chip);
2716 2740
2717 /* 2741 /*
2718 * See if operation failed and additional status checks are 2742 * See if operation failed and additional status checks are
@@ -3607,7 +3631,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
3607 3631
3608 chip->onfi_version = 0; 3632 chip->onfi_version = 0;
3609 if (!type->name || !type->pagesize) { 3633 if (!type->name || !type->pagesize) {
3610 /* Check is chip is ONFI compliant */ 3634 /* Check if the chip is ONFI compliant */
3611 if (nand_flash_detect_onfi(mtd, chip, &busw)) 3635 if (nand_flash_detect_onfi(mtd, chip, &busw))
3612 goto ident_done; 3636 goto ident_done;
3613 3637
@@ -3685,7 +3709,7 @@ ident_done:
3685 } 3709 }
3686 3710
3687 chip->badblockbits = 8; 3711 chip->badblockbits = 8;
3688 chip->erase_cmd = single_erase_cmd; 3712 chip->erase = single_erase;
3689 3713
3690 /* Do not replace user supplied command function! */ 3714 /* Do not replace user supplied command function! */
3691 if (mtd->writesize > 512 && chip->cmdfunc == nand_command) 3715 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
@@ -3770,6 +3794,39 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
3770} 3794}
3771EXPORT_SYMBOL(nand_scan_ident); 3795EXPORT_SYMBOL(nand_scan_ident);
3772 3796
3797/*
3798 * Check if the chip configuration meets the datasheet requirements.
3799 *
3800 * If our configuration corrects A bits per B bytes and the minimum
3801 * required correction level is X bits per Y bytes, then we must ensure
3802 * both of the following are true:
3803 *
3804 * (1) A / B >= X / Y
3805 * (2) A >= X
3806 *
3807 * Requirement (1) ensures we can correct for the required bitflip density.
3808 * Requirement (2) ensures we can correct even when all bitflips are clumped
3809 * in the same sector.
3810 */
3811static bool nand_ecc_strength_good(struct mtd_info *mtd)
3812{
3813 struct nand_chip *chip = mtd->priv;
3814 struct nand_ecc_ctrl *ecc = &chip->ecc;
3815 int corr, ds_corr;
3816
3817 if (ecc->size == 0 || chip->ecc_step_ds == 0)
3818 /* Not enough information */
3819 return true;
3820
3821 /*
3822 * We get the number of corrected bits per page to compare
3823 * the correction density.
3824 */
3825 corr = (mtd->writesize * ecc->strength) / ecc->size;
3826 ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
3827
3828 return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
3829}
3773 3830
3774/** 3831/**
3775 * nand_scan_tail - [NAND Interface] Scan for the NAND device 3832 * nand_scan_tail - [NAND Interface] Scan for the NAND device
@@ -3990,6 +4047,9 @@ int nand_scan_tail(struct mtd_info *mtd)
3990 ecc->layout->oobavail += ecc->layout->oobfree[i].length; 4047 ecc->layout->oobavail += ecc->layout->oobfree[i].length;
3991 mtd->oobavail = ecc->layout->oobavail; 4048 mtd->oobavail = ecc->layout->oobavail;
3992 4049
4050 /* ECC sanity check: warn noisily if it's too weak */
4051 WARN_ON(!nand_ecc_strength_good(mtd));
4052
3993 /* 4053 /*
3994 * Set the number of read / write steps for one page depending on ECC 4054 * Set the number of read / write steps for one page depending on ECC
3995 * mode. 4055 * mode.
@@ -4023,8 +4083,16 @@ int nand_scan_tail(struct mtd_info *mtd)
4023 chip->pagebuf = -1; 4083 chip->pagebuf = -1;
4024 4084
4025 /* Large page NAND with SOFT_ECC should support subpage reads */ 4085 /* Large page NAND with SOFT_ECC should support subpage reads */
4026 if ((ecc->mode == NAND_ECC_SOFT) && (chip->page_shift > 9)) 4086 switch (ecc->mode) {
4027 chip->options |= NAND_SUBPAGE_READ; 4087 case NAND_ECC_SOFT:
4088 case NAND_ECC_SOFT_BCH:
4089 if (chip->page_shift > 9)
4090 chip->options |= NAND_SUBPAGE_READ;
4091 break;
4092
4093 default:
4094 break;
4095 }
4028 4096
4029 /* Fill in remaining MTD driver data */ 4097 /* Fill in remaining MTD driver data */
4030 mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH; 4098 mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
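
The nand_ecc_strength_good() helper added above compares the configured correction density (ecc->strength bits per ecc->size bytes) with the chip's datasheet minimum (ecc_strength_ds bits per ecc_step_ds bytes), both scaled to a full page, and nand_scan_tail() now warns when the configuration is too weak. A small stand-alone illustration of that arithmetic, using example geometries rather than values from any particular chip:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the density check in nand_ecc_strength_good(); every number
 * passed in below is an example, not taken from a real datasheet. */
static bool ecc_strength_good(int writesize, int strength, int step,
			      int ds_strength, int ds_step)
{
	int corr = (writesize * strength) / step;
	int ds_corr = (writesize * ds_strength) / ds_step;

	return corr >= ds_corr && strength >= ds_strength;
}

int main(void)
{
	/* BCH8 over 512-byte steps vs. a 4-bit/512-byte requirement: 64 >= 32 */
	printf("%d\n", ecc_strength_good(4096, 8, 512, 4, 512));	/* 1 */
	/* 1-bit/256-byte Hamming vs. the same requirement: 16 < 32, too weak */
	printf("%d\n", ecc_strength_good(4096, 1, 256, 4, 512));	/* 0 */
	return 0;
}
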
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index c0615d1526f9..7f0c3b4c2a4f 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -528,7 +528,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
528{ 528{
529 struct nand_chip *this = mtd->priv; 529 struct nand_chip *this = mtd->priv;
530 int i, chips; 530 int i, chips;
531 int bits, startblock, block, dir; 531 int startblock, block, dir;
532 int scanlen = mtd->writesize + mtd->oobsize; 532 int scanlen = mtd->writesize + mtd->oobsize;
533 int bbtblocks; 533 int bbtblocks;
534 int blocktopage = this->bbt_erase_shift - this->page_shift; 534 int blocktopage = this->bbt_erase_shift - this->page_shift;
@@ -552,9 +552,6 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
552 bbtblocks = mtd->size >> this->bbt_erase_shift; 552 bbtblocks = mtd->size >> this->bbt_erase_shift;
553 } 553 }
554 554
555 /* Number of bits for each erase block in the bbt */
556 bits = td->options & NAND_BBT_NRBITS_MSK;
557
558 for (i = 0; i < chips; i++) { 555 for (i = 0; i < chips; i++) {
559 /* Reset version information */ 556 /* Reset version information */
560 td->version[i] = 0; 557 td->version[i] = 0;
@@ -1285,6 +1282,7 @@ static int nand_create_badblock_pattern(struct nand_chip *this)
1285int nand_default_bbt(struct mtd_info *mtd) 1282int nand_default_bbt(struct mtd_info *mtd)
1286{ 1283{
1287 struct nand_chip *this = mtd->priv; 1284 struct nand_chip *this = mtd->priv;
1285 int ret;
1288 1286
1289 /* Is a flash based bad block table requested? */ 1287 /* Is a flash based bad block table requested? */
1290 if (this->bbt_options & NAND_BBT_USE_FLASH) { 1288 if (this->bbt_options & NAND_BBT_USE_FLASH) {
@@ -1303,8 +1301,11 @@ int nand_default_bbt(struct mtd_info *mtd)
1303 this->bbt_md = NULL; 1301 this->bbt_md = NULL;
1304 } 1302 }
1305 1303
1306 if (!this->badblock_pattern) 1304 if (!this->badblock_pattern) {
1307 nand_create_badblock_pattern(this); 1305 ret = nand_create_badblock_pattern(this);
1306 if (ret)
1307 return ret;
1308 }
1308 1309
1309 return nand_scan_bbt(mtd, this->badblock_pattern); 1310 return nand_scan_bbt(mtd, this->badblock_pattern);
1310} 1311}
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 053c9a2d47c3..97c4c0216c90 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -506,7 +506,7 @@ int __nand_correct_data(unsigned char *buf,
506 if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1) 506 if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
507 return 1; /* error in ECC data; no action needed */ 507 return 1; /* error in ECC data; no action needed */
508 508
509 pr_err("%s: uncorrectable ECC error", __func__); 509 pr_err("%s: uncorrectable ECC error\n", __func__);
510 return -1; 510 return -1;
511} 511}
512EXPORT_SYMBOL(__nand_correct_data); 512EXPORT_SYMBOL(__nand_correct_data);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 1ff49b80bdaf..f0ed92e210a1 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -137,6 +137,10 @@
137#define BADBLOCK_MARKER_LENGTH 2 137#define BADBLOCK_MARKER_LENGTH 2
138 138
139#ifdef CONFIG_MTD_NAND_OMAP_BCH 139#ifdef CONFIG_MTD_NAND_OMAP_BCH
140static u_char bch16_vector[] = {0xf5, 0x24, 0x1c, 0xd0, 0x61, 0xb3, 0xf1, 0x55,
141 0x2e, 0x2c, 0x86, 0xa3, 0xed, 0x36, 0x1b, 0x78,
142 0x48, 0x76, 0xa9, 0x3b, 0x97, 0xd1, 0x7a, 0x93,
143 0x07, 0x0e};
140static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc, 144static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
141 0xac, 0x6b, 0xff, 0x99, 0x7b}; 145 0xac, 0x6b, 0xff, 0x99, 0x7b};
142static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10}; 146static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
@@ -1114,6 +1118,19 @@ static void __maybe_unused omap_enable_hwecc_bch(struct mtd_info *mtd, int mode)
1114 ecc_size1 = BCH_ECC_SIZE1; 1118 ecc_size1 = BCH_ECC_SIZE1;
1115 } 1119 }
1116 break; 1120 break;
1121 case OMAP_ECC_BCH16_CODE_HW:
1122 bch_type = 0x2;
1123 nsectors = chip->ecc.steps;
1124 if (mode == NAND_ECC_READ) {
1125 wr_mode = 0x01;
1126 ecc_size0 = 52; /* ECC bits in nibbles per sector */
1127 ecc_size1 = 0; /* non-ECC bits in nibbles per sector */
1128 } else {
1129 wr_mode = 0x01;
1130 ecc_size0 = 0; /* extra bits in nibbles per sector */
1131 ecc_size1 = 52; /* OOB bits in nibbles per sector */
1132 }
1133 break;
1117 default: 1134 default:
1118 return; 1135 return;
1119 } 1136 }
@@ -1162,7 +1179,8 @@ static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
1162 struct gpmc_nand_regs *gpmc_regs = &info->reg; 1179 struct gpmc_nand_regs *gpmc_regs = &info->reg;
1163 u8 *ecc_code; 1180 u8 *ecc_code;
1164 unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4; 1181 unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
1165 int i; 1182 u32 val;
1183 int i, j;
1166 1184
1167 nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1; 1185 nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
1168 for (i = 0; i < nsectors; i++) { 1186 for (i = 0; i < nsectors; i++) {
@@ -1201,6 +1219,41 @@ static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
1201 *ecc_code++ = ((bch_val1 >> 4) & 0xFF); 1219 *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
1202 *ecc_code++ = ((bch_val1 & 0xF) << 4); 1220 *ecc_code++ = ((bch_val1 & 0xF) << 4);
1203 break; 1221 break;
1222 case OMAP_ECC_BCH16_CODE_HW:
1223 val = readl(gpmc_regs->gpmc_bch_result6[i]);
1224 ecc_code[0] = ((val >> 8) & 0xFF);
1225 ecc_code[1] = ((val >> 0) & 0xFF);
1226 val = readl(gpmc_regs->gpmc_bch_result5[i]);
1227 ecc_code[2] = ((val >> 24) & 0xFF);
1228 ecc_code[3] = ((val >> 16) & 0xFF);
1229 ecc_code[4] = ((val >> 8) & 0xFF);
1230 ecc_code[5] = ((val >> 0) & 0xFF);
1231 val = readl(gpmc_regs->gpmc_bch_result4[i]);
1232 ecc_code[6] = ((val >> 24) & 0xFF);
1233 ecc_code[7] = ((val >> 16) & 0xFF);
1234 ecc_code[8] = ((val >> 8) & 0xFF);
1235 ecc_code[9] = ((val >> 0) & 0xFF);
1236 val = readl(gpmc_regs->gpmc_bch_result3[i]);
1237 ecc_code[10] = ((val >> 24) & 0xFF);
1238 ecc_code[11] = ((val >> 16) & 0xFF);
1239 ecc_code[12] = ((val >> 8) & 0xFF);
1240 ecc_code[13] = ((val >> 0) & 0xFF);
1241 val = readl(gpmc_regs->gpmc_bch_result2[i]);
1242 ecc_code[14] = ((val >> 24) & 0xFF);
1243 ecc_code[15] = ((val >> 16) & 0xFF);
1244 ecc_code[16] = ((val >> 8) & 0xFF);
1245 ecc_code[17] = ((val >> 0) & 0xFF);
1246 val = readl(gpmc_regs->gpmc_bch_result1[i]);
1247 ecc_code[18] = ((val >> 24) & 0xFF);
1248 ecc_code[19] = ((val >> 16) & 0xFF);
1249 ecc_code[20] = ((val >> 8) & 0xFF);
1250 ecc_code[21] = ((val >> 0) & 0xFF);
1251 val = readl(gpmc_regs->gpmc_bch_result0[i]);
1252 ecc_code[22] = ((val >> 24) & 0xFF);
1253 ecc_code[23] = ((val >> 16) & 0xFF);
1254 ecc_code[24] = ((val >> 8) & 0xFF);
1255 ecc_code[25] = ((val >> 0) & 0xFF);
1256 break;
1204 default: 1257 default:
1205 return -EINVAL; 1258 return -EINVAL;
1206 } 1259 }
@@ -1210,8 +1263,8 @@ static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
1210 case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: 1263 case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1211 /* Add constant polynomial to remainder, so that 1264 /* Add constant polynomial to remainder, so that
1212 * ECC of blank pages results in 0x0 on reading back */ 1265 * ECC of blank pages results in 0x0 on reading back */
1213 for (i = 0; i < eccbytes; i++) 1266 for (j = 0; j < eccbytes; j++)
1214 ecc_calc[i] ^= bch4_polynomial[i]; 1267 ecc_calc[j] ^= bch4_polynomial[j];
1215 break; 1268 break;
1216 case OMAP_ECC_BCH4_CODE_HW: 1269 case OMAP_ECC_BCH4_CODE_HW:
1217 /* Set 8th ECC byte as 0x0 for ROM compatibility */ 1270 /* Set 8th ECC byte as 0x0 for ROM compatibility */
@@ -1220,13 +1273,15 @@ static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
1220 case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: 1273 case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1221 /* Add constant polynomial to remainder, so that 1274 /* Add constant polynomial to remainder, so that
1222 * ECC of blank pages results in 0x0 on reading back */ 1275 * ECC of blank pages results in 0x0 on reading back */
1223 for (i = 0; i < eccbytes; i++) 1276 for (j = 0; j < eccbytes; j++)
1224 ecc_calc[i] ^= bch8_polynomial[i]; 1277 ecc_calc[j] ^= bch8_polynomial[j];
1225 break; 1278 break;
1226 case OMAP_ECC_BCH8_CODE_HW: 1279 case OMAP_ECC_BCH8_CODE_HW:
1227 /* Set 14th ECC byte as 0x0 for ROM compatibility */ 1280 /* Set 14th ECC byte as 0x0 for ROM compatibility */
1228 ecc_calc[eccbytes - 1] = 0x0; 1281 ecc_calc[eccbytes - 1] = 0x0;
1229 break; 1282 break;
1283 case OMAP_ECC_BCH16_CODE_HW:
1284 break;
1230 default: 1285 default:
1231 return -EINVAL; 1286 return -EINVAL;
1232 } 1287 }
@@ -1237,6 +1292,7 @@ static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
1237 return 0; 1292 return 0;
1238} 1293}
1239 1294
1295#ifdef CONFIG_MTD_NAND_OMAP_BCH
1240/** 1296/**
1241 * erased_sector_bitflips - count bit flips 1297 * erased_sector_bitflips - count bit flips
1242 * @data: data sector buffer 1298 * @data: data sector buffer
@@ -1276,7 +1332,6 @@ static int erased_sector_bitflips(u_char *data, u_char *oob,
1276 return flip_bits; 1332 return flip_bits;
1277} 1333}
1278 1334
1279#ifdef CONFIG_MTD_NAND_OMAP_BCH
1280/** 1335/**
1281 * omap_elm_correct_data - corrects page data area in case error reported 1336 * omap_elm_correct_data - corrects page data area in case error reported
1282 * @mtd: MTD device structure 1337 * @mtd: MTD device structure
@@ -1318,6 +1373,10 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
1318 actual_eccbytes = ecc->bytes - 1; 1373 actual_eccbytes = ecc->bytes - 1;
1319 erased_ecc_vec = bch8_vector; 1374 erased_ecc_vec = bch8_vector;
1320 break; 1375 break;
1376 case OMAP_ECC_BCH16_CODE_HW:
1377 actual_eccbytes = ecc->bytes;
1378 erased_ecc_vec = bch16_vector;
1379 break;
1321 default: 1380 default:
1322 pr_err("invalid driver configuration\n"); 1381 pr_err("invalid driver configuration\n");
1323 return -EINVAL; 1382 return -EINVAL;
@@ -1382,7 +1441,7 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
1382 1441
1383 /* Check if any error reported */ 1442 /* Check if any error reported */
1384 if (!is_error_reported) 1443 if (!is_error_reported)
1385 return 0; 1444 return stat;
1386 1445
1387 /* Decode BCH error using ELM module */ 1446 /* Decode BCH error using ELM module */
1388 elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec); 1447 elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
@@ -1401,6 +1460,7 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
1401 BCH4_BIT_PAD; 1460 BCH4_BIT_PAD;
1402 break; 1461 break;
1403 case OMAP_ECC_BCH8_CODE_HW: 1462 case OMAP_ECC_BCH8_CODE_HW:
1463 case OMAP_ECC_BCH16_CODE_HW:
1404 pos = err_vec[i].error_loc[j]; 1464 pos = err_vec[i].error_loc[j];
1405 break; 1465 break;
1406 default: 1466 default:
@@ -1912,6 +1972,40 @@ static int omap_nand_probe(struct platform_device *pdev)
1912 goto return_error; 1972 goto return_error;
1913#endif 1973#endif
1914 1974
1975 case OMAP_ECC_BCH16_CODE_HW:
1976#ifdef CONFIG_MTD_NAND_OMAP_BCH
1977 pr_info("using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
1978 nand_chip->ecc.mode = NAND_ECC_HW;
1979 nand_chip->ecc.size = 512;
1980 nand_chip->ecc.bytes = 26;
1981 nand_chip->ecc.strength = 16;
1982 nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
1983 nand_chip->ecc.correct = omap_elm_correct_data;
1984 nand_chip->ecc.calculate = omap_calculate_ecc_bch;
1985 nand_chip->ecc.read_page = omap_read_page_bch;
1986 nand_chip->ecc.write_page = omap_write_page_bch;
1987 /* This ECC scheme requires ELM H/W block */
1988 err = is_elm_present(info, pdata->elm_of_node, BCH16_ECC);
1989 if (err < 0) {
1990 pr_err("ELM is required for this ECC scheme\n");
1991 goto return_error;
1992 }
1993 /* define ECC layout */
1994 ecclayout->eccbytes = nand_chip->ecc.bytes *
1995 (mtd->writesize /
1996 nand_chip->ecc.size);
1997 oob_index = BADBLOCK_MARKER_LENGTH;
1998 for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
1999 ecclayout->eccpos[i] = oob_index;
2000 /* reserved marker already included in ecclayout->eccbytes */
2001 ecclayout->oobfree->offset =
2002 ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
2003 break;
2004#else
2005 pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
2006 err = -EINVAL;
2007 goto return_error;
2008#endif
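/*
 * Editor's note -- illustrative arithmetic only, not part of the patch:
 * with the BCH16 settings above (ecc.size = 512, ecc.bytes = 26), a NAND
 * device with a 4096-byte page runs 4096 / 512 = 8 ECC steps, so
 * ecclayout->eccbytes = 26 * 8 = 208. The eccpos[] entries then occupy
 * OOB bytes BADBLOCK_MARKER_LENGTH .. BADBLOCK_MARKER_LENGTH + 207, and
 * oobfree->offset is set to the byte just past the last ECC position.
 */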
1915 default: 2009 default:
1916 pr_err("nand: error: invalid or unsupported ECC scheme\n"); 2010 pr_err("nand: error: invalid or unsupported ECC scheme\n");
1917 err = -EINVAL; 2011 err = -EINVAL;
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index dd7fe817eafb..471b4df3a5ac 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -214,7 +214,7 @@ static int orion_nand_remove(struct platform_device *pdev)
214} 214}
215 215
216#ifdef CONFIG_OF 216#ifdef CONFIG_OF
217static struct of_device_id orion_nand_of_match_table[] = { 217static const struct of_device_id orion_nand_of_match_table[] = {
218 { .compatible = "marvell,orion-nand", }, 218 { .compatible = "marvell,orion-nand", },
219 {}, 219 {},
220}; 220};
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 7588fe2c127f..96b0b1d27df1 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -127,10 +127,10 @@
127 127
128/* macros for registers read/write */ 128/* macros for registers read/write */
129#define nand_writel(info, off, val) \ 129#define nand_writel(info, off, val) \
130 __raw_writel((val), (info)->mmio_base + (off)) 130 writel_relaxed((val), (info)->mmio_base + (off))
131 131
132#define nand_readl(info, off) \ 132#define nand_readl(info, off) \
133 __raw_readl((info)->mmio_base + (off)) 133 readl_relaxed((info)->mmio_base + (off))
134 134
135/* error code and state */ 135/* error code and state */
136enum { 136enum {
@@ -337,7 +337,7 @@ static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
337/* convert nano-seconds to nand flash controller clock cycles */ 337/* convert nano-seconds to nand flash controller clock cycles */
338#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000) 338#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
339 339
340static struct of_device_id pxa3xx_nand_dt_ids[] = { 340static const struct of_device_id pxa3xx_nand_dt_ids[] = {
341 { 341 {
342 .compatible = "marvell,pxa3xx-nand", 342 .compatible = "marvell,pxa3xx-nand",
343 .data = (void *)PXA3XX_NAND_VARIANT_PXA, 343 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
@@ -1354,7 +1354,6 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1354 ecc->mode = NAND_ECC_HW; 1354 ecc->mode = NAND_ECC_HW;
1355 ecc->size = 512; 1355 ecc->size = 512;
1356 ecc->strength = 1; 1356 ecc->strength = 1;
1357 return 1;
1358 1357
1359 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) { 1358 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1360 info->chunk_size = 512; 1359 info->chunk_size = 512;
@@ -1363,7 +1362,6 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1363 ecc->mode = NAND_ECC_HW; 1362 ecc->mode = NAND_ECC_HW;
1364 ecc->size = 512; 1363 ecc->size = 512;
1365 ecc->strength = 1; 1364 ecc->strength = 1;
1366 return 1;
1367 1365
1368 /* 1366 /*
1369 * Required ECC: 4-bit correction per 512 bytes 1367 * Required ECC: 4-bit correction per 512 bytes
@@ -1378,7 +1376,6 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1378 ecc->size = info->chunk_size; 1376 ecc->size = info->chunk_size;
1379 ecc->layout = &ecc_layout_2KB_bch4bit; 1377 ecc->layout = &ecc_layout_2KB_bch4bit;
1380 ecc->strength = 16; 1378 ecc->strength = 16;
1381 return 1;
1382 1379
1383 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) { 1380 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1384 info->ecc_bch = 1; 1381 info->ecc_bch = 1;
@@ -1389,7 +1386,6 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1389 ecc->size = info->chunk_size; 1386 ecc->size = info->chunk_size;
1390 ecc->layout = &ecc_layout_4KB_bch4bit; 1387 ecc->layout = &ecc_layout_4KB_bch4bit;
1391 ecc->strength = 16; 1388 ecc->strength = 16;
1392 return 1;
1393 1389
1394 /* 1390 /*
1395 * Required ECC: 8-bit correction per 512 bytes 1391 * Required ECC: 8-bit correction per 512 bytes
@@ -1404,8 +1400,15 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1404 ecc->size = info->chunk_size; 1400 ecc->size = info->chunk_size;
1405 ecc->layout = &ecc_layout_4KB_bch8bit; 1401 ecc->layout = &ecc_layout_4KB_bch8bit;
1406 ecc->strength = 16; 1402 ecc->strength = 16;
1407 return 1; 1403 } else {
1404 dev_err(&info->pdev->dev,
1405 "ECC strength %d at page size %d is not supported\n",
1406 strength, page_size);
1407 return -ENODEV;
1408 } 1408 }
1409
1410 dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1411 ecc->strength, ecc->size);
1409 return 0; 1412 return 0;
1410} 1413}
1411 1414
@@ -1516,8 +1519,13 @@ KEEP_CONFIG:
1516 } 1519 }
1517 } 1520 }
1518 1521
1519 ecc_strength = chip->ecc_strength_ds; 1522 if (pdata->ecc_strength && pdata->ecc_step_size) {
1520 ecc_step = chip->ecc_step_ds; 1523 ecc_strength = pdata->ecc_strength;
1524 ecc_step = pdata->ecc_step_size;
1525 } else {
1526 ecc_strength = chip->ecc_strength_ds;
1527 ecc_step = chip->ecc_step_ds;
1528 }
1521 1529
1522 /* Set default ECC strength requirements on non-ONFI devices */ 1530 /* Set default ECC strength requirements on non-ONFI devices */
1523 if (ecc_strength < 1 && ecc_step < 1) { 1531 if (ecc_strength < 1 && ecc_step < 1) {
@@ -1527,12 +1535,8 @@ KEEP_CONFIG:
1527 1535
1528 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength, 1536 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1529 ecc_step, mtd->writesize); 1537 ecc_step, mtd->writesize);
1530 if (!ret) { 1538 if (ret)
1531 dev_err(&info->pdev->dev, 1539 return ret;
1532 "ECC strength %d at page size %d is not supported\n",
1533 ecc_strength, mtd->writesize);
1534 return -ENODEV;
1535 }
1536 1540
1537 /* calculate addressing information */ 1541 /* calculate addressing information */
1538 if (mtd->writesize >= 2048) 1542 if (mtd->writesize >= 2048)
@@ -1730,6 +1734,14 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1730 of_property_read_u32(np, "num-cs", &pdata->num_cs); 1734 of_property_read_u32(np, "num-cs", &pdata->num_cs);
1731 pdata->flash_bbt = of_get_nand_on_flash_bbt(np); 1735 pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1732 1736
1737 pdata->ecc_strength = of_get_nand_ecc_strength(np);
1738 if (pdata->ecc_strength < 0)
1739 pdata->ecc_strength = 0;
1740
1741 pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1742 if (pdata->ecc_step_size < 0)
1743 pdata->ecc_step_size = 0;
1744
1733 pdev->dev.platform_data = pdata; 1745 pdev->dev.platform_data = pdata;
1734 1746
1735 return 0; 1747 return 0;
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index 325930db3f04..baea83f4dea8 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -245,7 +245,7 @@ static void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
245 } 245 }
246 246
247 /* write DWORD chunks - faster */ 247
248 while (len) { 248 while (len >= 4) {
249 reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24; 249 reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
250 r852_write_reg_dword(dev, R852_DATALINE, reg); 250 r852_write_reg_dword(dev, R852_DATALINE, reg);
251 buf += 4; 251 buf += 4;
@@ -254,8 +254,10 @@ static void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
254 } 254 }
255 255
256 /* write rest */ 256 /* write rest */
257 while (len) 257 while (len > 0) {
258 r852_write_reg(dev, R852_DATALINE, *buf++); 258 r852_write_reg(dev, R852_DATALINE, *buf++);
259 len--;
260 }
259} 261}
260 262
261/* 263/*
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index b1a792fd1c23..efb819c3df2f 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -537,9 +537,9 @@ static int onenand_write_bufferram(struct mtd_info *mtd, int area,
537 return 0; 537 return 0;
538} 538}
539 539
540static int (*s5pc110_dma_ops)(void *dst, void *src, size_t count, int direction); 540static int (*s5pc110_dma_ops)(dma_addr_t dst, dma_addr_t src, size_t count, int direction);
541 541
542static int s5pc110_dma_poll(void *dst, void *src, size_t count, int direction) 542static int s5pc110_dma_poll(dma_addr_t dst, dma_addr_t src, size_t count, int direction)
543{ 543{
544 void __iomem *base = onenand->dma_addr; 544 void __iomem *base = onenand->dma_addr;
545 int status; 545 int status;
@@ -605,7 +605,7 @@ static irqreturn_t s5pc110_onenand_irq(int irq, void *data)
605 return IRQ_HANDLED; 605 return IRQ_HANDLED;
606} 606}
607 607
608static int s5pc110_dma_irq(void *dst, void *src, size_t count, int direction) 608static int s5pc110_dma_irq(dma_addr_t dst, dma_addr_t src, size_t count, int direction)
609{ 609{
610 void __iomem *base = onenand->dma_addr; 610 void __iomem *base = onenand->dma_addr;
611 int status; 611 int status;
@@ -686,7 +686,7 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
686 dev_err(dev, "Couldn't map a %d byte buffer for DMA\n", count); 686 dev_err(dev, "Couldn't map a %d byte buffer for DMA\n", count);
687 goto normal; 687 goto normal;
688 } 688 }
689 err = s5pc110_dma_ops((void *) dma_dst, (void *) dma_src, 689 err = s5pc110_dma_ops(dma_dst, dma_src,
690 count, S5PC110_DMA_DIR_READ); 690 count, S5PC110_DMA_DIR_READ);
691 691
692 if (page_dma) 692 if (page_dma)
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
new file mode 100644
index 000000000000..f8acfa4310ef
--- /dev/null
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -0,0 +1,17 @@
1menuconfig MTD_SPI_NOR
2 tristate "SPI-NOR device support"
3 depends on MTD
4 help
5 This is the framework for SPI NOR, which can be used by the SPI
6 device drivers and the SPI-NOR device driver.
7
8if MTD_SPI_NOR
9
10config SPI_FSL_QUADSPI
11 tristate "Freescale Quad SPI controller"
12 depends on ARCH_MXC
13 help
14 This enables support for the Quad SPI controller in master mode.
15 Only the NOR flash is connected to this controller for now.
16
17endif # MTD_SPI_NOR
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
new file mode 100644
index 000000000000..6a7ce1462247
--- /dev/null
+++ b/drivers/mtd/spi-nor/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
2obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
new file mode 100644
index 000000000000..8d659a2888d5
--- /dev/null
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -0,0 +1,1009 @@
1/*
2 * Freescale QuadSPI driver.
3 *
4 * Copyright (C) 2013 Freescale Semiconductor, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/interrupt.h>
14#include <linux/errno.h>
15#include <linux/platform_device.h>
16#include <linux/sched.h>
17#include <linux/delay.h>
18#include <linux/io.h>
19#include <linux/clk.h>
20#include <linux/err.h>
21#include <linux/of.h>
22#include <linux/of_device.h>
23#include <linux/timer.h>
24#include <linux/jiffies.h>
25#include <linux/completion.h>
26#include <linux/mtd/mtd.h>
27#include <linux/mtd/partitions.h>
28#include <linux/mtd/spi-nor.h>
29
30/* The registers */
31#define QUADSPI_MCR 0x00
32#define QUADSPI_MCR_RESERVED_SHIFT 16
33#define QUADSPI_MCR_RESERVED_MASK (0xF << QUADSPI_MCR_RESERVED_SHIFT)
34#define QUADSPI_MCR_MDIS_SHIFT 14
35#define QUADSPI_MCR_MDIS_MASK (1 << QUADSPI_MCR_MDIS_SHIFT)
36#define QUADSPI_MCR_CLR_TXF_SHIFT 11
37#define QUADSPI_MCR_CLR_TXF_MASK (1 << QUADSPI_MCR_CLR_TXF_SHIFT)
38#define QUADSPI_MCR_CLR_RXF_SHIFT 10
39#define QUADSPI_MCR_CLR_RXF_MASK (1 << QUADSPI_MCR_CLR_RXF_SHIFT)
40#define QUADSPI_MCR_DDR_EN_SHIFT 7
41#define QUADSPI_MCR_DDR_EN_MASK (1 << QUADSPI_MCR_DDR_EN_SHIFT)
42#define QUADSPI_MCR_END_CFG_SHIFT 2
43#define QUADSPI_MCR_END_CFG_MASK (3 << QUADSPI_MCR_END_CFG_SHIFT)
44#define QUADSPI_MCR_SWRSTHD_SHIFT 1
45#define QUADSPI_MCR_SWRSTHD_MASK (1 << QUADSPI_MCR_SWRSTHD_SHIFT)
46#define QUADSPI_MCR_SWRSTSD_SHIFT 0
47#define QUADSPI_MCR_SWRSTSD_MASK (1 << QUADSPI_MCR_SWRSTSD_SHIFT)
48
49#define QUADSPI_IPCR 0x08
50#define QUADSPI_IPCR_SEQID_SHIFT 24
51#define QUADSPI_IPCR_SEQID_MASK (0xF << QUADSPI_IPCR_SEQID_SHIFT)
52
53#define QUADSPI_BUF0CR 0x10
54#define QUADSPI_BUF1CR 0x14
55#define QUADSPI_BUF2CR 0x18
56#define QUADSPI_BUFXCR_INVALID_MSTRID 0xe
57
58#define QUADSPI_BUF3CR 0x1c
59#define QUADSPI_BUF3CR_ALLMST_SHIFT 31
60#define QUADSPI_BUF3CR_ALLMST (1 << QUADSPI_BUF3CR_ALLMST_SHIFT)
61
62#define QUADSPI_BFGENCR 0x20
63#define QUADSPI_BFGENCR_PAR_EN_SHIFT 16
64#define QUADSPI_BFGENCR_PAR_EN_MASK (1 << (QUADSPI_BFGENCR_PAR_EN_SHIFT))
65#define QUADSPI_BFGENCR_SEQID_SHIFT 12
66#define QUADSPI_BFGENCR_SEQID_MASK (0xF << QUADSPI_BFGENCR_SEQID_SHIFT)
67
68#define QUADSPI_BUF0IND 0x30
69#define QUADSPI_BUF1IND 0x34
70#define QUADSPI_BUF2IND 0x38
71#define QUADSPI_SFAR 0x100
72
73#define QUADSPI_SMPR 0x108
74#define QUADSPI_SMPR_DDRSMP_SHIFT 16
75#define QUADSPI_SMPR_DDRSMP_MASK (7 << QUADSPI_SMPR_DDRSMP_SHIFT)
76#define QUADSPI_SMPR_FSDLY_SHIFT 6
77#define QUADSPI_SMPR_FSDLY_MASK (1 << QUADSPI_SMPR_FSDLY_SHIFT)
78#define QUADSPI_SMPR_FSPHS_SHIFT 5
79#define QUADSPI_SMPR_FSPHS_MASK (1 << QUADSPI_SMPR_FSPHS_SHIFT)
80#define QUADSPI_SMPR_HSENA_SHIFT 0
81#define QUADSPI_SMPR_HSENA_MASK (1 << QUADSPI_SMPR_HSENA_SHIFT)
82
83#define QUADSPI_RBSR 0x10c
84#define QUADSPI_RBSR_RDBFL_SHIFT 8
85#define QUADSPI_RBSR_RDBFL_MASK (0x3F << QUADSPI_RBSR_RDBFL_SHIFT)
86
87#define QUADSPI_RBCT 0x110
88#define QUADSPI_RBCT_WMRK_MASK 0x1F
89#define QUADSPI_RBCT_RXBRD_SHIFT 8
90#define QUADSPI_RBCT_RXBRD_USEIPS (0x1 << QUADSPI_RBCT_RXBRD_SHIFT)
91
92#define QUADSPI_TBSR 0x150
93#define QUADSPI_TBDR 0x154
94#define QUADSPI_SR 0x15c
95#define QUADSPI_SR_IP_ACC_SHIFT 1
96#define QUADSPI_SR_IP_ACC_MASK (0x1 << QUADSPI_SR_IP_ACC_SHIFT)
97#define QUADSPI_SR_AHB_ACC_SHIFT 2
98#define QUADSPI_SR_AHB_ACC_MASK (0x1 << QUADSPI_SR_AHB_ACC_SHIFT)
99
100#define QUADSPI_FR 0x160
101#define QUADSPI_FR_TFF_MASK 0x1
102
103#define QUADSPI_SFA1AD 0x180
104#define QUADSPI_SFA2AD 0x184
105#define QUADSPI_SFB1AD 0x188
106#define QUADSPI_SFB2AD 0x18c
107#define QUADSPI_RBDR 0x200
108
109#define QUADSPI_LUTKEY 0x300
110#define QUADSPI_LUTKEY_VALUE 0x5AF05AF0
111
112#define QUADSPI_LCKCR 0x304
113#define QUADSPI_LCKER_LOCK 0x1
114#define QUADSPI_LCKER_UNLOCK 0x2
115
116#define QUADSPI_RSER 0x164
117#define QUADSPI_RSER_TFIE (0x1 << 0)
118
119#define QUADSPI_LUT_BASE 0x310
120
121/*
122 * The definition of the LUT register is shown below:
123 *
124 * ---------------------------------------------------
125 * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
126 * ---------------------------------------------------
127 */
128#define OPRND0_SHIFT 0
129#define PAD0_SHIFT 8
130#define INSTR0_SHIFT 10
131#define OPRND1_SHIFT 16
132
133/* Instruction set for the LUT register. */
134#define LUT_STOP 0
135#define LUT_CMD 1
136#define LUT_ADDR 2
137#define LUT_DUMMY 3
138#define LUT_MODE 4
139#define LUT_MODE2 5
140#define LUT_MODE4 6
141#define LUT_READ 7
142#define LUT_WRITE 8
143#define LUT_JMP_ON_CS 9
144#define LUT_ADDR_DDR 10
145#define LUT_MODE_DDR 11
146#define LUT_MODE2_DDR 12
147#define LUT_MODE4_DDR 13
148#define LUT_READ_DDR 14
149#define LUT_WRITE_DDR 15
150#define LUT_DATA_LEARN 16
151
152/*
153 * The PAD definitions for LUT register.
154 *
155 * The pad value selects how many of the IO[0:3] lines are used.
156 * For example, a Quad read needs four IO lines, so you should
157 * set LUT_PAD4, which means four IO lines are used.
158 */
159#define LUT_PAD1 0
160#define LUT_PAD2 1
161#define LUT_PAD4 2
162
163/* Operands for the LUT register. */
164#define ADDR24BIT 0x18
165#define ADDR32BIT 0x20
166
167/* Macros for constructing the LUT register. */
168#define LUT0(ins, pad, opr) \
169 (((opr) << OPRND0_SHIFT) | ((LUT_##pad) << PAD0_SHIFT) | \
170 ((LUT_##ins) << INSTR0_SHIFT))
171
172#define LUT1(ins, pad, opr) (LUT0(ins, pad, opr) << OPRND1_SHIFT)
173
174/* other macros for LUT register. */
175#define QUADSPI_LUT(x) (QUADSPI_LUT_BASE + (x) * 4)
176#define QUADSPI_LUT_NUM 64
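/*
 * Editor's note -- illustrative sketch, not part of the patch: each LUT
 * word packs two instruction/pad/operand triplets via LUT0()/LUT1().
 * For example, fsl_qspi_init_lut() below programs the first word of the
 * SEQID_QUAD_READ slot as
 *
 *	LUT0(CMD, PAD1, SPINOR_OP_READ_1_1_4) | LUT1(ADDR, PAD1, ADDR24BIT)
 *
 * i.e. send the quad-read opcode on one IO line, then a 24-bit address
 * on one IO line; the next word adds the dummy cycles and the 4-line
 * data read.
 */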
177
178/* SEQID -- we can have 16 seqids at most. */
179#define SEQID_QUAD_READ 0
180#define SEQID_WREN 1
181#define SEQID_WRDI 2
182#define SEQID_RDSR 3
183#define SEQID_SE 4
184#define SEQID_CHIP_ERASE 5
185#define SEQID_PP 6
186#define SEQID_RDID 7
187#define SEQID_WRSR 8
188#define SEQID_RDCR 9
189#define SEQID_EN4B 10
190#define SEQID_BRWR 11
191
192enum fsl_qspi_devtype {
193 FSL_QUADSPI_VYBRID,
194 FSL_QUADSPI_IMX6SX,
195};
196
197struct fsl_qspi_devtype_data {
198 enum fsl_qspi_devtype devtype;
199 int rxfifo;
200 int txfifo;
201};
202
203static struct fsl_qspi_devtype_data vybrid_data = {
204 .devtype = FSL_QUADSPI_VYBRID,
205 .rxfifo = 128,
206 .txfifo = 64
207};
208
209static struct fsl_qspi_devtype_data imx6sx_data = {
210 .devtype = FSL_QUADSPI_IMX6SX,
211 .rxfifo = 128,
212 .txfifo = 512
213};
214
215#define FSL_QSPI_MAX_CHIP 4
216struct fsl_qspi {
217 struct mtd_info mtd[FSL_QSPI_MAX_CHIP];
218 struct spi_nor nor[FSL_QSPI_MAX_CHIP];
219 void __iomem *iobase;
220 void __iomem *ahb_base; /* Used when reading from the AHB bus */
221 u32 memmap_phy;
222 struct clk *clk, *clk_en;
223 struct device *dev;
224 struct completion c;
225 struct fsl_qspi_devtype_data *devtype_data;
226 u32 nor_size;
227 u32 nor_num;
228 u32 clk_rate;
229 unsigned int chip_base_addr; /* We may support two chips. */
230};
231
232static inline int is_vybrid_qspi(struct fsl_qspi *q)
233{
234 return q->devtype_data->devtype == FSL_QUADSPI_VYBRID;
235}
236
237static inline int is_imx6sx_qspi(struct fsl_qspi *q)
238{
239 return q->devtype_data->devtype == FSL_QUADSPI_IMX6SX;
240}
241
242/*
243 * An IC bug makes us re-arrange the 32-bit data.
244 * Newer chips, such as the IMX6SLX, have fixed this bug.
245 */
246static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
247{
248 return is_vybrid_qspi(q) ? __swab32(a) : a;
249}
250
251static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q)
252{
253 writel(QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
254 writel(QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);
255}
256
257static inline void fsl_qspi_lock_lut(struct fsl_qspi *q)
258{
259 writel(QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
260 writel(QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
261}
262
263static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id)
264{
265 struct fsl_qspi *q = dev_id;
266 u32 reg;
267
268 /* clear interrupt */
269 reg = readl(q->iobase + QUADSPI_FR);
270 writel(reg, q->iobase + QUADSPI_FR);
271
272 if (reg & QUADSPI_FR_TFF_MASK)
273 complete(&q->c);
274
275 dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", q->chip_base_addr, reg);
276 return IRQ_HANDLED;
277}
278
279static void fsl_qspi_init_lut(struct fsl_qspi *q)
280{
281 void __iomem *base = q->iobase;
282 int rxfifo = q->devtype_data->rxfifo;
283 u32 lut_base;
284 u8 cmd, addrlen, dummy;
285 int i;
286
287 fsl_qspi_unlock_lut(q);
288
289 /* Clear all the LUT table */
290 for (i = 0; i < QUADSPI_LUT_NUM; i++)
291 writel(0, base + QUADSPI_LUT_BASE + i * 4);
292
293 /* Quad Read */
294 lut_base = SEQID_QUAD_READ * 4;
295
296 if (q->nor_size <= SZ_16M) {
297 cmd = SPINOR_OP_READ_1_1_4;
298 addrlen = ADDR24BIT;
299 dummy = 8;
300 } else {
301 /* use the 4-byte address */
302 cmd = SPINOR_OP_READ_1_1_4;
303 addrlen = ADDR32BIT;
304 dummy = 8;
305 }
306
307 writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
308 base + QUADSPI_LUT(lut_base));
309 writel(LUT0(DUMMY, PAD1, dummy) | LUT1(READ, PAD4, rxfifo),
310 base + QUADSPI_LUT(lut_base + 1));
311
312 /* Write enable */
313 lut_base = SEQID_WREN * 4;
314 writel(LUT0(CMD, PAD1, SPINOR_OP_WREN), base + QUADSPI_LUT(lut_base));
315
316 /* Page Program */
317 lut_base = SEQID_PP * 4;
318
319 if (q->nor_size <= SZ_16M) {
320 cmd = SPINOR_OP_PP;
321 addrlen = ADDR24BIT;
322 } else {
323 /* use the 4-byte address */
324 cmd = SPINOR_OP_PP;
325 addrlen = ADDR32BIT;
326 }
327
328 writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
329 base + QUADSPI_LUT(lut_base));
330 writel(LUT0(WRITE, PAD1, 0), base + QUADSPI_LUT(lut_base + 1));
331
332 /* Read Status */
333 lut_base = SEQID_RDSR * 4;
334 writel(LUT0(CMD, PAD1, SPINOR_OP_RDSR) | LUT1(READ, PAD1, 0x1),
335 base + QUADSPI_LUT(lut_base));
336
337 /* Erase a sector */
338 lut_base = SEQID_SE * 4;
339
340 if (q->nor_size <= SZ_16M) {
341 cmd = SPINOR_OP_SE;
342 addrlen = ADDR24BIT;
343 } else {
344 /* use the 4-byte address */
345 cmd = SPINOR_OP_SE;
346 addrlen = ADDR32BIT;
347 }
348
349 writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
350 base + QUADSPI_LUT(lut_base));
351
352 /* Erase the whole chip */
353 lut_base = SEQID_CHIP_ERASE * 4;
354 writel(LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE),
355 base + QUADSPI_LUT(lut_base));
356
357 /* READ ID */
358 lut_base = SEQID_RDID * 4;
359 writel(LUT0(CMD, PAD1, SPINOR_OP_RDID) | LUT1(READ, PAD1, 0x8),
360 base + QUADSPI_LUT(lut_base));
361
362 /* Write Register */
363 lut_base = SEQID_WRSR * 4;
364 writel(LUT0(CMD, PAD1, SPINOR_OP_WRSR) | LUT1(WRITE, PAD1, 0x2),
365 base + QUADSPI_LUT(lut_base));
366
367 /* Read Configuration Register */
368 lut_base = SEQID_RDCR * 4;
369 writel(LUT0(CMD, PAD1, SPINOR_OP_RDCR) | LUT1(READ, PAD1, 0x1),
370 base + QUADSPI_LUT(lut_base));
371
372 /* Write disable */
373 lut_base = SEQID_WRDI * 4;
374 writel(LUT0(CMD, PAD1, SPINOR_OP_WRDI), base + QUADSPI_LUT(lut_base));
375
376 /* Enter 4 Byte Mode (Micron) */
377 lut_base = SEQID_EN4B * 4;
378 writel(LUT0(CMD, PAD1, SPINOR_OP_EN4B), base + QUADSPI_LUT(lut_base));
379
380 /* Enter 4 Byte Mode (Spansion) */
381 lut_base = SEQID_BRWR * 4;
382 writel(LUT0(CMD, PAD1, SPINOR_OP_BRWR), base + QUADSPI_LUT(lut_base));
383
384 fsl_qspi_lock_lut(q);
385}
386
387/* Get the SEQID for the command */
388static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
389{
390 switch (cmd) {
391 case SPINOR_OP_READ_1_1_4:
392 return SEQID_QUAD_READ;
393 case SPINOR_OP_WREN:
394 return SEQID_WREN;
395 case SPINOR_OP_WRDI:
396 return SEQID_WRDI;
397 case SPINOR_OP_RDSR:
398 return SEQID_RDSR;
399 case SPINOR_OP_SE:
400 return SEQID_SE;
401 case SPINOR_OP_CHIP_ERASE:
402 return SEQID_CHIP_ERASE;
403 case SPINOR_OP_PP:
404 return SEQID_PP;
405 case SPINOR_OP_RDID:
406 return SEQID_RDID;
407 case SPINOR_OP_WRSR:
408 return SEQID_WRSR;
409 case SPINOR_OP_RDCR:
410 return SEQID_RDCR;
411 case SPINOR_OP_EN4B:
412 return SEQID_EN4B;
413 case SPINOR_OP_BRWR:
414 return SEQID_BRWR;
415 default:
416 dev_err(q->dev, "Unsupported cmd 0x%.2x\n", cmd);
417 break;
418 }
419 return -EINVAL;
420}
421
422static int
423fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len)
424{
425 void __iomem *base = q->iobase;
426 int seqid;
427 u32 reg, reg2;
428 int err;
429
430 init_completion(&q->c);
431 dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len:%d, cmd:%.2x\n",
432 q->chip_base_addr, addr, len, cmd);
433
434 /* save the reg */
435 reg = readl(base + QUADSPI_MCR);
436
437 writel(q->memmap_phy + q->chip_base_addr + addr, base + QUADSPI_SFAR);
438 writel(QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS,
439 base + QUADSPI_RBCT);
440 writel(reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR);
441
442 do {
443 reg2 = readl(base + QUADSPI_SR);
444 if (reg2 & (QUADSPI_SR_IP_ACC_MASK | QUADSPI_SR_AHB_ACC_MASK)) {
445 udelay(1);
446 dev_dbg(q->dev, "The controller is busy, 0x%x\n", reg2);
447 continue;
448 }
449 break;
450 } while (1);
451
452 /* trigger the LUT now */
453 seqid = fsl_qspi_get_seqid(q, cmd);
454 writel((seqid << QUADSPI_IPCR_SEQID_SHIFT) | len, base + QUADSPI_IPCR);
455
456 /* Wait for the interrupt. */
457 err = wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000));
458 if (!err) {
459 dev_err(q->dev,
460 "cmd 0x%.2x timeout, addr@%.8x, FR:0x%.8x, SR:0x%.8x\n",
461 cmd, addr, readl(base + QUADSPI_FR),
462 readl(base + QUADSPI_SR));
463 err = -ETIMEDOUT;
464 } else {
465 err = 0;
466 }
467
468 /* restore the MCR */
469 writel(reg, base + QUADSPI_MCR);
470
471 return err;
472}
473
474/* Read out the data from the QUADSPI_RBDR buffer registers. */
475static void fsl_qspi_read_data(struct fsl_qspi *q, int len, u8 *rxbuf)
476{
477 u32 tmp;
478 int i = 0;
479
480 while (len > 0) {
481 tmp = readl(q->iobase + QUADSPI_RBDR + i * 4);
482 tmp = fsl_qspi_endian_xchg(q, tmp);
483 dev_dbg(q->dev, "chip addr:0x%.8x, rcv:0x%.8x\n",
484 q->chip_base_addr, tmp);
485
486 if (len >= 4) {
487 *((u32 *)rxbuf) = tmp;
488 rxbuf += 4;
489 } else {
490 memcpy(rxbuf, &tmp, len);
491 break;
492 }
493
494 len -= 4;
495 i++;
496 }
497}
498
499/*
500 * If we have changed the content of the flash by writing or erasing,
501 * we need to invalidate the AHB buffer. If we do not do so, we may read out
502 * the wrong data. The spec tells us to reset the AHB domain and Serial Flash
503 * domain at the same time.
504 */
505static inline void fsl_qspi_invalid(struct fsl_qspi *q)
506{
507 u32 reg;
508
509 reg = readl(q->iobase + QUADSPI_MCR);
510 reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK;
511 writel(reg, q->iobase + QUADSPI_MCR);
512
513 /*
514 * The minimum delay: 1 AHB + 2 SFCK clocks.
515 * A delay of 1 us is enough.
516 */
517 udelay(1);
518
519 reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK);
520 writel(reg, q->iobase + QUADSPI_MCR);
521}
522
523static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
524 u8 opcode, unsigned int to, u32 *txbuf,
525 unsigned count, size_t *retlen)
526{
527 int ret, i, j;
528 u32 tmp;
529
530 dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len : %d\n",
531 q->chip_base_addr, to, count);
532
533 /* clear the TX FIFO. */
534 tmp = readl(q->iobase + QUADSPI_MCR);
535 writel(tmp | QUADSPI_MCR_CLR_RXF_MASK, q->iobase + QUADSPI_MCR);
536
537 /* fill the TX data to the FIFO */
538 for (j = 0, i = ((count + 3) / 4); j < i; j++) {
539 tmp = fsl_qspi_endian_xchg(q, *txbuf);
540 writel(tmp, q->iobase + QUADSPI_TBDR);
541 txbuf++;
542 }
543
544 /* Trigger it */
545 ret = fsl_qspi_runcmd(q, opcode, to, count);
546
547 if (ret == 0 && retlen)
548 *retlen += count;
549
550 return ret;
551}
552
553static void fsl_qspi_set_map_addr(struct fsl_qspi *q)
554{
555 int nor_size = q->nor_size;
556 void __iomem *base = q->iobase;
557
558 writel(nor_size + q->memmap_phy, base + QUADSPI_SFA1AD);
559 writel(nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD);
560 writel(nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD);
561 writel(nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD);
562}
563
564/*
565 * There are two different ways to read out the data from the flash:
566 * the "IP Command Read" and the "AHB Command Read".
567 *
568 * The IC guy suggests we use the "AHB Command Read", which is faster
569 * than the "IP Command Read". (What's more, there is a bug in
570 * the "IP Command Read" on the Vybrid.)
571 *
572 * After we set up the registers for the "AHB Command Read", we can use
573 * the memcpy to read the data directly. A "missed" access to the buffer
574 * causes the controller to clear the buffer and use the sequence pointed
575 * to by QUADSPI_BFGENCR[SEQID] to initiate a read from the flash.
576 */
577static void fsl_qspi_init_abh_read(struct fsl_qspi *q)
578{
579 void __iomem *base = q->iobase;
580 int seqid;
581
582 /* AHB configuration for access buffer 0/1/2 .*/
583 writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR);
584 writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR);
585 writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR);
586 writel(QUADSPI_BUF3CR_ALLMST, base + QUADSPI_BUF3CR);
587
588 /* We only use the buffer3 */
589 writel(0, base + QUADSPI_BUF0IND);
590 writel(0, base + QUADSPI_BUF1IND);
591 writel(0, base + QUADSPI_BUF2IND);
592
593 /* Set the default lut sequence for AHB Read. */
594 seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode);
595 writel(seqid << QUADSPI_BFGENCR_SEQID_SHIFT,
596 q->iobase + QUADSPI_BFGENCR);
597}
598
599/* We use this function to do some basic init for spi_nor_scan(). */
600static int fsl_qspi_nor_setup(struct fsl_qspi *q)
601{
602 void __iomem *base = q->iobase;
603 u32 reg;
604 int ret;
605
606 /* the default frequency, we will change it in the future.*/
607 ret = clk_set_rate(q->clk, 66000000);
608 if (ret)
609 return ret;
610
611 /* Init the LUT table. */
612 fsl_qspi_init_lut(q);
613
614 /* Disable the module */
615 writel(QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
616 base + QUADSPI_MCR);
617
618 reg = readl(base + QUADSPI_SMPR);
619 writel(reg & ~(QUADSPI_SMPR_FSDLY_MASK
620 | QUADSPI_SMPR_FSPHS_MASK
621 | QUADSPI_SMPR_HSENA_MASK
622 | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR);
623
624 /* Enable the module */
625 writel(QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
626 base + QUADSPI_MCR);
627
628 /* enable the interrupt */
629 writel(QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
630
631 return 0;
632}
633
634static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
635{
636 unsigned long rate = q->clk_rate;
637 int ret;
638
639 if (is_imx6sx_qspi(q))
640 rate *= 4;
641
642 ret = clk_set_rate(q->clk, rate);
643 if (ret)
644 return ret;
645
646 /* Init the LUT table again. */
647 fsl_qspi_init_lut(q);
648
649 /* Init for AHB read */
650 fsl_qspi_init_abh_read(q);
651
652 return 0;
653}
654
655static struct of_device_id fsl_qspi_dt_ids[] = {
656 { .compatible = "fsl,vf610-qspi", .data = (void *)&vybrid_data, },
657 { .compatible = "fsl,imx6sx-qspi", .data = (void *)&imx6sx_data, },
658 { /* sentinel */ }
659};
660MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
661
662static void fsl_qspi_set_base_addr(struct fsl_qspi *q, struct spi_nor *nor)
663{
664 q->chip_base_addr = q->nor_size * (nor - q->nor);
665}
666
667static int fsl_qspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
668{
669 int ret;
670 struct fsl_qspi *q = nor->priv;
671
672 ret = fsl_qspi_runcmd(q, opcode, 0, len);
673 if (ret)
674 return ret;
675
676 fsl_qspi_read_data(q, len, buf);
677 return 0;
678}
679
680static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len,
681 int write_enable)
682{
683 struct fsl_qspi *q = nor->priv;
684 int ret;
685
686 if (!buf) {
687 ret = fsl_qspi_runcmd(q, opcode, 0, 1);
688 if (ret)
689 return ret;
690
691 if (opcode == SPINOR_OP_CHIP_ERASE)
692 fsl_qspi_invalid(q);
693
694 } else if (len > 0) {
695 ret = fsl_qspi_nor_write(q, nor, opcode, 0,
696 (u32 *)buf, len, NULL);
697 } else {
698 dev_err(q->dev, "invalid cmd %d\n", opcode);
699 ret = -EINVAL;
700 }
701
702 return ret;
703}
704
705static void fsl_qspi_write(struct spi_nor *nor, loff_t to,
706 size_t len, size_t *retlen, const u_char *buf)
707{
708 struct fsl_qspi *q = nor->priv;
709
710 fsl_qspi_nor_write(q, nor, nor->program_opcode, to,
711 (u32 *)buf, len, retlen);
712
713 /* invalidate the data in the AHB buffer. */
714 fsl_qspi_invalid(q);
715}
716
717static int fsl_qspi_read(struct spi_nor *nor, loff_t from,
718 size_t len, size_t *retlen, u_char *buf)
719{
720 struct fsl_qspi *q = nor->priv;
721 u8 cmd = nor->read_opcode;
722 int ret;
723
724 dev_dbg(q->dev, "cmd [%x],read from (0x%p, 0x%.8x, 0x%.8x),len:%d\n",
725 cmd, q->ahb_base, q->chip_base_addr, (unsigned int)from, len);
726
727 /* Wait until the previous command is finished. */
728 ret = nor->wait_till_ready(nor);
729 if (ret)
730 return ret;
731
732 /* Read out the data directly from the AHB buffer.*/
733 memcpy(buf, q->ahb_base + q->chip_base_addr + from, len);
734
735 *retlen += len;
736 return 0;
737}
738
739static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs)
740{
741 struct fsl_qspi *q = nor->priv;
742 int ret;
743
744 dev_dbg(nor->dev, "%dKiB at 0x%08x:0x%08x\n",
745 nor->mtd->erasesize / 1024, q->chip_base_addr, (u32)offs);
746
747 /* Wait until the previous write command has finished. */
748 ret = nor->wait_till_ready(nor);
749 if (ret)
750 return ret;
751
752 /* Send write enable, then erase commands. */
753 ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0, 0);
754 if (ret)
755 return ret;
756
757 ret = fsl_qspi_runcmd(q, nor->erase_opcode, offs, 0);
758 if (ret)
759 return ret;
760
761 fsl_qspi_invalid(q);
762 return 0;
763}
764
765static int fsl_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
766{
767 struct fsl_qspi *q = nor->priv;
768 int ret;
769
770 ret = clk_enable(q->clk_en);
771 if (ret)
772 return ret;
773
774 ret = clk_enable(q->clk);
775 if (ret) {
776 clk_disable(q->clk_en);
777 return ret;
778 }
779
780 fsl_qspi_set_base_addr(q, nor);
781 return 0;
782}
783
784static void fsl_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
785{
786 struct fsl_qspi *q = nor->priv;
787
788 clk_disable(q->clk);
789 clk_disable(q->clk_en);
790}
791
792static int fsl_qspi_probe(struct platform_device *pdev)
793{
794 struct device_node *np = pdev->dev.of_node;
795 struct mtd_part_parser_data ppdata;
796 struct device *dev = &pdev->dev;
797 struct fsl_qspi *q;
798 struct resource *res;
799 struct spi_nor *nor;
800 struct mtd_info *mtd;
801 int ret, i = 0;
802 bool has_second_chip = false;
803 const struct of_device_id *of_id =
804 of_match_device(fsl_qspi_dt_ids, &pdev->dev);
805
806 q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
807 if (!q)
808 return -ENOMEM;
809
810 q->nor_num = of_get_child_count(dev->of_node);
811 if (!q->nor_num || q->nor_num > FSL_QSPI_MAX_CHIP)
812 return -ENODEV;
813
814 /* find the resources */
815 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
816 q->iobase = devm_ioremap_resource(dev, res);
817 if (IS_ERR(q->iobase)) {
818 ret = PTR_ERR(q->iobase);
819 goto map_failed;
820 }
821
822 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
823 "QuadSPI-memory");
824 q->ahb_base = devm_ioremap_resource(dev, res);
825 if (IS_ERR(q->ahb_base)) {
826 ret = PTR_ERR(q->ahb_base);
827 goto map_failed;
828 }
829 q->memmap_phy = res->start;
830
831 /* find the clocks */
832 q->clk_en = devm_clk_get(dev, "qspi_en");
833 if (IS_ERR(q->clk_en)) {
834 ret = PTR_ERR(q->clk_en);
835 goto map_failed;
836 }
837
838 q->clk = devm_clk_get(dev, "qspi");
839 if (IS_ERR(q->clk)) {
840 ret = PTR_ERR(q->clk);
841 goto map_failed;
842 }
843
844 ret = clk_prepare_enable(q->clk_en);
845 if (ret) {
846 dev_err(dev, "can not enable the qspi_en clock\n");
847 goto map_failed;
848 }
849
850 ret = clk_prepare_enable(q->clk);
851 if (ret) {
852 clk_disable_unprepare(q->clk_en);
853 dev_err(dev, "can not enable the qspi clock\n");
854 goto map_failed;
855 }
856
857 /* find the irq */
858 ret = platform_get_irq(pdev, 0);
859 if (ret < 0) {
860 dev_err(dev, "failed to get the irq\n");
861 goto irq_failed;
862 }
863
864 ret = devm_request_irq(dev, ret,
865 fsl_qspi_irq_handler, 0, pdev->name, q);
866 if (ret) {
867 dev_err(dev, "failed to request irq.\n");
868 goto irq_failed;
869 }
870
871 q->dev = dev;
872 q->devtype_data = (struct fsl_qspi_devtype_data *)of_id->data;
873 platform_set_drvdata(pdev, q);
874
875 ret = fsl_qspi_nor_setup(q);
876 if (ret)
877 goto irq_failed;
878
879 if (of_get_property(np, "fsl,qspi-has-second-chip", NULL))
880 has_second_chip = true;
881
882 /* iterate the subnodes. */
883 for_each_available_child_of_node(dev->of_node, np) {
884 const struct spi_device_id *id;
885 char modalias[40];
886
887 /* skip the holes */
888 if (!has_second_chip)
889 i *= 2;
890
891 nor = &q->nor[i];
892 mtd = &q->mtd[i];
893
894 nor->mtd = mtd;
895 nor->dev = dev;
896 nor->priv = q;
897 mtd->priv = nor;
898
899 /* fill the hooks */
900 nor->read_reg = fsl_qspi_read_reg;
901 nor->write_reg = fsl_qspi_write_reg;
902 nor->read = fsl_qspi_read;
903 nor->write = fsl_qspi_write;
904 nor->erase = fsl_qspi_erase;
905
906 nor->prepare = fsl_qspi_prep;
907 nor->unprepare = fsl_qspi_unprep;
908
909 if (of_modalias_node(np, modalias, sizeof(modalias)) < 0)
910 goto map_failed;
911
912 id = spi_nor_match_id(modalias);
913 if (!id)
914 goto map_failed;
915
916 ret = of_property_read_u32(np, "spi-max-frequency",
917 &q->clk_rate);
918 if (ret < 0)
919 goto map_failed;
920
921 /* set the chip address for READID */
922 fsl_qspi_set_base_addr(q, nor);
923
924 ret = spi_nor_scan(nor, id, SPI_NOR_QUAD);
925 if (ret)
926 goto map_failed;
927
928 ppdata.of_node = np;
929 ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
930 if (ret)
931 goto map_failed;
932
933 /* Set the correct NOR size now. */
934 if (q->nor_size == 0) {
935 q->nor_size = mtd->size;
936
937 /* Map the SPI NOR to an accessible address */
938 fsl_qspi_set_map_addr(q);
939 }
940
941 /*
942 * The TX FIFO is 64 bytes in the Vybrid, but a Page Program
943 * may write up to 256 bytes at a time. The write works in
944 * units of the TX FIFO size, not in units of the SPI NOR's
945 * page size.
946 *
947 * So shrink spi_nor->page_size if it is larger than the
948 * TX FIFO.
949 */
950 if (nor->page_size > q->devtype_data->txfifo)
951 nor->page_size = q->devtype_data->txfifo;
952
953 i++;
954 }
955
956 /* finish the rest of the init. */
957 ret = fsl_qspi_nor_setup_last(q);
958 if (ret)
959 goto last_init_failed;
960
961 clk_disable(q->clk);
962 clk_disable(q->clk_en);
963 dev_info(dev, "QuadSPI SPI NOR flash driver\n");
964 return 0;
965
966last_init_failed:
967 for (i = 0; i < q->nor_num; i++)
968 mtd_device_unregister(&q->mtd[i]);
969
970irq_failed:
971 clk_disable_unprepare(q->clk);
972 clk_disable_unprepare(q->clk_en);
973map_failed:
974 dev_err(dev, "Freescale QuadSPI probe failed\n");
975 return ret;
976}
977
978static int fsl_qspi_remove(struct platform_device *pdev)
979{
980 struct fsl_qspi *q = platform_get_drvdata(pdev);
981 int i;
982
983 for (i = 0; i < q->nor_num; i++)
984 mtd_device_unregister(&q->mtd[i]);
985
986 /* disable the hardware */
987 writel(QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
988 writel(0x0, q->iobase + QUADSPI_RSER);
989
990 clk_unprepare(q->clk);
991 clk_unprepare(q->clk_en);
992 return 0;
993}
994
995static struct platform_driver fsl_qspi_driver = {
996 .driver = {
997 .name = "fsl-quadspi",
998 .bus = &platform_bus_type,
999 .owner = THIS_MODULE,
1000 .of_match_table = fsl_qspi_dt_ids,
1001 },
1002 .probe = fsl_qspi_probe,
1003 .remove = fsl_qspi_remove,
1004};
1005module_platform_driver(fsl_qspi_driver);
1006
1007MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver");
1008MODULE_AUTHOR("Freescale Semiconductor Inc.");
1009MODULE_LICENSE("GPL v2");
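[Editor's note] The probe loop above also illustrates the contract between a controller driver and the new spi-nor framework. A minimal, hypothetical sketch of that hookup follows; the function name and the "cs" parameter are editorial, clocking, devicetree parsing and error paths are omitted, and the hooks and helpers are the ones defined in the driver above:

static int example_hookup(struct device *dev, struct fsl_qspi *q, int cs,
			  const struct spi_device_id *id)
{
	struct spi_nor *nor = &q->nor[cs];
	struct mtd_info *mtd = &q->mtd[cs];
	int ret;

	/* Tie the spi_nor and mtd_info instances together. */
	nor->mtd = mtd;
	nor->dev = dev;
	nor->priv = q;
	mtd->priv = nor;

	/* Low-level accessors the spi-nor core calls back into. */
	nor->read_reg = fsl_qspi_read_reg;
	nor->write_reg = fsl_qspi_write_reg;
	nor->read = fsl_qspi_read;
	nor->write = fsl_qspi_write;
	nor->erase = fsl_qspi_erase;
	nor->prepare = fsl_qspi_prep;
	nor->unprepare = fsl_qspi_unprep;

	/* Let the core identify the flash and fill in the MTD geometry. */
	ret = spi_nor_scan(nor, id, SPI_NOR_QUAD);
	if (ret)
		return ret;

	/* Register the resulting MTD device as usual. */
	return mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
}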
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
new file mode 100644
index 000000000000..c713c8656710
--- /dev/null
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -0,0 +1,1107 @@
1/*
2 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
3 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
4 *
5 * Copyright (C) 2005, Intec Automation Inc.
6 * Copyright (C) 2014, Freescale Semiconductor, Inc.
7 *
8 * This code is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/err.h>
14#include <linux/errno.h>
15#include <linux/module.h>
16#include <linux/device.h>
17#include <linux/mutex.h>
18#include <linux/math64.h>
19
20#include <linux/mtd/cfi.h>
21#include <linux/mtd/mtd.h>
22#include <linux/of_platform.h>
23#include <linux/spi/flash.h>
24#include <linux/mtd/spi-nor.h>
25
26/* Define max times to check status register before we give up. */
27#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
28
29#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
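/*
 * Editor's note -- worked example, not in the original: JEDEC_MFR()
 * extracts the manufacturer byte from the 24-bit JEDEC id used in the
 * flash_info table below, e.g. JEDEC_MFR(0xc22019) == 0xc2
 * (CFI_MFR_MACRONIX); set_4byte() uses this to pick the right
 * 4-byte-addressing method per vendor.
 */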
30
31/*
32 * Read the status register and return its value.
33 *
34 * Returns negative if error occurred.
35 */
36static int read_sr(struct spi_nor *nor)
37{
38 int ret;
39 u8 val;
40
41 ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1);
42 if (ret < 0) {
43 pr_err("error %d reading SR\n", (int) ret);
44 return ret;
45 }
46
47 return val;
48}
49
50/*
51 * Read the configuration register and return its value.
52 *
53 * Returns negative if error occurred.
54 */
55static int read_cr(struct spi_nor *nor)
56{
57 int ret;
58 u8 val;
59
60 ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1);
61 if (ret < 0) {
62 dev_err(nor->dev, "error %d reading CR\n", ret);
63 return ret;
64 }
65
66 return val;
67}
68
69/*
70 * Dummy Cycle calculation for different types of read.
71 * It can be used to support more commands with
72 * different dummy cycle requirements.
73 */
74static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor)
75{
76 switch (nor->flash_read) {
77 case SPI_NOR_FAST:
78 case SPI_NOR_DUAL:
79 case SPI_NOR_QUAD:
80 return 1;
81 case SPI_NOR_NORMAL:
82 return 0;
83 }
84 return 0;
85}
86
87/*
88 * Write status register 1 byte
89 * Returns negative if error occurred.
90 */
91static inline int write_sr(struct spi_nor *nor, u8 val)
92{
93 nor->cmd_buf[0] = val;
94 return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1, 0);
95}
96
97/*
98 * Set write enable latch with Write Enable command.
99 * Returns negative if error occurred.
100 */
101static inline int write_enable(struct spi_nor *nor)
102{
103 return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0, 0);
104}
105
106/*
107 * Send write disable instruction to the chip.
108 */
109static inline int write_disable(struct spi_nor *nor)
110{
111 return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0, 0);
112}
113
114static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
115{
116 return mtd->priv;
117}
118
119/* Enable/disable 4-byte addressing mode. */
120static inline int set_4byte(struct spi_nor *nor, u32 jedec_id, int enable)
121{
122 int status;
123 bool need_wren = false;
124 u8 cmd;
125
126 switch (JEDEC_MFR(jedec_id)) {
127 case CFI_MFR_ST: /* Micron, actually */
128 /* Some Micron need WREN command; all will accept it */
129 need_wren = true;
130 case CFI_MFR_MACRONIX:
131 case 0xEF /* winbond */:
132 if (need_wren)
133 write_enable(nor);
134
135 cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
136 status = nor->write_reg(nor, cmd, NULL, 0, 0);
137 if (need_wren)
138 write_disable(nor);
139
140 return status;
141 default:
142 /* Spansion style */
143 nor->cmd_buf[0] = enable << 7;
144 return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1, 0);
145 }
146}
147
148static int spi_nor_wait_till_ready(struct spi_nor *nor)
149{
150 unsigned long deadline;
151 int sr;
152
153 deadline = jiffies + MAX_READY_WAIT_JIFFIES;
154
155 do {
156 cond_resched();
157
158 sr = read_sr(nor);
159 if (sr < 0)
160 break;
161 else if (!(sr & SR_WIP))
162 return 0;
163 } while (!time_after_eq(jiffies, deadline));
164
165 return -ETIMEDOUT;
166}
167
168/*
169 * Service routine to read status register until ready, or timeout occurs.
170 * Returns non-zero if error.
171 */
172static int wait_till_ready(struct spi_nor *nor)
173{
174 return nor->wait_till_ready(nor);
175}
176
177/*
178 * Erase the whole flash memory
179 *
180 * Returns 0 if successful, non-zero otherwise.
181 */
182static int erase_chip(struct spi_nor *nor)
183{
184 int ret;
185
186 dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd->size >> 10));
187
188 /* Wait until the previous write command has finished. */
189 ret = wait_till_ready(nor);
190 if (ret)
191 return ret;
192
193 /* Send write enable, then erase commands. */
194 write_enable(nor);
195
196 return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0, 0);
197}
198
199static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
200{
201 int ret = 0;
202
203 mutex_lock(&nor->lock);
204
205 if (nor->prepare) {
206 ret = nor->prepare(nor, ops);
207 if (ret) {
208 dev_err(nor->dev, "failed in the preparation.\n");
209 mutex_unlock(&nor->lock);
210 return ret;
211 }
212 }
213 return ret;
214}
215
216static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
217{
218 if (nor->unprepare)
219 nor->unprepare(nor, ops);
220 mutex_unlock(&nor->lock);
221}
222
223/*
224 * Erase an address range on the nor chip. The address range may span
225 * one or more erase sectors. Return an error if there is a problem erasing.
226 */
227static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
228{
229 struct spi_nor *nor = mtd_to_spi_nor(mtd);
230 u32 addr, len;
231 uint32_t rem;
232 int ret;
233
234 dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
235 (long long)instr->len);
236
237 div_u64_rem(instr->len, mtd->erasesize, &rem);
238 if (rem)
239 return -EINVAL;
240
241 addr = instr->addr;
242 len = instr->len;
243
244 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
245 if (ret)
246 return ret;
247
248 /* whole-chip erase? */
249 if (len == mtd->size) {
250 if (erase_chip(nor)) {
251 ret = -EIO;
252 goto erase_err;
253 }
254
255 /* REVISIT in some cases we could speed up erasing large regions
256 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
257 * to use "small sector erase", but that's not always optimal.
258 */
259
260 /* "sector"-at-a-time erase */
261 } else {
262 while (len) {
263 if (nor->erase(nor, addr)) {
264 ret = -EIO;
265 goto erase_err;
266 }
267
268 addr += mtd->erasesize;
269 len -= mtd->erasesize;
270 }
271 }
272
273 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
274
275 instr->state = MTD_ERASE_DONE;
276 mtd_erase_callback(instr);
277
278 return ret;
279
280erase_err:
281 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
282 instr->state = MTD_ERASE_FAILED;
283 return ret;
284}
285
286static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
287{
288 struct spi_nor *nor = mtd_to_spi_nor(mtd);
289 uint32_t offset = ofs;
290 uint8_t status_old, status_new;
291 int ret = 0;
292
293 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
294 if (ret)
295 return ret;
296
297 /* Wait until the previous command has finished */
298 ret = wait_till_ready(nor);
299 if (ret)
300 goto err;
301
302 status_old = read_sr(nor);
303
304 if (offset < mtd->size - (mtd->size / 2))
305 status_new = status_old | SR_BP2 | SR_BP1 | SR_BP0;
306 else if (offset < mtd->size - (mtd->size / 4))
307 status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
308 else if (offset < mtd->size - (mtd->size / 8))
309 status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
310 else if (offset < mtd->size - (mtd->size / 16))
311 status_new = (status_old & ~(SR_BP0 | SR_BP1)) | SR_BP2;
312 else if (offset < mtd->size - (mtd->size / 32))
313 status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
314 else if (offset < mtd->size - (mtd->size / 64))
315 status_new = (status_old & ~(SR_BP2 | SR_BP0)) | SR_BP1;
316 else
317 status_new = (status_old & ~(SR_BP2 | SR_BP1)) | SR_BP0;
318
319 /* Only modify protection if it will not unlock other areas */
320 if ((status_new & (SR_BP2 | SR_BP1 | SR_BP0)) >
321 (status_old & (SR_BP2 | SR_BP1 | SR_BP0))) {
322 write_enable(nor);
323 ret = write_sr(nor, status_new);
324 if (ret)
325 goto err;
326 }
327
328err:
329 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
330 return ret;
331}
332
333static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
334{
335 struct spi_nor *nor = mtd_to_spi_nor(mtd);
336 uint32_t offset = ofs;
337 uint8_t status_old, status_new;
338 int ret = 0;
339
340 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
341 if (ret)
342 return ret;
343
344 /* Wait until the previous command has finished */
345 ret = wait_till_ready(nor);
346 if (ret)
347 goto err;
348
349 status_old = read_sr(nor);
350
351 if (offset+len > mtd->size - (mtd->size / 64))
352 status_new = status_old & ~(SR_BP2 | SR_BP1 | SR_BP0);
353 else if (offset+len > mtd->size - (mtd->size / 32))
354 status_new = (status_old & ~(SR_BP2 | SR_BP1)) | SR_BP0;
355 else if (offset+len > mtd->size - (mtd->size / 16))
356 status_new = (status_old & ~(SR_BP2 | SR_BP0)) | SR_BP1;
357 else if (offset+len > mtd->size - (mtd->size / 8))
358 status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
359 else if (offset+len > mtd->size - (mtd->size / 4))
360 status_new = (status_old & ~(SR_BP0 | SR_BP1)) | SR_BP2;
361 else if (offset+len > mtd->size - (mtd->size / 2))
362 status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
363 else
364 status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
365
366 /* Only modify protection if it will not lock other areas */
367 if ((status_new & (SR_BP2 | SR_BP1 | SR_BP0)) <
368 (status_old & (SR_BP2 | SR_BP1 | SR_BP0))) {
369 write_enable(nor);
370 ret = write_sr(nor, status_new);
371 if (ret)
372 goto err;
373 }
374
375err:
376 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
377 return ret;
378}
379
380struct flash_info {
381 /* JEDEC id zero means "no ID" (most older chips); otherwise it has
382 * a high byte of zero plus three data bytes: the manufacturer id,
383 * then a two byte device id.
384 */
385 u32 jedec_id;
386 u16 ext_id;
387
388 /* The size listed here is what works with SPINOR_OP_SE, which isn't
389 * necessarily called a "sector" by the vendor.
390 */
391 unsigned sector_size;
392 u16 n_sectors;
393
394 u16 page_size;
395 u16 addr_width;
396
397 u16 flags;
398#define SECT_4K 0x01 /* SPINOR_OP_BE_4K works uniformly */
399#define SPI_NOR_NO_ERASE 0x02 /* No erase command needed */
400#define SST_WRITE 0x04 /* use SST byte programming */
401#define SPI_NOR_NO_FR 0x08 /* Can't do fastread */
402#define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */
403#define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */
404#define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */
405};
406
407#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
408 ((kernel_ulong_t)&(struct flash_info) { \
409 .jedec_id = (_jedec_id), \
410 .ext_id = (_ext_id), \
411 .sector_size = (_sector_size), \
412 .n_sectors = (_n_sectors), \
413 .page_size = 256, \
414 .flags = (_flags), \
415 })
416
417#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
418 ((kernel_ulong_t)&(struct flash_info) { \
419 .sector_size = (_sector_size), \
420 .n_sectors = (_n_sectors), \
421 .page_size = (_page_size), \
422 .addr_width = (_addr_width), \
423 .flags = (_flags), \
424 })
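/*
 * Editor's note -- worked example, not in the original: an entry such as
 *
 *	{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
 *
 * describes a flash with manufacturer id 0xef and device id 0x4018,
 * 256 erase sectors of 64 KiB (16 MiB total), the 256-byte page size
 * that INFO() fills in by default, and uniform 4 KiB block-erase
 * support (SECT_4K).
 */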
425
426/* NOTE: double check command sets and memory organization when you add
427 * more nor chips. This current list focusses on newer chips, which
428 * have been converging on command sets which include JEDEC ID.
429 */
430const struct spi_device_id spi_nor_ids[] = {
431 /* Atmel -- some are (confusingly) marketed as "DataFlash" */
432 { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
433 { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
434
435 { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
436 { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
437 { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
438
439 { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
440 { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
441 { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
442 { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
443
444 { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
445
446 /* EON -- en25xxx */
447 { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
448 { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
449 { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
450 { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
451 { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
452 { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
453
454 /* ESMT */
455 { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
456
457 /* Everspin */
458 { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
459 { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
460
461 /* GigaDevice */
462 { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
463 { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
464
465 /* Intel/Numonyx -- xxxs33b */
466 { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
467 { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
468 { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
469
470 /* Macronix */
471 { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
472 { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
473 { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
474 { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
475 { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
476 { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
477 { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
478 { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
479 { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
480 { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
481 { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
482 { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) },
483 { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
484
485 /* Micron */
486 { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
487 { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
488 { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
489 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
490 { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K) },
491
492 /* PMC */
493 { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
494 { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
495 { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
496
497 /* Spansion -- single (large) sector size only, at least
498 * for the chips listed here (without boot sectors).
499 */
500 { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
501 { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) },
502 { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
503 { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
504 { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
505 { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
506 { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
507 { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
508 { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
509 { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
510 { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
511 { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
512 { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
513 { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
514 { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
515 { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
516 { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
517 { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
518
519 /* SST -- large erase sizes are "overlays", "sectors" are 4K */
520 { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
521 { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
522 { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
523 { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
524 { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
525 { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
526 { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
527 { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
528 { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
529
530 /* ST Microelectronics -- newer production may have feature updates */
531 { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
532 { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
533 { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
534 { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
535 { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
536 { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
537 { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
538 { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
539 { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
540 { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) },
541
542 { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
543 { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
544 { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
545 { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
546 { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
547 { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
548 { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
549 { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
550 { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
551
552 { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
553 { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
554 { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
555
556 { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
557 { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
558 { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
559
560 { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
561 { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
562 { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
563 { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
564 { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
565
566 /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
567 { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
568 { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
569 { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
570 { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
571 { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
572 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
573 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
574 { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K) },
575 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
576 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
577 { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
578 { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
579 { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
580 { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
581 { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
582
583 /* Catalyst / On Semiconductor -- non-JEDEC */
584 { "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
585 { "cat25c03", CAT25_INFO( 32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
586 { "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
587 { "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
588 { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
589 { },
590};
591EXPORT_SYMBOL_GPL(spi_nor_ids);
592
593static const struct spi_device_id *spi_nor_read_id(struct spi_nor *nor)
594{
595 int tmp;
596 u8 id[5];
597 u32 jedec;
598 u16 ext_jedec;
599 struct flash_info *info;
600
601 tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, 5);
602 if (tmp < 0) {
603 dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
604 return ERR_PTR(tmp);
605 }
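/*
 * RDID returns five bytes here: bytes 0-2 form the 24-bit JEDEC
 * manufacturer/device ID assembled below, and bytes 3-4 carry the
 * extended ID that some parts (e.g. Spansion) use to distinguish
 * sector layouts.
 */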
606 jedec = id[0];
607 jedec = jedec << 8;
608 jedec |= id[1];
609 jedec = jedec << 8;
610 jedec |= id[2];
611
612 ext_jedec = id[3] << 8 | id[4];
613
614 for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
615 info = (void *)spi_nor_ids[tmp].driver_data;
616 if (info->jedec_id == jedec) {
617 if (info->ext_id == 0 || info->ext_id == ext_jedec)
618 return &spi_nor_ids[tmp];
619 }
620 }
621 dev_err(nor->dev, "unrecognized JEDEC id %06x\n", jedec);
622 return ERR_PTR(-ENODEV);
623}
624
625static const struct spi_device_id *jedec_probe(struct spi_nor *nor)
626{
627 return nor->read_id(nor);
628}
629
630static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
631 size_t *retlen, u_char *buf)
632{
633 struct spi_nor *nor = mtd_to_spi_nor(mtd);
634 int ret;
635
636 dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
637
638 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
639 if (ret)
640 return ret;
641
642 ret = nor->read(nor, from, len, retlen, buf);
643
644 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
645 return ret;
646}
647
648static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
649 size_t *retlen, const u_char *buf)
650{
651 struct spi_nor *nor = mtd_to_spi_nor(mtd);
652 size_t actual;
653 int ret;
654
655 dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
656
657 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
658 if (ret)
659 return ret;
660
661 /* Wait until finished previous write command. */
662 ret = wait_till_ready(nor);
663 if (ret)
664 goto time_out;
665
666 write_enable(nor);
667
668 nor->sst_write_second = false;
669
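/*
 * SST parts program via the auto-address-increment (AAI) word
 * command, which moves two bytes per cycle: if the start address is
 * odd, a single byte-program aligns it first; the bulk is then
 * streamed two bytes at a time; any trailing odd byte is finished
 * with another byte-program.
 */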
670 actual = to % 2;
671 /* Start write from odd address. */
672 if (actual) {
673 nor->program_opcode = SPINOR_OP_BP;
674
675 /* write one byte. */
676 nor->write(nor, to, 1, retlen, buf);
677 ret = wait_till_ready(nor);
678 if (ret)
679 goto time_out;
680 }
681 to += actual;
682
683 /* Write out most of the data here. */
684 for (; actual < len - 1; actual += 2) {
685 nor->program_opcode = SPINOR_OP_AAI_WP;
686
687 /* write two bytes. */
688 nor->write(nor, to, 2, retlen, buf + actual);
689 ret = wait_till_ready(nor);
690 if (ret)
691 goto time_out;
692 to += 2;
693 nor->sst_write_second = true;
694 }
695 nor->sst_write_second = false;
696
697 write_disable(nor);
698 ret = wait_till_ready(nor);
699 if (ret)
700 goto time_out;
701
702 /* Write out trailing byte if it exists. */
703 if (actual != len) {
704 write_enable(nor);
705
706 nor->program_opcode = SPINOR_OP_BP;
707 nor->write(nor, to, 1, retlen, buf + actual);
708
709 ret = wait_till_ready(nor);
710 if (ret)
711 goto time_out;
712 write_disable(nor);
713 }
714time_out:
715 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
716 return ret;
717}
718
719/*
720 * Write an address range to the nor chip. Data must be written in
721 * FLASH_PAGESIZE chunks. The address range may be any size provided
722 * it is within the physical boundaries.
723 */
724static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
725 size_t *retlen, const u_char *buf)
726{
727 struct spi_nor *nor = mtd_to_spi_nor(mtd);
728 u32 page_offset, page_size, i;
729 int ret;
730
731 dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
732
733 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
734 if (ret)
735 return ret;
736
737 /* Wait until finished previous write command. */
738 ret = wait_till_ready(nor);
739 if (ret)
740 goto write_err;
741
742 write_enable(nor);
743
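/*
 * Page Program does not cross page boundaries on typical SPI NOR
 * parts (the address wraps within the page), so anything beyond the
 * first page is written in nor->page_size sized chunks below.
 */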
744 page_offset = to & (nor->page_size - 1);
745
746 /* do all the bytes fit onto one page? */
747 if (page_offset + len <= nor->page_size) {
748 nor->write(nor, to, len, retlen, buf);
749 } else {
750 /* the size of data remaining on the first page */
751 page_size = nor->page_size - page_offset;
752 nor->write(nor, to, page_size, retlen, buf);
753
754 /* write everything in nor->page_size chunks */
755 for (i = page_size; i < len; i += page_size) {
756 page_size = len - i;
757 if (page_size > nor->page_size)
758 page_size = nor->page_size;
759
760 ret = wait_till_ready(nor);
if (ret)
goto write_err;
761 write_enable(nor);
762
763 nor->write(nor, to + i, page_size, retlen, buf + i);
764 }
765 }
766
767write_err:
768 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
769 return ret;
770}
771
772static int macronix_quad_enable(struct spi_nor *nor)
773{
774 int ret, val;
775
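/*
 * Macronix keeps its quad-enable (QE) bit in the status register:
 * read-modify-write it via WRSR, then read the register back to
 * confirm the bit actually stuck.
 */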
776 val = read_sr(nor);
777 write_enable(nor);
778
779 nor->cmd_buf[0] = val | SR_QUAD_EN_MX;
780 nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1, 0);
781
782 ret = wait_till_ready(nor);
if (ret)
return ret;
784
785 ret = read_sr(nor);
786 if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
787 dev_err(nor->dev, "Macronix Quad bit not set\n");
788 return -EINVAL;
789 }
790
791 return 0;
792}
793
794/*
795 * Write the status register and configuration register with 2 bytes.
796 * The first byte will be written to the status register, while the
797 * second byte will be written to the configuration register.
798 * Return negative if error occurred.
799 */
800static int write_sr_cr(struct spi_nor *nor, u16 val)
801{
802 nor->cmd_buf[0] = val & 0xff;
803 nor->cmd_buf[1] = (val >> 8);
804
805 return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 2, 0);
806}
807
808static int spansion_quad_enable(struct spi_nor *nor)
809{
810 int ret;
811 int quad_en = CR_QUAD_EN_SPAN << 8;
812
813 write_enable(nor);
814
815 ret = write_sr_cr(nor, quad_en);
816 if (ret < 0) {
817 dev_err(nor->dev,
818 "error while writing configuration register\n");
819 return -EINVAL;
820 }
821
822 /* read back and check it */
823 ret = read_cr(nor);
824 if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
825 dev_err(nor->dev, "Spansion Quad bit not set\n");
826 return -EINVAL;
827 }
828
829 return 0;
830}
831
832static int set_quad_mode(struct spi_nor *nor, u32 jedec_id)
833{
834 int status;
835
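/*
 * Vendors place the quad-enable bit differently: Macronix uses a
 * status register bit, while Spansion-style parts (the default case
 * here) use a configuration register bit written together with the
 * status register.
 */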
836 switch (JEDEC_MFR(jedec_id)) {
837 case CFI_MFR_MACRONIX:
838 status = macronix_quad_enable(nor);
839 if (status) {
840 dev_err(nor->dev, "Macronix quad-read not enabled\n");
841 return -EINVAL;
842 }
843 return status;
844 default:
845 status = spansion_quad_enable(nor);
846 if (status) {
847 dev_err(nor->dev, "Spansion quad-read not enabled\n");
848 return -EINVAL;
849 }
850 return status;
851 }
852}
853
854static int spi_nor_check(struct spi_nor *nor)
855{
856 if (!nor->dev || !nor->read || !nor->write ||
857 !nor->read_reg || !nor->write_reg || !nor->erase) {
858 pr_err("spi-nor: please fill all the necessary fields!\n");
859 return -EINVAL;
860 }
861
862 if (!nor->read_id)
863 nor->read_id = spi_nor_read_id;
864 if (!nor->wait_till_ready)
865 nor->wait_till_ready = spi_nor_wait_till_ready;
866
867 return 0;
868}
869
870int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id,
871 enum read_mode mode)
872{
873 struct flash_info *info;
874 struct flash_platform_data *data;
875 struct device *dev = nor->dev;
876 struct mtd_info *mtd = nor->mtd;
877 struct device_node *np = dev->of_node;
878 int ret;
879 int i;
880
881 ret = spi_nor_check(nor);
882 if (ret)
883 return ret;
884
885 /* Platform data helps sort out which chip type we have, as
886 * well as how this board partitions it. If we don't have
887 * a chip ID, try the JEDEC id commands; they'll work for most
888 * newer chips, even if we don't recognize the particular chip.
889 */
890 data = dev_get_platdata(dev);
891 if (data && data->type) {
892 const struct spi_device_id *plat_id;
893
894 for (i = 0; i < ARRAY_SIZE(spi_nor_ids) - 1; i++) {
895 plat_id = &spi_nor_ids[i];
896 if (strcmp(data->type, plat_id->name))
897 continue;
898 break;
899 }
900
901 if (i < ARRAY_SIZE(spi_nor_ids) - 1)
902 id = plat_id;
903 else
904 dev_warn(dev, "unrecognized id %s\n", data->type);
905 }
906
907 info = (void *)id->driver_data;
908
909 if (info->jedec_id) {
910 const struct spi_device_id *jid;
911
912 jid = jedec_probe(nor);
913 if (IS_ERR(jid)) {
914 return PTR_ERR(jid);
915 } else if (jid != id) {
916 /*
917 * JEDEC knows better, so overwrite platform ID. We
918 * can't trust partitions any longer, but we'll let
919 * mtd apply them anyway, since some partitions may be
920 * marked read-only, and we don't want to lose that
921 * information, even if it's not 100% accurate.
922 */
923 dev_warn(dev, "found %s, expected %s\n",
924 jid->name, id->name);
925 id = jid;
926 info = (void *)jid->driver_data;
927 }
928 }
929
930 mutex_init(&nor->lock);
931
932 /*
933 * Atmel, SST and Intel/Numonyx serial NOR chips tend to power
934 * up with the software protection bits set
935 */
936
937 if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ATMEL ||
938 JEDEC_MFR(info->jedec_id) == CFI_MFR_INTEL ||
939 JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) {
940 write_enable(nor);
941 write_sr(nor, 0);
942 }
943
944 if (data && data->name)
945 mtd->name = data->name;
946 else
947 mtd->name = dev_name(dev);
948
949 mtd->type = MTD_NORFLASH;
950 mtd->writesize = 1;
951 mtd->flags = MTD_CAP_NORFLASH;
952 mtd->size = info->sector_size * info->n_sectors;
953 mtd->_erase = spi_nor_erase;
954 mtd->_read = spi_nor_read;
955
956 /* nor protection support for STmicro chips */
957 if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ST) {
958 mtd->_lock = spi_nor_lock;
959 mtd->_unlock = spi_nor_unlock;
960 }
961
962 /* sst nor chips use AAI word program */
963 if (info->flags & SST_WRITE)
964 mtd->_write = sst_write;
965 else
966 mtd->_write = spi_nor_write;
967
968 /* prefer "small sector" erase if possible */
969 if (info->flags & SECT_4K) {
970 nor->erase_opcode = SPINOR_OP_BE_4K;
971 mtd->erasesize = 4096;
972 } else if (info->flags & SECT_4K_PMC) {
973 nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
974 mtd->erasesize = 4096;
975 } else {
976 nor->erase_opcode = SPINOR_OP_SE;
977 mtd->erasesize = info->sector_size;
978 }
979
980 if (info->flags & SPI_NOR_NO_ERASE)
981 mtd->flags |= MTD_NO_ERASE;
982
983 mtd->dev.parent = dev;
984 nor->page_size = info->page_size;
985 mtd->writebufsize = nor->page_size;
986
987 if (np) {
988 /* If we were instantiated by DT, use it */
989 if (of_property_read_bool(np, "m25p,fast-read"))
990 nor->flash_read = SPI_NOR_FAST;
991 else
992 nor->flash_read = SPI_NOR_NORMAL;
993 } else {
994 /* If we weren't instantiated by DT, default to fast-read */
995 nor->flash_read = SPI_NOR_FAST;
996 }
997
998 /* Some devices cannot do fast-read, no matter what DT tells us */
999 if (info->flags & SPI_NOR_NO_FR)
1000 nor->flash_read = SPI_NOR_NORMAL;
1001
1002 /* Quad/Dual-read mode takes precedence over fast/normal */
1003 if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) {
1004 ret = set_quad_mode(nor, info->jedec_id);
1005 if (ret) {
1006 dev_err(dev, "quad mode not supported\n");
1007 return ret;
1008 }
1009 nor->flash_read = SPI_NOR_QUAD;
1010 } else if (mode == SPI_NOR_DUAL && info->flags & SPI_NOR_DUAL_READ) {
1011 nor->flash_read = SPI_NOR_DUAL;
1012 }
1013
1014 /* Default commands */
1015 switch (nor->flash_read) {
1016 case SPI_NOR_QUAD:
1017 nor->read_opcode = SPINOR_OP_READ_1_1_4;
1018 break;
1019 case SPI_NOR_DUAL:
1020 nor->read_opcode = SPINOR_OP_READ_1_1_2;
1021 break;
1022 case SPI_NOR_FAST:
1023 nor->read_opcode = SPINOR_OP_READ_FAST;
1024 break;
1025 case SPI_NOR_NORMAL:
1026 nor->read_opcode = SPINOR_OP_READ;
1027 break;
1028 default:
1029 dev_err(dev, "No Read opcode defined\n");
1030 return -EINVAL;
1031 }
1032
1033 nor->program_opcode = SPINOR_OP_PP;
1034
1035 if (info->addr_width)
1036 nor->addr_width = info->addr_width;
1037 else if (mtd->size > 0x1000000) {
1038 /* enable 4-byte addressing if the device exceeds 16MiB */
1039 nor->addr_width = 4;
1040 if (JEDEC_MFR(info->jedec_id) == CFI_MFR_AMD) {
1041 /* Dedicated 4-byte command set */
1042 switch (nor->flash_read) {
1043 case SPI_NOR_QUAD:
1044 nor->read_opcode = SPINOR_OP_READ4_1_1_4;
1045 break;
1046 case SPI_NOR_DUAL:
1047 nor->read_opcode = SPINOR_OP_READ4_1_1_2;
1048 break;
1049 case SPI_NOR_FAST:
1050 nor->read_opcode = SPINOR_OP_READ4_FAST;
1051 break;
1052 case SPI_NOR_NORMAL:
1053 nor->read_opcode = SPINOR_OP_READ4;
1054 break;
1055 }
1056 nor->program_opcode = SPINOR_OP_PP_4B;
1057 /* No small sector erase for 4-byte command set */
1058 nor->erase_opcode = SPINOR_OP_SE_4B;
1059 mtd->erasesize = info->sector_size;
1060 } else
1061 set_4byte(nor, info->jedec_id, 1);
1062 } else {
1063 nor->addr_width = 3;
1064 }
1065
1066 nor->read_dummy = spi_nor_read_dummy_cycles(nor);
1067
1068 dev_info(dev, "%s (%lld Kbytes)\n", id->name,
1069 (long long)mtd->size >> 10);
1070
1071 dev_dbg(dev,
1072 "mtd .name = %s, .size = 0x%llx (%lldMiB), "
1073 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
1074 mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
1075 mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
1076
1077 if (mtd->numeraseregions)
1078 for (i = 0; i < mtd->numeraseregions; i++)
1079 dev_dbg(dev,
1080 "mtd.eraseregions[%d] = { .offset = 0x%llx, "
1081 ".erasesize = 0x%.8x (%uKiB), "
1082 ".numblocks = %d }\n",
1083 i, (long long)mtd->eraseregions[i].offset,
1084 mtd->eraseregions[i].erasesize,
1085 mtd->eraseregions[i].erasesize / 1024,
1086 mtd->eraseregions[i].numblocks);
1087 return 0;
1088}
1089EXPORT_SYMBOL_GPL(spi_nor_scan);
1090
1091const struct spi_device_id *spi_nor_match_id(char *name)
1092{
1093 const struct spi_device_id *id = spi_nor_ids;
1094
1095 while (id->name[0]) {
1096 if (!strcmp(name, id->name))
1097 return id;
1098 id++;
1099 }
1100 return NULL;
1101}
1102EXPORT_SYMBOL_GPL(spi_nor_match_id);
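/*
 * Illustrative sketch (not part of this file) of how a controller driver
 * is expected to use this framework: fill in the hooks that
 * spi_nor_check() requires, look up a flash by name, then scan and
 * register the MTD. The my_*() callbacks and the "priv" structure are
 * hypothetical placeholders.
 *
 *	struct spi_nor *nor = &priv->nor;
 *	const struct spi_device_id *id;
 *	int ret;
 *
 *	nor->dev = dev;			// the controller's struct device
 *	nor->mtd = &priv->mtd;
 *	nor->read = my_read;
 *	nor->write = my_write;
 *	nor->erase = my_erase;
 *	nor->read_reg = my_read_reg;
 *	nor->write_reg = my_write_reg;
 *
 *	id = spi_nor_match_id("n25q032");
 *	ret = spi_nor_scan(nor, id, SPI_NOR_QUAD);
 *	if (!ret)
 *		ret = mtd_device_register(nor->mtd, NULL, 0);
 */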
1103
1104MODULE_LICENSE("GPL");
1105MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
1106MODULE_AUTHOR("Mike Lavender");
1107MODULE_DESCRIPTION("framework for SPI NOR");
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index 2e9e2d11f204..f19ab1acde1f 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -69,8 +69,8 @@ static int write_eraseblock(int ebnum)
 	int err = 0;
 	loff_t addr = ebnum * mtd->erasesize;
 
+	prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);
 	for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
-		prandom_bytes_state(&rnd_state, writebuf, use_len);
 		ops.mode = MTD_OPS_AUTO_OOB;
 		ops.len = 0;
 		ops.retlen = 0;
@@ -78,7 +78,7 @@ static int write_eraseblock(int ebnum)
 		ops.oobretlen = 0;
 		ops.ooboffs = use_offset;
 		ops.datbuf = NULL;
-		ops.oobbuf = writebuf;
+		ops.oobbuf = writebuf + (use_len_max * i) + use_offset;
 		err = mtd_write_oob(mtd, addr, &ops);
 		if (err || ops.oobretlen != use_len) {
 			pr_err("error: writeoob failed at %#llx\n",
@@ -122,8 +122,8 @@ static int verify_eraseblock(int ebnum)
 	int err = 0;
 	loff_t addr = ebnum * mtd->erasesize;
 
+	prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);
 	for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
-		prandom_bytes_state(&rnd_state, writebuf, use_len);
 		ops.mode = MTD_OPS_AUTO_OOB;
 		ops.len = 0;
 		ops.retlen = 0;
@@ -139,7 +139,8 @@ static int verify_eraseblock(int ebnum)
 			errcnt += 1;
 			return err ? err : -1;
 		}
-		if (memcmp(readbuf, writebuf, use_len)) {
+		if (memcmp(readbuf, writebuf + (use_len_max * i) + use_offset,
+			   use_len)) {
 			pr_err("error: verify failed at %#llx\n",
 			       (long long)addr);
 			errcnt += 1;
@@ -166,7 +167,9 @@ static int verify_eraseblock(int ebnum)
 			errcnt += 1;
 			return err ? err : -1;
 		}
-		if (memcmp(readbuf + use_offset, writebuf, use_len)) {
+		if (memcmp(readbuf + use_offset,
+			   writebuf + (use_len_max * i) + use_offset,
+			   use_len)) {
 			pr_err("error: verify failed at %#llx\n",
 			       (long long)addr);
 			errcnt += 1;
@@ -566,8 +569,8 @@ static int __init mtd_oobtest_init(void)
 		if (bbt[i] || bbt[i + 1])
 			continue;
 		addr = (i + 1) * mtd->erasesize - mtd->writesize;
+		prandom_bytes_state(&rnd_state, writebuf, sz * cnt);
 		for (pg = 0; pg < cnt; ++pg) {
-			prandom_bytes_state(&rnd_state, writebuf, sz);
 			ops.mode = MTD_OPS_AUTO_OOB;
 			ops.len = 0;
 			ops.retlen = 0;
@@ -575,7 +578,7 @@ static int __init mtd_oobtest_init(void)
 			ops.oobretlen = 0;
 			ops.ooboffs = 0;
 			ops.datbuf = NULL;
-			ops.oobbuf = writebuf;
+			ops.oobbuf = writebuf + pg * sz;
 			err = mtd_write_oob(mtd, addr, &ops);
 			if (err)
 				goto out;