path: root/drivers/mtd/nand
author	Linus Torvalds <torvalds@linux-foundation.org>	2011-11-07 12:11:16 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-11-07 12:11:16 -0500
commit	e0d65113a70f1dc514e625cc4e7a7485a4bf72df
tree	7320a130dc304623f5cf4b5dd8f67fb1776225ca /drivers/mtd/nand
parent	cf5e15fbd72c13977720aa15b7b7e00e1d8fd8f2
parent	48e546b7f281f251893baa40769581fd15f085fb
Merge git://git.infradead.org/mtd-2.6
* git://git.infradead.org/mtd-2.6: (226 commits)
  mtd: tests: annotate as DANGEROUS in Kconfig
  mtd: tests: don't use mtd0 as a default
  mtd: clean up usage of MTD_DOCPROBE_ADDRESS
  jffs2: add compr=lzo and compr=zlib options
  jffs2: implement mount option parsing and compression overriding
  mtd: nand: initialize ops.mode
  mtd: provide an alias for the redboot module name
  mtd: m25p80: don't probe device which has status of 'disabled'
  mtd: nand_h1900 never worked
  mtd: Add DiskOnChip G3 support
  mtd: m25p80: add EON flash EN25Q32B into spi flash id table
  mtd: mark block device queue as non-rotational
  mtd: r852: make r852_pm_ops static
  mtd: m25p80: add support for at25df321a spi data flash
  mtd: mxc_nand: preset_v1_v2: unlock all NAND flash blocks
  mtd: nand: switch `check_pattern()' to standard `memcmp()'
  mtd: nand: invalidate cache on unaligned reads
  mtd: nand: do not scan bad blocks with NAND_BBT_NO_OOB set
  mtd: nand: wait to set BBT version
  mtd: nand: scrub BBT on ECC errors
  ...

Fix up trivial conflicts:
 - arch/arm/mach-at91/board-usb-a9260.c
   Merged into board-usb-a926x.c
 - drivers/mtd/maps/lantiq-flash.c
   add_mtd_partitions -> mtd_device_register vs changed to use
   mtd_device_parse_register.
Diffstat (limited to 'drivers/mtd/nand')
-rw-r--r--  drivers/mtd/nand/Kconfig | 29
-rw-r--r--  drivers/mtd/nand/Makefile | 2
-rw-r--r--  drivers/mtd/nand/atmel_nand.c | 74
-rw-r--r--  drivers/mtd/nand/au1550nd.c | 29
-rw-r--r--  drivers/mtd/nand/autcpu12.c | 4
-rw-r--r--  drivers/mtd/nand/bcm_umi_nand.c | 57
-rw-r--r--  drivers/mtd/nand/cafe_nand.c | 21
-rw-r--r--  drivers/mtd/nand/cmx270_nand.c | 23
-rw-r--r--  drivers/mtd/nand/cs553x_nand.c | 15
-rw-r--r--  drivers/mtd/nand/davinci_nand.c | 39
-rw-r--r--  drivers/mtd/nand/denali.c | 6
-rw-r--r--  drivers/mtd/nand/diskonchip.c | 8
-rw-r--r--  drivers/mtd/nand/edb7312.c | 203
-rw-r--r--  drivers/mtd/nand/fsl_elbc_nand.c | 75
-rw-r--r--  drivers/mtd/nand/fsl_upm.c | 16
-rw-r--r--  drivers/mtd/nand/fsmc_nand.c | 77
-rw-r--r--  drivers/mtd/nand/gpmi-nand/Makefile | 3
-rw-r--r--  drivers/mtd/nand/gpmi-nand/bch-regs.h | 84
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-lib.c | 1057
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-nand.c | 1619
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-nand.h | 273
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-regs.h | 172
-rw-r--r--  drivers/mtd/nand/h1910.c | 19
-rw-r--r--  drivers/mtd/nand/jz4740_nand.c | 18
-rw-r--r--  drivers/mtd/nand/mpc5121_nfc.c | 22
-rw-r--r--  drivers/mtd/nand/mxc_nand.c | 37
-rw-r--r--  drivers/mtd/nand/nand_base.c | 1109
-rw-r--r--  drivers/mtd/nand/nand_bbt.c | 692
-rw-r--r--  drivers/mtd/nand/nand_bch.c | 4
-rw-r--r--  drivers/mtd/nand/nand_ecc.c | 10
-rw-r--r--  drivers/mtd/nand/nandsim.c | 4
-rw-r--r--  drivers/mtd/nand/ndfc.c | 22
-rw-r--r--  drivers/mtd/nand/nomadik_nand.c | 1
-rw-r--r--  drivers/mtd/nand/nuc900_nand.c | 1
-rw-r--r--  drivers/mtd/nand/omap2.c | 22
-rw-r--r--  drivers/mtd/nand/orion_nand.c | 16
-rw-r--r--  drivers/mtd/nand/pasemi_nand.c | 3
-rw-r--r--  drivers/mtd/nand/plat_nand.c | 25
-rw-r--r--  drivers/mtd/nand/ppchameleonevb.c | 47
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 471
-rw-r--r--  drivers/mtd/nand/r852.c | 6
-rw-r--r--  drivers/mtd/nand/rtc_from4.c | 5
-rw-r--r--  drivers/mtd/nand/s3c2410.c | 27
-rw-r--r--  drivers/mtd/nand/sharpsl.c | 13
-rw-r--r--  drivers/mtd/nand/sm_common.c | 2
-rw-r--r--  drivers/mtd/nand/socrates_nand.c | 28
-rw-r--r--  drivers/mtd/nand/tmio_nand.c | 17
-rw-r--r--  drivers/mtd/nand/txx9ndfmc.c | 8
48 files changed, 4597 insertions, 1918 deletions
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index dbfa0f7fb464..cce7b70824c3 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -83,16 +83,9 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR
83 scratch register here to enable this feature. On Intel Moorestown 83 scratch register here to enable this feature. On Intel Moorestown
84 boards, the scratch register is at 0xFF108018. 84 boards, the scratch register is at 0xFF108018.
85 85
86config MTD_NAND_EDB7312
87 tristate "Support for Cirrus Logic EBD7312 evaluation board"
88 depends on ARCH_EDB7312
89 help
90 This enables the driver for the Cirrus Logic EBD7312 evaluation
91 board to access the onboard NAND Flash.
92
93config MTD_NAND_H1900 86config MTD_NAND_H1900
94 tristate "iPAQ H1900 flash" 87 tristate "iPAQ H1900 flash"
95 depends on ARCH_PXA 88 depends on ARCH_PXA && BROKEN
96 help 89 help
97 This enables the driver for the iPAQ h1900 flash. 90 This enables the driver for the iPAQ h1900 flash.
98 91
@@ -116,10 +109,11 @@ config MTD_NAND_AMS_DELTA
116 Support for NAND flash on Amstrad E3 (Delta). 109 Support for NAND flash on Amstrad E3 (Delta).
117 110
118config MTD_NAND_OMAP2 111config MTD_NAND_OMAP2
119 tristate "NAND Flash device on OMAP2 and OMAP3" 112 tristate "NAND Flash device on OMAP2, OMAP3 and OMAP4"
120 depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3) 113 depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP4)
121 help 114 help
122 Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms. 115 Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
116 platforms.
123 117
124config MTD_NAND_IDS 118config MTD_NAND_IDS
125 tristate 119 tristate
@@ -423,6 +417,19 @@ config MTD_NAND_NANDSIM
423 The simulator may simulate various NAND flash chips for the 417 The simulator may simulate various NAND flash chips for the
424 MTD nand layer. 418 MTD nand layer.
425 419
420config MTD_NAND_GPMI_NAND
421 bool "GPMI NAND Flash Controller driver"
422 depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28)
423 select MTD_PARTITIONS
424 select MTD_CMDLINE_PARTS
425 help
426 Enables NAND Flash support for IMX23 or IMX28.
427 The GPMI controller is very powerful, with the help of BCH
428 module, it can do the hardware ECC. The GPMI supports several
429 NAND flashs at the same time. The GPMI may conflicts with other
430 block, such as SD card. So pay attention to it when you enable
431 the GPMI.
432
426config MTD_NAND_PLATFORM 433config MTD_NAND_PLATFORM
427 tristate "Support for generic platform NAND driver" 434 tristate "Support for generic platform NAND driver"
428 help 435 help
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 5745d831168e..618f4ba23699 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
13obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o 13obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
14obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o 14obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o
15obj-$(CONFIG_MTD_NAND_DENALI) += denali.o 15obj-$(CONFIG_MTD_NAND_DENALI) += denali.o
16obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o
17obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o 16obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
18obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o 17obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o
19obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o 18obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
@@ -49,5 +48,6 @@ obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o
49obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o 48obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
50obj-$(CONFIG_MTD_NAND_RICOH) += r852.o 49obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
51obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o 50obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
51obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
52 52
53nand-objs := nand_base.o nand_bbt.o 53nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 55da20ccc7a8..23e5d77c39fc 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -161,37 +161,6 @@ static int atmel_nand_device_ready(struct mtd_info *mtd)
161 !!host->board->rdy_pin_active_low; 161 !!host->board->rdy_pin_active_low;
162} 162}
163 163
164/*
165 * Minimal-overhead PIO for data access.
166 */
167static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len)
168{
169 struct nand_chip *nand_chip = mtd->priv;
170
171 __raw_readsb(nand_chip->IO_ADDR_R, buf, len);
172}
173
174static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
175{
176 struct nand_chip *nand_chip = mtd->priv;
177
178 __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
179}
180
181static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len)
182{
183 struct nand_chip *nand_chip = mtd->priv;
184
185 __raw_writesb(nand_chip->IO_ADDR_W, buf, len);
186}
187
188static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len)
189{
190 struct nand_chip *nand_chip = mtd->priv;
191
192 __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2);
193}
194
195static void dma_complete_func(void *completion) 164static void dma_complete_func(void *completion)
196{ 165{
197 complete(completion); 166 complete(completion);
@@ -266,33 +235,27 @@ err_buf:
266static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len) 235static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
267{ 236{
268 struct nand_chip *chip = mtd->priv; 237 struct nand_chip *chip = mtd->priv;
269 struct atmel_nand_host *host = chip->priv;
270 238
271 if (use_dma && len > mtd->oobsize) 239 if (use_dma && len > mtd->oobsize)
272 /* only use DMA for bigger than oob size: better performances */ 240 /* only use DMA for bigger than oob size: better performances */
273 if (atmel_nand_dma_op(mtd, buf, len, 1) == 0) 241 if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
274 return; 242 return;
275 243
276 if (host->board->bus_width_16) 244 /* if no DMA operation possible, use PIO */
277 atmel_read_buf16(mtd, buf, len); 245 memcpy_fromio(buf, chip->IO_ADDR_R, len);
278 else
279 atmel_read_buf8(mtd, buf, len);
280} 246}
281 247
282static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) 248static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
283{ 249{
284 struct nand_chip *chip = mtd->priv; 250 struct nand_chip *chip = mtd->priv;
285 struct atmel_nand_host *host = chip->priv;
286 251
287 if (use_dma && len > mtd->oobsize) 252 if (use_dma && len > mtd->oobsize)
288 /* only use DMA for bigger than oob size: better performances */ 253 /* only use DMA for bigger than oob size: better performances */
289 if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0) 254 if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
290 return; 255 return;
291 256
292 if (host->board->bus_width_16) 257 /* if no DMA operation possible, use PIO */
293 atmel_write_buf16(mtd, buf, len); 258 memcpy_toio(chip->IO_ADDR_W, buf, len);
294 else
295 atmel_write_buf8(mtd, buf, len);
296} 259}
297 260
298/* 261/*
@@ -481,10 +444,6 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
481 } 444 }
482} 445}
483 446
484#ifdef CONFIG_MTD_CMDLINE_PARTS
485static const char *part_probes[] = { "cmdlinepart", NULL };
486#endif
487
488/* 447/*
489 * Probe for the NAND device. 448 * Probe for the NAND device.
490 */ 449 */
@@ -496,8 +455,6 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
496 struct resource *regs; 455 struct resource *regs;
497 struct resource *mem; 456 struct resource *mem;
498 int res; 457 int res;
499 struct mtd_partition *partitions = NULL;
500 int num_partitions = 0;
501 458
502 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 459 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
503 if (!mem) { 460 if (!mem) {
@@ -583,7 +540,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
583 540
584 if (on_flash_bbt) { 541 if (on_flash_bbt) {
585 printk(KERN_INFO "atmel_nand: Use On Flash BBT\n"); 542 printk(KERN_INFO "atmel_nand: Use On Flash BBT\n");
586 nand_chip->options |= NAND_USE_FLASH_BBT; 543 nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
587 } 544 }
588 545
589 if (!cpu_has_dma()) 546 if (!cpu_has_dma())
@@ -594,7 +551,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
594 551
595 dma_cap_zero(mask); 552 dma_cap_zero(mask);
596 dma_cap_set(DMA_MEMCPY, mask); 553 dma_cap_set(DMA_MEMCPY, mask);
597 host->dma_chan = dma_request_channel(mask, 0, NULL); 554 host->dma_chan = dma_request_channel(mask, NULL, NULL);
598 if (!host->dma_chan) { 555 if (!host->dma_chan) {
599 dev_err(host->dev, "Failed to request DMA channel\n"); 556 dev_err(host->dev, "Failed to request DMA channel\n");
600 use_dma = 0; 557 use_dma = 0;
@@ -655,27 +612,12 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
655 goto err_scan_tail; 612 goto err_scan_tail;
656 } 613 }
657 614
658#ifdef CONFIG_MTD_CMDLINE_PARTS
659 mtd->name = "atmel_nand"; 615 mtd->name = "atmel_nand";
660 num_partitions = parse_mtd_partitions(mtd, part_probes, 616 res = mtd_device_parse_register(mtd, NULL, 0,
661 &partitions, 0); 617 host->board->parts, host->board->num_parts);
662#endif
663 if (num_partitions <= 0 && host->board->partition_info)
664 partitions = host->board->partition_info(mtd->size,
665 &num_partitions);
666
667 if ((!partitions) || (num_partitions == 0)) {
668 printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n");
669 res = -ENXIO;
670 goto err_no_partitions;
671 }
672
673 res = mtd_device_register(mtd, partitions, num_partitions);
674 if (!res) 618 if (!res)
675 return res; 619 return res;
676 620
677err_no_partitions:
678 nand_release(mtd);
679err_scan_tail: 621err_scan_tail:
680err_scan_ident: 622err_scan_ident:
681err_no_card: 623err_no_card:
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index fa5736b9286c..7dd3700f2303 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -52,7 +52,7 @@ static const struct mtd_partition partition_info[] = {
52 * au_read_byte - read one byte from the chip 52 * au_read_byte - read one byte from the chip
53 * @mtd: MTD device structure 53 * @mtd: MTD device structure
54 * 54 *
55 * read function for 8bit buswith 55 * read function for 8bit buswidth
56 */ 56 */
57static u_char au_read_byte(struct mtd_info *mtd) 57static u_char au_read_byte(struct mtd_info *mtd)
58{ 58{
@@ -67,7 +67,7 @@ static u_char au_read_byte(struct mtd_info *mtd)
67 * @mtd: MTD device structure 67 * @mtd: MTD device structure
68 * @byte: pointer to data byte to write 68 * @byte: pointer to data byte to write
69 * 69 *
70 * write function for 8it buswith 70 * write function for 8it buswidth
71 */ 71 */
72static void au_write_byte(struct mtd_info *mtd, u_char byte) 72static void au_write_byte(struct mtd_info *mtd, u_char byte)
73{ 73{
@@ -77,11 +77,10 @@ static void au_write_byte(struct mtd_info *mtd, u_char byte)
77} 77}
78 78
79/** 79/**
80 * au_read_byte16 - read one byte endianess aware from the chip 80 * au_read_byte16 - read one byte endianness aware from the chip
81 * @mtd: MTD device structure 81 * @mtd: MTD device structure
82 * 82 *
83 * read function for 16bit buswith with 83 * read function for 16bit buswidth with endianness conversion
84 * endianess conversion
85 */ 84 */
86static u_char au_read_byte16(struct mtd_info *mtd) 85static u_char au_read_byte16(struct mtd_info *mtd)
87{ 86{
@@ -92,12 +91,11 @@ static u_char au_read_byte16(struct mtd_info *mtd)
92} 91}
93 92
94/** 93/**
95 * au_write_byte16 - write one byte endianess aware to the chip 94 * au_write_byte16 - write one byte endianness aware to the chip
96 * @mtd: MTD device structure 95 * @mtd: MTD device structure
97 * @byte: pointer to data byte to write 96 * @byte: pointer to data byte to write
98 * 97 *
99 * write function for 16bit buswith with 98 * write function for 16bit buswidth with endianness conversion
100 * endianess conversion
101 */ 99 */
102static void au_write_byte16(struct mtd_info *mtd, u_char byte) 100static void au_write_byte16(struct mtd_info *mtd, u_char byte)
103{ 101{
@@ -110,8 +108,7 @@ static void au_write_byte16(struct mtd_info *mtd, u_char byte)
110 * au_read_word - read one word from the chip 108 * au_read_word - read one word from the chip
111 * @mtd: MTD device structure 109 * @mtd: MTD device structure
112 * 110 *
113 * read function for 16bit buswith without 111 * read function for 16bit buswidth without endianness conversion
114 * endianess conversion
115 */ 112 */
116static u16 au_read_word(struct mtd_info *mtd) 113static u16 au_read_word(struct mtd_info *mtd)
117{ 114{
@@ -127,7 +124,7 @@ static u16 au_read_word(struct mtd_info *mtd)
127 * @buf: data buffer 124 * @buf: data buffer
128 * @len: number of bytes to write 125 * @len: number of bytes to write
129 * 126 *
130 * write function for 8bit buswith 127 * write function for 8bit buswidth
131 */ 128 */
132static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len) 129static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
133{ 130{
@@ -146,7 +143,7 @@ static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
146 * @buf: buffer to store date 143 * @buf: buffer to store date
147 * @len: number of bytes to read 144 * @len: number of bytes to read
148 * 145 *
149 * read function for 8bit buswith 146 * read function for 8bit buswidth
150 */ 147 */
151static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len) 148static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
152{ 149{
@@ -165,7 +162,7 @@ static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
165 * @buf: buffer containing the data to compare 162 * @buf: buffer containing the data to compare
166 * @len: number of bytes to compare 163 * @len: number of bytes to compare
167 * 164 *
168 * verify function for 8bit buswith 165 * verify function for 8bit buswidth
169 */ 166 */
170static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) 167static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
171{ 168{
@@ -187,7 +184,7 @@ static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
187 * @buf: data buffer 184 * @buf: data buffer
188 * @len: number of bytes to write 185 * @len: number of bytes to write
189 * 186 *
190 * write function for 16bit buswith 187 * write function for 16bit buswidth
191 */ 188 */
192static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len) 189static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
193{ 190{
@@ -209,7 +206,7 @@ static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
209 * @buf: buffer to store date 206 * @buf: buffer to store date
210 * @len: number of bytes to read 207 * @len: number of bytes to read
211 * 208 *
212 * read function for 16bit buswith 209 * read function for 16bit buswidth
213 */ 210 */
214static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len) 211static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
215{ 212{
@@ -230,7 +227,7 @@ static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
230 * @buf: buffer containing the data to compare 227 * @buf: buffer containing the data to compare
231 * @len: number of bytes to compare 228 * @len: number of bytes to compare
232 * 229 *
233 * verify function for 16bit buswith 230 * verify function for 16bit buswidth
234 */ 231 */
235static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len) 232static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len)
236{ 233{
diff --git a/drivers/mtd/nand/autcpu12.c b/drivers/mtd/nand/autcpu12.c
index eddc9a224985..2e42ec2e8ff4 100644
--- a/drivers/mtd/nand/autcpu12.c
+++ b/drivers/mtd/nand/autcpu12.c
@@ -172,9 +172,9 @@ static int __init autcpu12_init(void)
172 172
173 /* Enable the following for a flash based bad block table */ 173 /* Enable the following for a flash based bad block table */
174 /* 174 /*
175 this->options = NAND_USE_FLASH_BBT; 175 this->bbt_options = NAND_BBT_USE_FLASH;
176 */ 176 */
177 this->options = NAND_USE_FLASH_BBT; 177 this->bbt_options = NAND_BBT_USE_FLASH;
178 178
179 /* Scan to find existence of the device */ 179 /* Scan to find existence of the device */
180 if (nand_scan(autcpu12_mtd, 1)) { 180 if (nand_scan(autcpu12_mtd, 1)) {
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index 8c569e454dc5..46b58d672847 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -52,8 +52,6 @@
52static const __devinitconst char gBanner[] = KERN_INFO \ 52static const __devinitconst char gBanner[] = KERN_INFO \
53 "BCM UMI MTD NAND Driver: 1.00\n"; 53 "BCM UMI MTD NAND Driver: 1.00\n";
54 54
55const char *part_probes[] = { "cmdlinepart", NULL };
56
57#if NAND_ECC_BCH 55#if NAND_ECC_BCH
58static uint8_t scan_ff_pattern[] = { 0xff }; 56static uint8_t scan_ff_pattern[] = { 0xff };
59 57
@@ -376,16 +374,18 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
376 374
377 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 375 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
378 376
379 if (!r) 377 if (!r) {
380 return -ENXIO; 378 err = -ENXIO;
379 goto out_free;
380 }
381 381
382 /* map physical address */ 382 /* map physical address */
383 bcm_umi_io_base = ioremap(r->start, resource_size(r)); 383 bcm_umi_io_base = ioremap(r->start, resource_size(r));
384 384
385 if (!bcm_umi_io_base) { 385 if (!bcm_umi_io_base) {
386 printk(KERN_ERR "ioremap to access BCM UMI NAND chip failed\n"); 386 printk(KERN_ERR "ioremap to access BCM UMI NAND chip failed\n");
387 kfree(board_mtd); 387 err = -EIO;
388 return -EIO; 388 goto out_free;
389 } 389 }
390 390
391 /* Get pointer to private data */ 391 /* Get pointer to private data */
@@ -401,9 +401,8 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
401 /* Initialize the NAND hardware. */ 401 /* Initialize the NAND hardware. */
402 if (bcm_umi_nand_inithw() < 0) { 402 if (bcm_umi_nand_inithw() < 0) {
403 printk(KERN_ERR "BCM UMI NAND chip could not be initialized\n"); 403 printk(KERN_ERR "BCM UMI NAND chip could not be initialized\n");
404 iounmap(bcm_umi_io_base); 404 err = -EIO;
405 kfree(board_mtd); 405 goto out_unmap;
406 return -EIO;
407 } 406 }
408 407
409 /* Set address of NAND IO lines */ 408 /* Set address of NAND IO lines */
@@ -436,7 +435,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
436#if USE_DMA 435#if USE_DMA
437 err = nand_dma_init(); 436 err = nand_dma_init();
438 if (err != 0) 437 if (err != 0)
439 return err; 438 goto out_unmap;
440#endif 439#endif
441 440
442 /* Figure out the size of the device that we have. 441 /* Figure out the size of the device that we have.
@@ -447,9 +446,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
447 err = nand_scan_ident(board_mtd, 1, NULL); 446 err = nand_scan_ident(board_mtd, 1, NULL);
448 if (err) { 447 if (err) {
449 printk(KERN_ERR "nand_scan failed: %d\n", err); 448 printk(KERN_ERR "nand_scan failed: %d\n", err);
450 iounmap(bcm_umi_io_base); 449 goto out_unmap;
451 kfree(board_mtd);
452 return err;
453 } 450 }
454 451
455 /* Now that we know the nand size, we can setup the ECC layout */ 452 /* Now that we know the nand size, we can setup the ECC layout */
@@ -468,13 +465,14 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
468 { 465 {
469 printk(KERN_ERR "NAND - Unrecognized pagesize: %d\n", 466 printk(KERN_ERR "NAND - Unrecognized pagesize: %d\n",
470 board_mtd->writesize); 467 board_mtd->writesize);
471 return -EINVAL; 468 err = -EINVAL;
469 goto out_unmap;
472 } 470 }
473 } 471 }
474 472
475#if NAND_ECC_BCH 473#if NAND_ECC_BCH
476 if (board_mtd->writesize > 512) { 474 if (board_mtd->writesize > 512) {
477 if (this->options & NAND_USE_FLASH_BBT) 475 if (this->bbt_options & NAND_BBT_USE_FLASH)
478 largepage_bbt.options = NAND_BBT_SCAN2NDPAGE; 476 largepage_bbt.options = NAND_BBT_SCAN2NDPAGE;
479 this->badblock_pattern = &largepage_bbt; 477 this->badblock_pattern = &largepage_bbt;
480 } 478 }
@@ -485,33 +483,20 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
485 err = nand_scan_tail(board_mtd); 483 err = nand_scan_tail(board_mtd);
486 if (err) { 484 if (err) {
487 printk(KERN_ERR "nand_scan failed: %d\n", err); 485 printk(KERN_ERR "nand_scan failed: %d\n", err);
488 iounmap(bcm_umi_io_base); 486 goto out_unmap;
489 kfree(board_mtd);
490 return err;
491 } 487 }
492 488
493 /* Register the partitions */ 489 /* Register the partitions */
494 { 490 board_mtd->name = "bcm_umi-nand";
495 int nr_partitions; 491 mtd_device_parse_register(board_mtd, NULL, 0, NULL, 0);
496 struct mtd_partition *partition_info;
497
498 board_mtd->name = "bcm_umi-nand";
499 nr_partitions =
500 parse_mtd_partitions(board_mtd, part_probes,
501 &partition_info, 0);
502
503 if (nr_partitions <= 0) {
504 printk(KERN_ERR "BCM UMI NAND: Too few partitions - %d\n",
505 nr_partitions);
506 iounmap(bcm_umi_io_base);
507 kfree(board_mtd);
508 return -EIO;
509 }
510 mtd_device_register(board_mtd, partition_info, nr_partitions);
511 }
512 492
513 /* Return happy */ 493 /* Return happy */
514 return 0; 494 return 0;
495out_unmap:
496 iounmap(bcm_umi_io_base);
497out_free:
498 kfree(board_mtd);
499 return err;
515} 500}
516 501
517static int bcm_umi_nand_remove(struct platform_device *pdev) 502static int bcm_umi_nand_remove(struct platform_device *pdev)
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 7c8df837d3b8..72d3f23490c5 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -58,7 +58,6 @@
58 58
59struct cafe_priv { 59struct cafe_priv {
60 struct nand_chip nand; 60 struct nand_chip nand;
61 struct mtd_partition *parts;
62 struct pci_dev *pdev; 61 struct pci_dev *pdev;
63 void __iomem *mmio; 62 void __iomem *mmio;
64 struct rs_control *rs; 63 struct rs_control *rs;
@@ -372,7 +371,7 @@ static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
372 return 1; 371 return 1;
373} 372}
374/** 373/**
375 * cafe_nand_read_page_syndrome - {REPLACABLE] hardware ecc syndrom based page read 374 * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
376 * @mtd: mtd info structure 375 * @mtd: mtd info structure
377 * @chip: nand chip info structure 376 * @chip: nand chip info structure
378 * @buf: buffer to store read data 377 * @buf: buffer to store read data
@@ -631,8 +630,6 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
631 struct cafe_priv *cafe; 630 struct cafe_priv *cafe;
632 uint32_t ctrl; 631 uint32_t ctrl;
633 int err = 0; 632 int err = 0;
634 struct mtd_partition *parts;
635 int nr_parts;
636 633
637 /* Very old versions shared the same PCI ident for all three 634 /* Very old versions shared the same PCI ident for all three
638 functions on the chip. Verify the class too... */ 635 functions on the chip. Verify the class too... */
@@ -687,7 +684,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
687 cafe->nand.chip_delay = 0; 684 cafe->nand.chip_delay = 0;
688 685
689 /* Enable the following for a flash based bad block table */ 686 /* Enable the following for a flash based bad block table */
690 cafe->nand.options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR | NAND_OWN_BUFFERS; 687 cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
688 cafe->nand.options = NAND_NO_AUTOINCR | NAND_OWN_BUFFERS;
691 689
692 if (skipbbt) { 690 if (skipbbt) {
693 cafe->nand.options |= NAND_SKIP_BBTSCAN; 691 cafe->nand.options |= NAND_SKIP_BBTSCAN;
@@ -800,18 +798,9 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
800 798
801 pci_set_drvdata(pdev, mtd); 799 pci_set_drvdata(pdev, mtd);
802 800
803 /* We register the whole device first, separate from the partitions */
804 mtd_device_register(mtd, NULL, 0);
805
806#ifdef CONFIG_MTD_CMDLINE_PARTS
807 mtd->name = "cafe_nand"; 801 mtd->name = "cafe_nand";
808#endif 802 mtd_device_parse_register(mtd, part_probes, 0, NULL, 0);
809 nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0); 803
810 if (nr_parts > 0) {
811 cafe->parts = parts;
812 dev_info(&cafe->pdev->dev, "%d partitions found\n", nr_parts);
813 mtd_device_register(mtd, parts, nr_parts);
814 }
815 goto out; 804 goto out;
816 805
817 out_irq: 806 out_irq:
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index be33b0f4634d..737ef9a04fdb 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -51,8 +51,6 @@ static struct mtd_partition partition_info[] = {
51}; 51};
52#define NUM_PARTITIONS (ARRAY_SIZE(partition_info)) 52#define NUM_PARTITIONS (ARRAY_SIZE(partition_info))
53 53
54const char *part_probes[] = { "cmdlinepart", NULL };
55
56static u_char cmx270_read_byte(struct mtd_info *mtd) 54static u_char cmx270_read_byte(struct mtd_info *mtd)
57{ 55{
58 struct nand_chip *this = mtd->priv; 56 struct nand_chip *this = mtd->priv;
@@ -152,9 +150,6 @@ static int cmx270_device_ready(struct mtd_info *mtd)
152static int __init cmx270_init(void) 150static int __init cmx270_init(void)
153{ 151{
154 struct nand_chip *this; 152 struct nand_chip *this;
155 const char *part_type;
156 struct mtd_partition *mtd_parts;
157 int mtd_parts_nb = 0;
158 int ret; 153 int ret;
159 154
160 if (!(machine_is_armcore() && cpu_is_pxa27x())) 155 if (!(machine_is_armcore() && cpu_is_pxa27x()))
@@ -223,23 +218,9 @@ static int __init cmx270_init(void)
223 goto err_scan; 218 goto err_scan;
224 } 219 }
225 220
226#ifdef CONFIG_MTD_CMDLINE_PARTS
227 mtd_parts_nb = parse_mtd_partitions(cmx270_nand_mtd, part_probes,
228 &mtd_parts, 0);
229 if (mtd_parts_nb > 0)
230 part_type = "command line";
231 else
232 mtd_parts_nb = 0;
233#endif
234 if (!mtd_parts_nb) {
235 mtd_parts = partition_info;
236 mtd_parts_nb = NUM_PARTITIONS;
237 part_type = "static";
238 }
239
240 /* Register the partitions */ 221 /* Register the partitions */
241 pr_notice("Using %s partition definition\n", part_type); 222 ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, 0,
242 ret = mtd_device_register(cmx270_nand_mtd, mtd_parts, mtd_parts_nb); 223 partition_info, NUM_PARTITIONS);
243 if (ret) 224 if (ret)
244 goto err_scan; 225 goto err_scan;
245 226
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index f59ad1f2d5db..414afa793563 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -239,7 +239,8 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
239 this->ecc.correct = nand_correct_data; 239 this->ecc.correct = nand_correct_data;
240 240
241 /* Enable the following for a flash based bad block table */ 241 /* Enable the following for a flash based bad block table */
242 this->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR; 242 this->bbt_options = NAND_BBT_USE_FLASH;
243 this->options = NAND_NO_AUTOINCR;
243 244
244 /* Scan to find existence of the device */ 245 /* Scan to find existence of the device */
245 if (nand_scan(new_mtd, 1)) { 246 if (nand_scan(new_mtd, 1)) {
@@ -277,15 +278,11 @@ static int is_geode(void)
277 return 0; 278 return 0;
278} 279}
279 280
280static const char *part_probes[] = { "cmdlinepart", NULL };
281
282static int __init cs553x_init(void) 281static int __init cs553x_init(void)
283{ 282{
284 int err = -ENXIO; 283 int err = -ENXIO;
285 int i; 284 int i;
286 uint64_t val; 285 uint64_t val;
287 int mtd_parts_nb = 0;
288 struct mtd_partition *mtd_parts = NULL;
289 286
290 /* If the CPU isn't a Geode GX or LX, abort */ 287 /* If the CPU isn't a Geode GX or LX, abort */
291 if (!is_geode()) 288 if (!is_geode())
@@ -315,13 +312,9 @@ static int __init cs553x_init(void)
315 do mtdconcat etc. if we want to. */ 312 do mtdconcat etc. if we want to. */
316 for (i = 0; i < NR_CS553X_CONTROLLERS; i++) { 313 for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
317 if (cs553x_mtd[i]) { 314 if (cs553x_mtd[i]) {
318
319 /* If any devices registered, return success. Else the last error. */ 315 /* If any devices registered, return success. Else the last error. */
320 mtd_parts_nb = parse_mtd_partitions(cs553x_mtd[i], part_probes, &mtd_parts, 0); 316 mtd_device_parse_register(cs553x_mtd[i], NULL, 0,
321 if (mtd_parts_nb > 0) 317 NULL, 0);
322 printk(KERN_NOTICE "Using command line partition definition\n");
323 mtd_device_register(cs553x_mtd[i], mtd_parts,
324 mtd_parts_nb);
325 err = 0; 318 err = 0;
326 } 319 }
327 } 320 }
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 1f34951ae1a7..c153e1f77f90 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -57,7 +57,6 @@ struct davinci_nand_info {
57 57
58 struct device *dev; 58 struct device *dev;
59 struct clk *clk; 59 struct clk *clk;
60 bool partitioned;
61 60
62 bool is_readmode; 61 bool is_readmode;
63 62
@@ -530,8 +529,6 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
530 int ret; 529 int ret;
531 uint32_t val; 530 uint32_t val;
532 nand_ecc_modes_t ecc_mode; 531 nand_ecc_modes_t ecc_mode;
533 struct mtd_partition *mtd_parts = NULL;
534 int mtd_parts_nb = 0;
535 532
536 /* insist on board-specific configuration */ 533 /* insist on board-specific configuration */
537 if (!pdata) 534 if (!pdata)
@@ -581,7 +578,9 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
581 info->chip.chip_delay = 0; 578 info->chip.chip_delay = 0;
582 info->chip.select_chip = nand_davinci_select_chip; 579 info->chip.select_chip = nand_davinci_select_chip;
583 580
584 /* options such as NAND_USE_FLASH_BBT or 16-bit widths */ 581 /* options such as NAND_BBT_USE_FLASH */
582 info->chip.bbt_options = pdata->bbt_options;
583 /* options such as 16-bit widths */
585 info->chip.options = pdata->options; 584 info->chip.options = pdata->options;
586 info->chip.bbt_td = pdata->bbt_td; 585 info->chip.bbt_td = pdata->bbt_td;
587 info->chip.bbt_md = pdata->bbt_md; 586 info->chip.bbt_md = pdata->bbt_md;
@@ -751,33 +750,8 @@ syndrome_done:
751 if (ret < 0) 750 if (ret < 0)
752 goto err_scan; 751 goto err_scan;
753 752
754 if (mtd_has_cmdlinepart()) { 753 ret = mtd_device_parse_register(&info->mtd, NULL, 0,
755 static const char *probes[] __initconst = { 754 pdata->parts, pdata->nr_parts);
756 "cmdlinepart", NULL
757 };
758
759 mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
760 &mtd_parts, 0);
761 }
762
763 if (mtd_parts_nb <= 0) {
764 mtd_parts = pdata->parts;
765 mtd_parts_nb = pdata->nr_parts;
766 }
767
768 /* Register any partitions */
769 if (mtd_parts_nb > 0) {
770 ret = mtd_device_register(&info->mtd, mtd_parts,
771 mtd_parts_nb);
772 if (ret == 0)
773 info->partitioned = true;
774 }
775
776 /* If there's no partition info, just package the whole chip
777 * as a single MTD device.
778 */
779 if (!info->partitioned)
780 ret = mtd_device_register(&info->mtd, NULL, 0) ? -ENODEV : 0;
781 755
782 if (ret < 0) 756 if (ret < 0)
783 goto err_scan; 757 goto err_scan;
@@ -816,9 +790,6 @@ err_nomem:
816static int __exit nand_davinci_remove(struct platform_device *pdev) 790static int __exit nand_davinci_remove(struct platform_device *pdev)
817{ 791{
818 struct davinci_nand_info *info = platform_get_drvdata(pdev); 792 struct davinci_nand_info *info = platform_get_drvdata(pdev);
819 int status;
820
821 status = mtd_device_unregister(&info->mtd);
822 793
823 spin_lock_irq(&davinci_nand_lock); 794 spin_lock_irq(&davinci_nand_lock);
824 if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME) 795 if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index d5276218945f..3984d488f9ab 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1346,6 +1346,7 @@ static void denali_hw_init(struct denali_nand_info *denali)
1346 * */ 1346 * */
1347 denali->bbtskipbytes = ioread32(denali->flash_reg + 1347 denali->bbtskipbytes = ioread32(denali->flash_reg +
1348 SPARE_AREA_SKIP_BYTES); 1348 SPARE_AREA_SKIP_BYTES);
1349 detect_max_banks(denali);
1349 denali_nand_reset(denali); 1350 denali_nand_reset(denali);
1350 iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED); 1351 iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
1351 iowrite32(CHIP_EN_DONT_CARE__FLAG, 1352 iowrite32(CHIP_EN_DONT_CARE__FLAG,
@@ -1356,7 +1357,6 @@ static void denali_hw_init(struct denali_nand_info *denali)
1356 /* Should set value for these registers when init */ 1357 /* Should set value for these registers when init */
1357 iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES); 1358 iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
1358 iowrite32(1, denali->flash_reg + ECC_ENABLE); 1359 iowrite32(1, denali->flash_reg + ECC_ENABLE);
1359 detect_max_banks(denali);
1360 denali_nand_timing_set(denali); 1360 denali_nand_timing_set(denali);
1361 denali_irq_init(denali); 1361 denali_irq_init(denali);
1362} 1362}
@@ -1577,7 +1577,8 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1577 denali->nand.bbt_md = &bbt_mirror_descr; 1577 denali->nand.bbt_md = &bbt_mirror_descr;
1578 1578
1579 /* skip the scan for now until we have OOB read and write support */ 1579 /* skip the scan for now until we have OOB read and write support */
1580 denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN; 1580 denali->nand.bbt_options |= NAND_BBT_USE_FLASH;
1581 denali->nand.options |= NAND_SKIP_BBTSCAN;
1581 denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME; 1582 denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
1582 1583
1583 /* Denali Controller only support 15bit and 8bit ECC in MRST, 1584 /* Denali Controller only support 15bit and 8bit ECC in MRST,
@@ -1676,7 +1677,6 @@ static void denali_pci_remove(struct pci_dev *dev)
1676 struct denali_nand_info *denali = pci_get_drvdata(dev); 1677 struct denali_nand_info *denali = pci_get_drvdata(dev);
1677 1678
1678 nand_release(&denali->mtd); 1679 nand_release(&denali->mtd);
1679 mtd_device_unregister(&denali->mtd);
1680 1680
1681 denali_irq_cleanup(dev->irq, denali); 1681 denali_irq_cleanup(dev->irq, denali);
1682 1682
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index e1b84cb90f0d..5780dbab6113 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -133,7 +133,7 @@ static struct rs_control *rs_decoder;
133 133
134/* 134/*
135 * The HW decoder in the DoC ASIC's provides us a error syndrome, 135 * The HW decoder in the DoC ASIC's provides us a error syndrome,
136 * which we must convert to a standard syndrom usable by the generic 136 * which we must convert to a standard syndrome usable by the generic
137 * Reed-Solomon library code. 137 * Reed-Solomon library code.
138 * 138 *
139 * Fabrice Bellard figured this out in the old docecc code. I added 139 * Fabrice Bellard figured this out in the old docecc code. I added
@@ -154,7 +154,7 @@ static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc)
154 ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2); 154 ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2);
155 parity = ecc[1]; 155 parity = ecc[1];
156 156
157 /* Initialize the syndrom buffer */ 157 /* Initialize the syndrome buffer */
158 for (i = 0; i < NROOTS; i++) 158 for (i = 0; i < NROOTS; i++)
159 s[i] = ds[0]; 159 s[i] = ds[0];
160 /* 160 /*
@@ -1032,7 +1032,7 @@ static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat,
1032 WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf); 1032 WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
1033 else 1033 else
1034 WriteDOC(DOC_ECC_DIS, docptr, ECCConf); 1034 WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
1035 if (no_ecc_failures && (ret == -EBADMSG)) { 1035 if (no_ecc_failures && mtd_is_eccerr(ret)) {
1036 printk(KERN_ERR "suppressing ECC failure\n"); 1036 printk(KERN_ERR "suppressing ECC failure\n");
1037 ret = 0; 1037 ret = 0;
1038 } 1038 }
@@ -1653,7 +1653,7 @@ static int __init doc_probe(unsigned long physadr)
1653 nand->ecc.mode = NAND_ECC_HW_SYNDROME; 1653 nand->ecc.mode = NAND_ECC_HW_SYNDROME;
1654 nand->ecc.size = 512; 1654 nand->ecc.size = 512;
1655 nand->ecc.bytes = 6; 1655 nand->ecc.bytes = 6;
1656 nand->options = NAND_USE_FLASH_BBT; 1656 nand->bbt_options = NAND_BBT_USE_FLASH;
1657 1657
1658 doc->physadr = physadr; 1658 doc->physadr = physadr;
1659 doc->virtadr = virtadr; 1659 doc->virtadr = virtadr;
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c
deleted file mode 100644
index 8400d0f6dada..000000000000
--- a/drivers/mtd/nand/edb7312.c
+++ /dev/null
@@ -1,203 +0,0 @@
1/*
2 * drivers/mtd/nand/edb7312.c
3 *
4 * Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
5 *
6 * Derived from drivers/mtd/nand/autcpu12.c
7 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * Overview:
14 * This is a device driver for the NAND flash device found on the
15 * CLEP7312 board which utilizes the Toshiba TC58V64AFT part. This is
16 * a 64Mibit (8MiB x 8 bits) NAND flash device.
17 */
18
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/mtd/mtd.h>
23#include <linux/mtd/nand.h>
24#include <linux/mtd/partitions.h>
25#include <asm/io.h>
26#include <mach/hardware.h> /* for CLPS7111_VIRT_BASE */
27#include <asm/sizes.h>
28#include <asm/hardware/clps7111.h>
29
30/*
31 * MTD structure for EDB7312 board
32 */
33static struct mtd_info *ep7312_mtd = NULL;
34
35/*
36 * Values specific to the EDB7312 board (used with EP7312 processor)
37 */
38#define EP7312_FIO_PBASE 0x10000000 /* Phys address of flash */
39#define EP7312_PXDR 0x0001 /*
40 * IO offset to Port B data register
41 * where the CLE, ALE and NCE pins
42 * are wired to.
43 */
44#define EP7312_PXDDR 0x0041 /*
45 * IO offset to Port B data direction
46 * register so we can control the IO
47 * lines.
48 */
49
50/*
51 * Module stuff
52 */
53
54static unsigned long ep7312_fio_pbase = EP7312_FIO_PBASE;
55static void __iomem *ep7312_pxdr = (void __iomem *)EP7312_PXDR;
56static void __iomem *ep7312_pxddr = (void __iomem *)EP7312_PXDDR;
57
58/*
59 * Define static partitions for flash device
60 */
61static struct mtd_partition partition_info[] = {
62 {.name = "EP7312 Nand Flash",
63 .offset = 0,
64 .size = 8 * 1024 * 1024}
65};
66
67#define NUM_PARTITIONS 1
68
69/*
70 * hardware specific access to control-lines
71 *
72 * NAND_NCE: bit 0 -> bit 6 (bit 7 = 1)
73 * NAND_CLE: bit 1 -> bit 4
74 * NAND_ALE: bit 2 -> bit 5
75 */
76static void ep7312_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
77{
78 struct nand_chip *chip = mtd->priv;
79
80 if (ctrl & NAND_CTRL_CHANGE) {
81 unsigned char bits = 0x80;
82
83 bits |= (ctrl & (NAND_CLE | NAND_ALE)) << 3;
84 bits |= (ctrl & NAND_NCE) ? 0x00 : 0x40;
85
86 clps_writeb((clps_readb(ep7312_pxdr) & 0xF0) | bits,
87 ep7312_pxdr);
88 }
89 if (cmd != NAND_CMD_NONE)
90 writeb(cmd, chip->IO_ADDR_W);
91}
92
93/*
94 * read device ready pin
95 */
96static int ep7312_device_ready(struct mtd_info *mtd)
97{
98 return 1;
99}
100
101const char *part_probes[] = { "cmdlinepart", NULL };
102
103/*
104 * Main initialization routine
105 */
106static int __init ep7312_init(void)
107{
108 struct nand_chip *this;
109 const char *part_type = 0;
110 int mtd_parts_nb = 0;
111 struct mtd_partition *mtd_parts = 0;
112 void __iomem *ep7312_fio_base;
113
114 /* Allocate memory for MTD device structure and private data */
115 ep7312_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
116 if (!ep7312_mtd) {
117 printk("Unable to allocate EDB7312 NAND MTD device structure.\n");
118 return -ENOMEM;
119 }
120
121 /* map physical address */
122 ep7312_fio_base = ioremap(ep7312_fio_pbase, SZ_1K);
123 if (!ep7312_fio_base) {
124 printk("ioremap EDB7312 NAND flash failed\n");
125 kfree(ep7312_mtd);
126 return -EIO;
127 }
128
129 /* Get pointer to private data */
130 this = (struct nand_chip *)(&ep7312_mtd[1]);
131
132 /* Initialize structures */
133 memset(ep7312_mtd, 0, sizeof(struct mtd_info));
134 memset(this, 0, sizeof(struct nand_chip));
135
136 /* Link the private data with the MTD structure */
137 ep7312_mtd->priv = this;
138 ep7312_mtd->owner = THIS_MODULE;
139
140 /*
141 * Set GPIO Port B control register so that the pins are configured
142 * to be outputs for controlling the NAND flash.
143 */
144 clps_writeb(0xf0, ep7312_pxddr);
145
146 /* insert callbacks */
147 this->IO_ADDR_R = ep7312_fio_base;
148 this->IO_ADDR_W = ep7312_fio_base;
149 this->cmd_ctrl = ep7312_hwcontrol;
150 this->dev_ready = ep7312_device_ready;
151 /* 15 us command delay time */
152 this->chip_delay = 15;
153
154 /* Scan to find existence of the device */
155 if (nand_scan(ep7312_mtd, 1)) {
156 iounmap((void *)ep7312_fio_base);
157 kfree(ep7312_mtd);
158 return -ENXIO;
159 }
160 ep7312_mtd->name = "edb7312-nand";
161 mtd_parts_nb = parse_mtd_partitions(ep7312_mtd, part_probes, &mtd_parts, 0);
162 if (mtd_parts_nb > 0)
163 part_type = "command line";
164 else
165 mtd_parts_nb = 0;
166 if (mtd_parts_nb == 0) {
167 mtd_parts = partition_info;
168 mtd_parts_nb = NUM_PARTITIONS;
169 part_type = "static";
170 }
171
172 /* Register the partitions */
173 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
174 mtd_device_register(ep7312_mtd, mtd_parts, mtd_parts_nb);
175
176 /* Return happy */
177 return 0;
178}
179
180module_init(ep7312_init);
181
182/*
183 * Clean up routine
184 */
185static void __exit ep7312_cleanup(void)
186{
187 struct nand_chip *this = (struct nand_chip *)&ep7312_mtd[1];
188
189 /* Release resources, unregister device */
190 nand_release(ap7312_mtd);
191
192 /* Release io resource */
193 iounmap(this->IO_ADDR_R);
194
195 /* Free the MTD device structure */
196 kfree(ep7312_mtd);
197}
198
199module_exit(ep7312_cleanup);
200
201MODULE_LICENSE("GPL");
202MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
203MODULE_DESCRIPTION("MTD map driver for Cogent EDB7312 board");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 33d8aad8bba5..eedd8ee2c9ac 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -75,7 +75,6 @@ struct fsl_elbc_fcm_ctrl {
75 unsigned int use_mdr; /* Non zero if the MDR is to be set */ 75 unsigned int use_mdr; /* Non zero if the MDR is to be set */
76 unsigned int oob; /* Non zero if operating on OOB data */ 76 unsigned int oob; /* Non zero if operating on OOB data */
77 unsigned int counter; /* counter for the initializations */ 77 unsigned int counter; /* counter for the initializations */
78 char *oob_poi; /* Place to write ECC after read back */
79}; 78};
80 79
81/* These map to the positions used by the FCM hardware ECC generator */ 80/* These map to the positions used by the FCM hardware ECC generator */
@@ -244,6 +243,25 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
244 return -EIO; 243 return -EIO;
245 } 244 }
246 245
246 if (chip->ecc.mode != NAND_ECC_HW)
247 return 0;
248
249 if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) {
250 uint32_t lteccr = in_be32(&lbc->lteccr);
251 /*
252 * if command was a full page read and the ELBC
253 * has the LTECCR register, then bits 12-15 (ppc order) of
254 * LTECCR indicates which 512 byte sub-pages had fixed errors.
255 * bits 28-31 are uncorrectable errors, marked elsewhere.
256 * for small page nand only 1 bit is used.
257 * if the ELBC doesn't have the lteccr register it reads 0
258 */
259 if (lteccr & 0x000F000F)
260 out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */
261 if (lteccr & 0x000F0000)
262 mtd->ecc_stats.corrected++;
263 }
264
247 return 0; 265 return 0;
248} 266}
249 267
@@ -435,7 +453,6 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
435 453
436 /* PAGEPROG reuses all of the setup from SEQIN and adds the length */ 454 /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
437 case NAND_CMD_PAGEPROG: { 455 case NAND_CMD_PAGEPROG: {
438 int full_page;
439 dev_vdbg(priv->dev, 456 dev_vdbg(priv->dev,
440 "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG " 457 "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG "
441 "writing %d bytes.\n", elbc_fcm_ctrl->index); 458 "writing %d bytes.\n", elbc_fcm_ctrl->index);
@@ -445,34 +462,12 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
445 * write so the HW generates the ECC. 462 * write so the HW generates the ECC.
446 */ 463 */
447 if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 || 464 if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 ||
448 elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize) { 465 elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize)
449 out_be32(&lbc->fbcr, elbc_fcm_ctrl->index); 466 out_be32(&lbc->fbcr, elbc_fcm_ctrl->index);
450 full_page = 0; 467 else
451 } else {
452 out_be32(&lbc->fbcr, 0); 468 out_be32(&lbc->fbcr, 0);
453 full_page = 1;
454 }
455 469
456 fsl_elbc_run_command(mtd); 470 fsl_elbc_run_command(mtd);
457
458 /* Read back the page in order to fill in the ECC for the
459 * caller. Is this really needed?
460 */
461 if (full_page && elbc_fcm_ctrl->oob_poi) {
462 out_be32(&lbc->fbcr, 3);
463 set_addr(mtd, 6, page_addr, 1);
464
465 elbc_fcm_ctrl->read_bytes = mtd->writesize + 9;
466
467 fsl_elbc_do_read(chip, 1);
468 fsl_elbc_run_command(mtd);
469
470 memcpy_fromio(elbc_fcm_ctrl->oob_poi + 6,
471 &elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], 3);
472 elbc_fcm_ctrl->index += 3;
473 }
474
475 elbc_fcm_ctrl->oob_poi = NULL;
476 return; 471 return;
477 } 472 }
478 473
@@ -752,13 +747,8 @@ static void fsl_elbc_write_page(struct mtd_info *mtd,
752 struct nand_chip *chip, 747 struct nand_chip *chip,
753 const uint8_t *buf) 748 const uint8_t *buf)
754{ 749{
755 struct fsl_elbc_mtd *priv = chip->priv;
756 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
757
758 fsl_elbc_write_buf(mtd, buf, mtd->writesize); 750 fsl_elbc_write_buf(mtd, buf, mtd->writesize);
759 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); 751 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
760
761 elbc_fcm_ctrl->oob_poi = chip->oob_poi;
762} 752}
763 753
764static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) 754static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
@@ -791,8 +781,8 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
791 chip->bbt_md = &bbt_mirror_descr; 781 chip->bbt_md = &bbt_mirror_descr;
792 782
793 /* set up nand options */ 783 /* set up nand options */
794 chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR | 784 chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
795 NAND_USE_FLASH_BBT; 785 chip->bbt_options = NAND_BBT_USE_FLASH;
796 786
797 chip->controller = &elbc_fcm_ctrl->controller; 787 chip->controller = &elbc_fcm_ctrl->controller;
798 chip->priv = priv; 788 chip->priv = priv;
@@ -829,7 +819,6 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
829 819
830 elbc_fcm_ctrl->chips[priv->bank] = NULL; 820 elbc_fcm_ctrl->chips[priv->bank] = NULL;
831 kfree(priv); 821 kfree(priv);
832 kfree(elbc_fcm_ctrl);
833 return 0; 822 return 0;
834} 823}
835 824
@@ -842,13 +831,14 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
842 struct resource res; 831 struct resource res;
843 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl; 832 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl;
844 static const char *part_probe_types[] 833 static const char *part_probe_types[]
845 = { "cmdlinepart", "RedBoot", NULL }; 834 = { "cmdlinepart", "RedBoot", "ofpart", NULL };
846 struct mtd_partition *parts;
847 int ret; 835 int ret;
848 int bank; 836 int bank;
849 struct device *dev; 837 struct device *dev;
850 struct device_node *node = pdev->dev.of_node; 838 struct device_node *node = pdev->dev.of_node;
839 struct mtd_part_parser_data ppdata;
851 840
841 ppdata.of_node = pdev->dev.of_node;
852 if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs) 842 if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
853 return -ENODEV; 843 return -ENODEV;
854 lbc = fsl_lbc_ctrl_dev->regs; 844 lbc = fsl_lbc_ctrl_dev->regs;
@@ -934,17 +924,8 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
934 924
935 /* First look for RedBoot table or partitions on the command 925 /* First look for RedBoot table or partitions on the command
936 * line, these take precedence over device tree information */ 926 * line, these take precedence over device tree information */
937 ret = parse_mtd_partitions(&priv->mtd, part_probe_types, &parts, 0); 927 mtd_device_parse_register(&priv->mtd, part_probe_types, &ppdata,
938 if (ret < 0) 928 NULL, 0);
939 goto err;
940
941 if (ret == 0) {
942 ret = of_mtd_parse_partitions(priv->dev, node, &parts);
943 if (ret < 0)
944 goto err;
945 }
946
947 mtd_device_register(&priv->mtd, parts, ret);
948 929
949 printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n", 930 printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n",
950 (unsigned long long)res.start, priv->bank); 931 (unsigned long long)res.start, priv->bank);
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 23752fd5bc59..b4f3cc9f32fb 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -158,7 +158,7 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
158{ 158{
159 int ret; 159 int ret;
160 struct device_node *flash_np; 160 struct device_node *flash_np;
161 static const char *part_types[] = { "cmdlinepart", NULL, }; 161 struct mtd_part_parser_data ppdata;
162 162
163 fun->chip.IO_ADDR_R = fun->io_base; 163 fun->chip.IO_ADDR_R = fun->io_base;
164 fun->chip.IO_ADDR_W = fun->io_base; 164 fun->chip.IO_ADDR_W = fun->io_base;
@@ -192,18 +192,12 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
192 if (ret) 192 if (ret)
193 goto err; 193 goto err;
194 194
195 ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0); 195 ppdata.of_node = flash_np;
196 196 ret = mtd_device_parse_register(&fun->mtd, NULL, &ppdata, NULL, 0);
197#ifdef CONFIG_MTD_OF_PARTS
198 if (ret == 0) {
199 ret = of_mtd_parse_partitions(fun->dev, flash_np, &fun->parts);
200 if (ret < 0)
201 goto err;
202 }
203#endif
204 ret = mtd_device_register(&fun->mtd, fun->parts, ret);
205err: 197err:
206 of_node_put(flash_np); 198 of_node_put(flash_np);
199 if (ret)
200 kfree(fun->mtd.name);
207 return ret; 201 return ret;
208} 202}
209 203
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index e9b275ac381c..e53b76064133 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -146,7 +146,7 @@ static struct mtd_partition partition_info_16KB_blk[] = {
146 { 146 {
147 .name = "Root File System", 147 .name = "Root File System",
148 .offset = 0x460000, 148 .offset = 0x460000,
149 .size = 0, 149 .size = MTDPART_SIZ_FULL,
150 }, 150 },
151}; 151};
152 152
@@ -173,13 +173,10 @@ static struct mtd_partition partition_info_128KB_blk[] = {
173 { 173 {
174 .name = "Root File System", 174 .name = "Root File System",
175 .offset = 0x800000, 175 .offset = 0x800000,
176 .size = 0, 176 .size = MTDPART_SIZ_FULL,
177 }, 177 },
178}; 178};
179 179
180#ifdef CONFIG_MTD_CMDLINE_PARTS
181const char *part_probes[] = { "cmdlinepart", NULL };
182#endif
183 180
184/** 181/**
185 * struct fsmc_nand_data - structure for FSMC NAND device state 182 * struct fsmc_nand_data - structure for FSMC NAND device state
@@ -187,8 +184,6 @@ const char *part_probes[] = { "cmdlinepart", NULL };
187 * @pid: Part ID on the AMBA PrimeCell format 184 * @pid: Part ID on the AMBA PrimeCell format
188 * @mtd: MTD info for a NAND flash. 185 * @mtd: MTD info for a NAND flash.
189 * @nand: Chip related info for a NAND flash. 186 * @nand: Chip related info for a NAND flash.
190 * @partitions: Partition info for a NAND Flash.
191 * @nr_partitions: Total number of partition of a NAND flash.
192 * 187 *
193 * @ecc_place: ECC placing locations in oobfree type format. 188 * @ecc_place: ECC placing locations in oobfree type format.
194 * @bank: Bank number for probed device. 189 * @bank: Bank number for probed device.
@@ -203,8 +198,6 @@ struct fsmc_nand_data {
203 u32 pid; 198 u32 pid;
204 struct mtd_info mtd; 199 struct mtd_info mtd;
205 struct nand_chip nand; 200 struct nand_chip nand;
206 struct mtd_partition *partitions;
207 unsigned int nr_partitions;
208 201
209 struct fsmc_eccplace *ecc_place; 202 struct fsmc_eccplace *ecc_place;
210 unsigned int bank; 203 unsigned int bank;
@@ -716,65 +709,17 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
716 * platform data, 709 * platform data,
717 * default partition information present in driver. 710 * default partition information present in driver.
718 */ 711 */
719#ifdef CONFIG_MTD_CMDLINE_PARTS
720 /* 712 /*
721 * Check if partition info passed via command line 713 * Check for partition info passed
722 */ 714 */
723 host->mtd.name = "nand"; 715 host->mtd.name = "nand";
724 host->nr_partitions = parse_mtd_partitions(&host->mtd, part_probes, 716 ret = mtd_device_parse_register(&host->mtd, NULL, 0,
725 &host->partitions, 0); 717 host->mtd.size <= 0x04000000 ?
726 if (host->nr_partitions <= 0) { 718 partition_info_16KB_blk :
727#endif 719 partition_info_128KB_blk,
728 /* 720 host->mtd.size <= 0x04000000 ?
729 * Check if partition info passed via command line 721 ARRAY_SIZE(partition_info_16KB_blk) :
730 */ 722 ARRAY_SIZE(partition_info_128KB_blk));
731 if (pdata->partitions) {
732 host->partitions = pdata->partitions;
733 host->nr_partitions = pdata->nr_partitions;
734 } else {
735 struct mtd_partition *partition;
736 int i;
737
738 /* Select the default partitions info */
739 switch (host->mtd.size) {
740 case 0x01000000:
741 case 0x02000000:
742 case 0x04000000:
743 host->partitions = partition_info_16KB_blk;
744 host->nr_partitions =
745 sizeof(partition_info_16KB_blk) /
746 sizeof(struct mtd_partition);
747 break;
748 case 0x08000000:
749 case 0x10000000:
750 case 0x20000000:
751 case 0x40000000:
752 host->partitions = partition_info_128KB_blk;
753 host->nr_partitions =
754 sizeof(partition_info_128KB_blk) /
755 sizeof(struct mtd_partition);
756 break;
757 default:
758 ret = -ENXIO;
759 pr_err("Unsupported NAND size\n");
760 goto err_probe;
761 }
762
763 partition = host->partitions;
764 for (i = 0; i < host->nr_partitions; i++, partition++) {
765 if (partition->size == 0) {
766 partition->size = host->mtd.size -
767 partition->offset;
768 break;
769 }
770 }
771 }
772#ifdef CONFIG_MTD_CMDLINE_PARTS
773 }
774#endif
775
776 ret = mtd_device_register(&host->mtd, host->partitions,
777 host->nr_partitions);
778 if (ret) 723 if (ret)
779 goto err_probe; 724 goto err_probe;
780 725
@@ -822,7 +767,7 @@ static int fsmc_nand_remove(struct platform_device *pdev)
822 platform_set_drvdata(pdev, NULL); 767 platform_set_drvdata(pdev, NULL);
823 768
824 if (host) { 769 if (host) {
825 mtd_device_unregister(&host->mtd); 770 nand_release(&host->mtd);
826 clk_disable(host->clk); 771 clk_disable(host->clk);
827 clk_put(host->clk); 772 clk_put(host->clk);
828 773
diff --git a/drivers/mtd/nand/gpmi-nand/Makefile b/drivers/mtd/nand/gpmi-nand/Makefile
new file mode 100644
index 000000000000..3a462487c35e
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o
2gpmi_nand-objs += gpmi-nand.o
3gpmi_nand-objs += gpmi-lib.o
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h
new file mode 100644
index 000000000000..4effb8c579db
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h
@@ -0,0 +1,84 @@
1/*
2 * Freescale GPMI NAND Flash Driver
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 * Copyright 2008 Embedded Alley Solutions, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21#ifndef __GPMI_NAND_BCH_REGS_H
22#define __GPMI_NAND_BCH_REGS_H
23
24#define HW_BCH_CTRL 0x00000000
25#define HW_BCH_CTRL_SET 0x00000004
26#define HW_BCH_CTRL_CLR 0x00000008
27#define HW_BCH_CTRL_TOG 0x0000000c
28
29#define BM_BCH_CTRL_COMPLETE_IRQ_EN (1 << 8)
30#define BM_BCH_CTRL_COMPLETE_IRQ (1 << 0)
31
32#define HW_BCH_STATUS0 0x00000010
33#define HW_BCH_MODE 0x00000020
34#define HW_BCH_ENCODEPTR 0x00000030
35#define HW_BCH_DATAPTR 0x00000040
36#define HW_BCH_METAPTR 0x00000050
37#define HW_BCH_LAYOUTSELECT 0x00000070
38
39#define HW_BCH_FLASH0LAYOUT0 0x00000080
40
41#define BP_BCH_FLASH0LAYOUT0_NBLOCKS 24
42#define BM_BCH_FLASH0LAYOUT0_NBLOCKS (0xff << BP_BCH_FLASH0LAYOUT0_NBLOCKS)
43#define BF_BCH_FLASH0LAYOUT0_NBLOCKS(v) \
44 (((v) << BP_BCH_FLASH0LAYOUT0_NBLOCKS) & BM_BCH_FLASH0LAYOUT0_NBLOCKS)
45
46#define BP_BCH_FLASH0LAYOUT0_META_SIZE 16
47#define BM_BCH_FLASH0LAYOUT0_META_SIZE (0xff << BP_BCH_FLASH0LAYOUT0_META_SIZE)
48#define BF_BCH_FLASH0LAYOUT0_META_SIZE(v) \
49 (((v) << BP_BCH_FLASH0LAYOUT0_META_SIZE)\
50 & BM_BCH_FLASH0LAYOUT0_META_SIZE)
51
52#define BP_BCH_FLASH0LAYOUT0_ECC0 12
53#define BM_BCH_FLASH0LAYOUT0_ECC0 (0xf << BP_BCH_FLASH0LAYOUT0_ECC0)
54#define BF_BCH_FLASH0LAYOUT0_ECC0(v) \
55 (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) & BM_BCH_FLASH0LAYOUT0_ECC0)
56
57#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0
58#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
59 (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
60#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v) \
61 (((v) << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)\
62 & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)
63
64#define HW_BCH_FLASH0LAYOUT1 0x00000090
65
66#define BP_BCH_FLASH0LAYOUT1_PAGE_SIZE 16
67#define BM_BCH_FLASH0LAYOUT1_PAGE_SIZE \
68 (0xffff << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE)
69#define BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(v) \
70 (((v) << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE) \
71 & BM_BCH_FLASH0LAYOUT1_PAGE_SIZE)
72
73#define BP_BCH_FLASH0LAYOUT1_ECCN 12
74#define BM_BCH_FLASH0LAYOUT1_ECCN (0xf << BP_BCH_FLASH0LAYOUT1_ECCN)
75#define BF_BCH_FLASH0LAYOUT1_ECCN(v) \
76 (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) & BM_BCH_FLASH0LAYOUT1_ECCN)
77
78#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0
79#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
80 (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
81#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v) \
82 (((v) << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
83 & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)
84#endif
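
The BP_*/BM_*/BF_*() triplets above follow the usual MXS register idiom: BP_ is a field's bit position, BM_ its mask, and BF_(v) shifts a value into the field and masks it. Below is a minimal sketch, with assumed geometry values (illustrative only, not taken from any particular chip), of composing a layout word from these macros; it mirrors what bch_set_geometry() does in gpmi-lib.c further down:

    /*
     * Hypothetical geometry: 8 ECC chunks (NBLOCKS stores chunk count - 1),
     * 10 bytes of metadata, ECC strength 8 (the field stores half the
     * strength), 512-byte data chunks.
     */
    u32 layout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(8 - 1)
                | BF_BCH_FLASH0LAYOUT0_META_SIZE(10)
                | BF_BCH_FLASH0LAYOUT0_ECC0(8 >> 1)
                | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(512);
    /* layout0 would then be written to bch_regs + HW_BCH_FLASH0LAYOUT0 */
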
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
new file mode 100644
index 000000000000..de4db7604a3f
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -0,0 +1,1057 @@
1/*
2 * Freescale GPMI NAND Flash Driver
3 *
4 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
5 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21#include <linux/mtd/gpmi-nand.h>
22#include <linux/delay.h>
23#include <linux/clk.h>
24#include <mach/mxs.h>
25
26#include "gpmi-nand.h"
27#include "gpmi-regs.h"
28#include "bch-regs.h"
29
30struct timing_threshod timing_default_threshold = {
31 .max_data_setup_cycles = (BM_GPMI_TIMING0_DATA_SETUP >>
32 BP_GPMI_TIMING0_DATA_SETUP),
33 .internal_data_setup_in_ns = 0,
34 .max_sample_delay_factor = (BM_GPMI_CTRL1_RDN_DELAY >>
35 BP_GPMI_CTRL1_RDN_DELAY),
36 .max_dll_clock_period_in_ns = 32,
37 .max_dll_delay_in_ns = 16,
38};
39
40/*
41 * Clear the bit and poll until it is cleared. This is usually called with
42 * a reset address and mask being either SFTRST(bit 31) or CLKGATE
43 * (bit 30).
44 */
45static int clear_poll_bit(void __iomem *addr, u32 mask)
46{
47 int timeout = 0x400;
48
49 /* clear the bit */
50 __mxs_clrl(mask, addr);
51
52 /*
53 * SFTRST needs 3 GPMI clocks to settle, the reference manual
54 * recommends to wait 1us.
55 */
56 udelay(1);
57
58 /* poll the bit becoming clear */
59 while ((readl(addr) & mask) && --timeout)
60 /* nothing */;
61
62 return !timeout;
63}
64
65#define MODULE_CLKGATE (1 << 30)
66#define MODULE_SFTRST (1 << 31)
67/*
68 * The current mxs_reset_block() will do two things:
69 * [1] enable the module.
70 * [2] reset the module.
71 *
72 * In most cases that is fine, but there is a hardware bug in the BCH block:
73 * if you try to soft reset the BCH block, it becomes unusable until the next
74 * hard reset. This case occurs in NAND boot mode: when the board boots from
75 * NAND, the chip's ROM initializes the BCH block itself, so if the driver
76 * tries to reset the BCH again, the BCH will not work anymore and you will
77 * see a DMA timeout.
78 *
79 * To avoid this bug, we add a new parameter, `just_enable`, to
80 * mxs_reset_block() and rewrite it here.
81 */
82int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
83{
84 int ret;
85 int timeout = 0x400;
86
87 /* clear and poll SFTRST */
88 ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
89 if (unlikely(ret))
90 goto error;
91
92 /* clear CLKGATE */
93 __mxs_clrl(MODULE_CLKGATE, reset_addr);
94
95 if (!just_enable) {
96 /* set SFTRST to reset the block */
97 __mxs_setl(MODULE_SFTRST, reset_addr);
98 udelay(1);
99
100 /* poll CLKGATE becoming set */
101 while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
102 /* nothing */;
103 if (unlikely(!timeout))
104 goto error;
105 }
106
107 /* clear and poll SFTRST */
108 ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
109 if (unlikely(ret))
110 goto error;
111
112 /* clear and poll CLKGATE */
113 ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
114 if (unlikely(ret))
115 goto error;
116
117 return 0;
118
119error:
120 pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
121 return -ETIMEDOUT;
122}
123
124int gpmi_init(struct gpmi_nand_data *this)
125{
126 struct resources *r = &this->resources;
127 int ret;
128
129 ret = clk_enable(r->clock);
130 if (ret)
131 goto err_out;
132 ret = gpmi_reset_block(r->gpmi_regs, false);
133 if (ret)
134 goto err_out;
135
136 /* Choose NAND mode. */
137 writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
138
139 /* Set the IRQ polarity. */
140 writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
141 r->gpmi_regs + HW_GPMI_CTRL1_SET);
142
143 /* Disable Write-Protection. */
144 writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
145
146 /* Select BCH ECC. */
147 writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
148
149 clk_disable(r->clock);
150 return 0;
151err_out:
152 return ret;
153}
154
155/* This function is very useful. It is called only when a bug occurs. */
156void gpmi_dump_info(struct gpmi_nand_data *this)
157{
158 struct resources *r = &this->resources;
159 struct bch_geometry *geo = &this->bch_geometry;
160 u32 reg;
161 int i;
162
163 pr_err("Show GPMI registers :\n");
164 for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
165 reg = readl(r->gpmi_regs + i * 0x10);
166 pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
167 }
168
169 /* start to print out the BCH info */
170 pr_err("BCH Geometry :\n");
171 pr_err("GF length : %u\n", geo->gf_len);
172 pr_err("ECC Strength : %u\n", geo->ecc_strength);
173 pr_err("Page Size in Bytes : %u\n", geo->page_size);
174 pr_err("Metadata Size in Bytes : %u\n", geo->metadata_size);
175 pr_err("ECC Chunk Size in Bytes: %u\n", geo->ecc_chunk_size);
176 pr_err("ECC Chunk Count : %u\n", geo->ecc_chunk_count);
177 pr_err("Payload Size in Bytes : %u\n", geo->payload_size);
178 pr_err("Auxiliary Size in Bytes: %u\n", geo->auxiliary_size);
179 pr_err("Auxiliary Status Offset: %u\n", geo->auxiliary_status_offset);
180 pr_err("Block Mark Byte Offset : %u\n", geo->block_mark_byte_offset);
181 pr_err("Block Mark Bit Offset : %u\n", geo->block_mark_bit_offset);
182}
183
184/* Configures the geometry for BCH. */
185int bch_set_geometry(struct gpmi_nand_data *this)
186{
187 struct resources *r = &this->resources;
188 struct bch_geometry *bch_geo = &this->bch_geometry;
189 unsigned int block_count;
190 unsigned int block_size;
191 unsigned int metadata_size;
192 unsigned int ecc_strength;
193 unsigned int page_size;
194 int ret;
195
196 if (common_nfc_set_geometry(this))
197 return !0;
198
199 block_count = bch_geo->ecc_chunk_count - 1;
200 block_size = bch_geo->ecc_chunk_size;
201 metadata_size = bch_geo->metadata_size;
202 ecc_strength = bch_geo->ecc_strength >> 1;
203 page_size = bch_geo->page_size;
204
205 ret = clk_enable(r->clock);
206 if (ret)
207 goto err_out;
208
209 ret = gpmi_reset_block(r->bch_regs, true);
210 if (ret)
211 goto err_out;
212
213 /* Configure layout 0. */
214 writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
215 | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
216 | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength)
217 | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size),
218 r->bch_regs + HW_BCH_FLASH0LAYOUT0);
219
220 writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
221 | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength)
222 | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size),
223 r->bch_regs + HW_BCH_FLASH0LAYOUT1);
224
225 /* Set *all* chip selects to use layout 0. */
226 writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
227
228 /* Enable interrupts. */
229 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
230 r->bch_regs + HW_BCH_CTRL_SET);
231
232 clk_disable(r->clock);
233 return 0;
234err_out:
235 return ret;
236}
237
238/* Converts time in nanoseconds to cycles. */
239static unsigned int ns_to_cycles(unsigned int time,
240 unsigned int period, unsigned int min)
241{
242 unsigned int k;
243
244 k = (time + period - 1) / period;
245 return max(k, min);
246}
247
248/* Apply timing to current hardware conditions. */
249static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
250 struct gpmi_nfc_hardware_timing *hw)
251{
252 struct gpmi_nand_platform_data *pdata = this->pdata;
253 struct timing_threshod *nfc = &timing_default_threshold;
254 struct nand_chip *nand = &this->nand;
255 struct nand_timing target = this->timing;
256 bool improved_timing_is_available;
257 unsigned long clock_frequency_in_hz;
258 unsigned int clock_period_in_ns;
259 bool dll_use_half_periods;
260 unsigned int dll_delay_shift;
261 unsigned int max_sample_delay_in_ns;
262 unsigned int address_setup_in_cycles;
263 unsigned int data_setup_in_ns;
264 unsigned int data_setup_in_cycles;
265 unsigned int data_hold_in_cycles;
266 int ideal_sample_delay_in_ns;
267 unsigned int sample_delay_factor;
268 int tEYE;
269 unsigned int min_prop_delay_in_ns = pdata->min_prop_delay_in_ns;
270 unsigned int max_prop_delay_in_ns = pdata->max_prop_delay_in_ns;
271
272 /*
273 * If there are multiple chips, we need to relax the timings to allow
274 * for signal distortion due to higher capacitance.
275 */
276 if (nand->numchips > 2) {
277 target.data_setup_in_ns += 10;
278 target.data_hold_in_ns += 10;
279 target.address_setup_in_ns += 10;
280 } else if (nand->numchips > 1) {
281 target.data_setup_in_ns += 5;
282 target.data_hold_in_ns += 5;
283 target.address_setup_in_ns += 5;
284 }
285
286 /* Check if improved timing information is available. */
287 improved_timing_is_available =
288 (target.tREA_in_ns >= 0) &&
289 (target.tRLOH_in_ns >= 0) &&
290 (target.tRHOH_in_ns >= 0) ;
291
292 /* Inspect the clock. */
293 clock_frequency_in_hz = nfc->clock_frequency_in_hz;
294 clock_period_in_ns = 1000000000 / clock_frequency_in_hz;
295
296 /*
297 * The NFC quantizes setup and hold parameters in terms of clock cycles.
298 * Here, we quantize the setup and hold timing parameters to the
299 * next-highest clock period to make sure we apply at least the
300 * specified times.
301 *
302 * For data setup and data hold, the hardware interprets a value of zero
303 * as the largest possible delay. This is not what's intended by a zero
304 * in the input parameter, so we impose a minimum of one cycle.
305 */
306 data_setup_in_cycles = ns_to_cycles(target.data_setup_in_ns,
307 clock_period_in_ns, 1);
308 data_hold_in_cycles = ns_to_cycles(target.data_hold_in_ns,
309 clock_period_in_ns, 1);
310 address_setup_in_cycles = ns_to_cycles(target.address_setup_in_ns,
311 clock_period_in_ns, 0);
312
313 /*
314 * The clock's period affects the sample delay in a number of ways:
315 *
316 * (1) The NFC HAL tells us the maximum clock period the sample delay
317 * DLL can tolerate. If the clock period is greater than half that
318 * maximum, we must configure the DLL to be driven by half periods.
319 *
320 * (2) We need to convert from an ideal sample delay, in ns, to a
321 * "sample delay factor," which the NFC uses. This factor depends on
322 * whether we're driving the DLL with full or half periods.
323 * Paraphrasing the reference manual:
324 *
325 * AD = SDF x 0.125 x RP
326 *
327 * where:
328 *
329 * AD is the applied delay, in ns.
330 * SDF is the sample delay factor, which is dimensionless.
331 * RP is the reference period, in ns, which is a full clock period
332 * if the DLL is being driven by full periods, or half that if
333 * the DLL is being driven by half periods.
334 *
335 * Let's re-arrange this in a way that's more useful to us:
336 *
337 * 8
338 * SDF = AD x ----
339 * RP
340 *
341 * The reference period is either the clock period or half that, so this
342 * is:
343 *
344 * 8 AD x DDF
345 * SDF = AD x ----- = --------
346 * f x P P
347 *
348 * where:
349 *
350 * f is 1 or 1/2, depending on how we're driving the DLL.
351 * P is the clock period.
352 * DDF is the DLL Delay Factor, a dimensionless value that
353 * incorporates all the constants in the conversion.
354 *
355 * DDF will be either 8 or 16, both of which are powers of two. We can
356 * reduce the cost of this conversion by using bit shifts instead of
357 * multiplication or division. Thus:
358 *
359 * AD << DDS
360 * SDF = ---------
361 * P
362 *
363 * or
364 *
365 * AD = (SDF >> DDS) x P
366 *
367 * where:
368 *
369 * DDS is the DLL Delay Shift, the logarithm to base 2 of the DDF.
370 */
371 if (clock_period_in_ns > (nfc->max_dll_clock_period_in_ns >> 1)) {
372 dll_use_half_periods = true;
373 dll_delay_shift = 3 + 1;
374 } else {
375 dll_use_half_periods = false;
376 dll_delay_shift = 3;
377 }
378
379 /*
380 * Compute the maximum sample delay the NFC allows, under current
381 * conditions. If the clock is running too slowly, no sample delay is
382 * possible.
383 */
384 if (clock_period_in_ns > nfc->max_dll_clock_period_in_ns)
385 max_sample_delay_in_ns = 0;
386 else {
387 /*
388 * Compute the delay implied by the largest sample delay factor
389 * the NFC allows.
390 */
391 max_sample_delay_in_ns =
392 (nfc->max_sample_delay_factor * clock_period_in_ns) >>
393 dll_delay_shift;
394
395 /*
396 * Check if the implied sample delay larger than the NFC
397 * actually allows.
398 */
399 if (max_sample_delay_in_ns > nfc->max_dll_delay_in_ns)
400 max_sample_delay_in_ns = nfc->max_dll_delay_in_ns;
401 }
402
403 /*
404 * Check if improved timing information is available. If not, we have to
405 * use a less-sophisticated algorithm.
406 */
407 if (!improved_timing_is_available) {
408 /*
409 * Fold the read setup time required by the NFC into the ideal
410 * sample delay.
411 */
412 ideal_sample_delay_in_ns = target.gpmi_sample_delay_in_ns +
413 nfc->internal_data_setup_in_ns;
414
415 /*
416 * The ideal sample delay may be greater than the maximum
417 * allowed by the NFC. If so, we can trade off sample delay time
418 * for more data setup time.
419 *
420 * In each iteration of the following loop, we add a cycle to
421 * the data setup time and subtract a corresponding amount from
422 * the sample delay until we've satisfied the constraints or
423 * can't do any better.
424 */
425 while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
426 (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
427
428 data_setup_in_cycles++;
429 ideal_sample_delay_in_ns -= clock_period_in_ns;
430
431 if (ideal_sample_delay_in_ns < 0)
432 ideal_sample_delay_in_ns = 0;
433
434 }
435
436 /*
437 * Compute the sample delay factor that corresponds most closely
438 * to the ideal sample delay. If the result is too large for the
439 * NFC, use the maximum value.
440 *
441 * Notice that we use the ns_to_cycles function to compute the
442 * sample delay factor. We do this because the form of the
443 * computation is the same as that for calculating cycles.
444 */
445 sample_delay_factor =
446 ns_to_cycles(
447 ideal_sample_delay_in_ns << dll_delay_shift,
448 clock_period_in_ns, 0);
449
450 if (sample_delay_factor > nfc->max_sample_delay_factor)
451 sample_delay_factor = nfc->max_sample_delay_factor;
452
453 /* Skip to the part where we return our results. */
454 goto return_results;
455 }
456
457 /*
458 * If control arrives here, we have more detailed timing information,
459 * so we can use a better algorithm.
460 */
461
462 /*
463 * Fold the read setup time required by the NFC into the maximum
464 * propagation delay.
465 */
466 max_prop_delay_in_ns += nfc->internal_data_setup_in_ns;
467
468 /*
469 * Earlier, we computed the number of clock cycles required to satisfy
470 * the data setup time. Now, we need to know the actual nanoseconds.
471 */
472 data_setup_in_ns = clock_period_in_ns * data_setup_in_cycles;
473
474 /*
475 * Compute tEYE, the width of the data eye when reading from the NAND
476 * Flash. The eye width is fundamentally determined by the data setup
477 * time, perturbed by propagation delays and some characteristics of the
478 * NAND Flash device.
479 *
480 * start of the eye = max_prop_delay + tREA
481 * end of the eye = min_prop_delay + tRHOH + data_setup
482 */
483 tEYE = (int)min_prop_delay_in_ns + (int)target.tRHOH_in_ns +
484 (int)data_setup_in_ns;
485
486 tEYE -= (int)max_prop_delay_in_ns + (int)target.tREA_in_ns;
487
488 /*
489 * The eye must be open. If it's not, we can try to open it by
490 * increasing its main forcer, the data setup time.
491 *
492 * In each iteration of the following loop, we increase the data setup
493 * time by a single clock cycle. We do this until either the eye is
494 * open or we run into NFC limits.
495 */
496 while ((tEYE <= 0) &&
497 (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
498 /* Give a cycle to data setup. */
499 data_setup_in_cycles++;
500 /* Synchronize the data setup time with the cycles. */
501 data_setup_in_ns += clock_period_in_ns;
502 /* Adjust tEYE accordingly. */
503 tEYE += clock_period_in_ns;
504 }
505
506 /*
507 * When control arrives here, the eye is open. The ideal time to sample
508 * the data is in the center of the eye:
509 *
510 * end of the eye + start of the eye
511 * --------------------------------- - data_setup
512 * 2
513 *
514 * After some algebra, this simplifies to the code immediately below.
515 */
516 ideal_sample_delay_in_ns =
517 ((int)max_prop_delay_in_ns +
518 (int)target.tREA_in_ns +
519 (int)min_prop_delay_in_ns +
520 (int)target.tRHOH_in_ns -
521 (int)data_setup_in_ns) >> 1;
522
523 /*
524 * The following figure illustrates some aspects of a NAND Flash read:
525 *
526 *
527 * __ _____________________________________
528 * RDN \_________________/
529 *
530 * <---- tEYE ----->
531 * /-----------------\
532 * Read Data ----------------------------< >---------
533 * \-----------------/
534 * ^ ^ ^ ^
535 * | | | |
536 * |<--Data Setup -->|<--Delay Time -->| |
537 * | | | |
538 * | | |
539 * | |<-- Quantized Delay Time -->|
540 * | | |
541 *
542 *
543 * We have some issues we must now address:
544 *
545 * (1) The *ideal* sample delay time must not be negative. If it is, we
546 * jam it to zero.
547 *
548 * (2) The *ideal* sample delay time must not be greater than that
549 * allowed by the NFC. If it is, we can increase the data setup
550 * time, which will reduce the delay between the end of the data
551 * setup and the center of the eye. It will also make the eye
552 * larger, which might help with the next issue...
553 *
554 * (3) The *quantized* sample delay time must not fall either before the
555 * eye opens or after it closes (the latter is the problem
556 * illustrated in the above figure).
557 */
558
559 /* Jam a negative ideal sample delay to zero. */
560 if (ideal_sample_delay_in_ns < 0)
561 ideal_sample_delay_in_ns = 0;
562
563 /*
564 * Extend the data setup as needed to reduce the ideal sample delay
565 * below the maximum permitted by the NFC.
566 */
567 while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
568 (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
569
570 /* Give a cycle to data setup. */
571 data_setup_in_cycles++;
572 /* Synchronize the data setup time with the cycles. */
573 data_setup_in_ns += clock_period_in_ns;
574 /* Adjust tEYE accordingly. */
575 tEYE += clock_period_in_ns;
576
577 /*
578 * Decrease the ideal sample delay by one half cycle, to keep it
579 * in the middle of the eye.
580 */
581 ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);
582
583 /* Jam a negative ideal sample delay to zero. */
584 if (ideal_sample_delay_in_ns < 0)
585 ideal_sample_delay_in_ns = 0;
586 }
587
588 /*
589 * Compute the sample delay factor that corresponds to the ideal sample
590 * delay. If the result is too large, then use the maximum allowed
591 * value.
592 *
593 * Notice that we use the ns_to_cycles function to compute the sample
594 * delay factor. We do this because the form of the computation is the
595 * same as that for calculating cycles.
596 */
597 sample_delay_factor =
598 ns_to_cycles(ideal_sample_delay_in_ns << dll_delay_shift,
599 clock_period_in_ns, 0);
600
601 if (sample_delay_factor > nfc->max_sample_delay_factor)
602 sample_delay_factor = nfc->max_sample_delay_factor;
603
604 /*
605 * These macros conveniently encapsulate a computation we'll use to
606 * continuously evaluate whether or not the data sample delay is inside
607 * the eye.
608 */
609 #define IDEAL_DELAY ((int) ideal_sample_delay_in_ns)
610
611 #define QUANTIZED_DELAY \
612 ((int) ((sample_delay_factor * clock_period_in_ns) >> \
613 dll_delay_shift))
614
615 #define DELAY_ERROR (abs(QUANTIZED_DELAY - IDEAL_DELAY))
616
617 #define SAMPLE_IS_NOT_WITHIN_THE_EYE (DELAY_ERROR > (tEYE >> 1))
618
619 /*
620 * While the quantized sample time falls outside the eye, reduce the
621 * sample delay or extend the data setup to move the sampling point back
622 * toward the eye. Do not allow the number of data setup cycles to
623 * exceed the maximum allowed by the NFC.
624 */
625 while (SAMPLE_IS_NOT_WITHIN_THE_EYE &&
626 (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
627 /*
628 * If control arrives here, the quantized sample delay falls
629 * outside the eye. Check if it's before the eye opens, or after
630 * the eye closes.
631 */
632 if (QUANTIZED_DELAY > IDEAL_DELAY) {
633 /*
634 * If control arrives here, the quantized sample delay
635 * falls after the eye closes. Decrease the quantized
636 * delay time and then go back to re-evaluate.
637 */
638 if (sample_delay_factor != 0)
639 sample_delay_factor--;
640 continue;
641 }
642
643 /*
644 * If control arrives here, the quantized sample delay falls
645 * before the eye opens. Shift the sample point by increasing
646 * data setup time. This will also make the eye larger.
647 */
648
649 /* Give a cycle to data setup. */
650 data_setup_in_cycles++;
651 /* Synchronize the data setup time with the cycles. */
652 data_setup_in_ns += clock_period_in_ns;
653 /* Adjust tEYE accordingly. */
654 tEYE += clock_period_in_ns;
655
656 /*
657 * Decrease the ideal sample delay by one half cycle, to keep it
658 * in the middle of the eye.
659 */
660 ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);
661
662 /* ...and one less period for the delay time. */
663 ideal_sample_delay_in_ns -= clock_period_in_ns;
664
665 /* Jam a negative ideal sample delay to zero. */
666 if (ideal_sample_delay_in_ns < 0)
667 ideal_sample_delay_in_ns = 0;
668
669 /*
670 * We have a new ideal sample delay, so re-compute the quantized
671 * delay.
672 */
673 sample_delay_factor =
674 ns_to_cycles(
675 ideal_sample_delay_in_ns << dll_delay_shift,
676 clock_period_in_ns, 0);
677
678 if (sample_delay_factor > nfc->max_sample_delay_factor)
679 sample_delay_factor = nfc->max_sample_delay_factor;
680 }
681
682 /* Control arrives here when we're ready to return our results. */
683return_results:
684 hw->data_setup_in_cycles = data_setup_in_cycles;
685 hw->data_hold_in_cycles = data_hold_in_cycles;
686 hw->address_setup_in_cycles = address_setup_in_cycles;
687 hw->use_half_periods = dll_use_half_periods;
688 hw->sample_delay_factor = sample_delay_factor;
689
690 /* Return success. */
691 return 0;
692}
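
A small worked example of the SDF conversion described in the long comment inside this function, under assumed conditions: a hypothetical 100 MHz GPMI clock gives P = 10 ns, which is not more than half of max_dll_clock_period_in_ns (32 ns), so the DLL runs on full periods and DDS = 3. For an ideal sample delay of 6 ns:

    unsigned int P = 10, DDS = 3, AD = 6;          /* assumed clock period and ideal delay */
    unsigned int SDF = ((AD << DDS) + P - 1) / P;  /* same form as ns_to_cycles(): (48 + 9) / 10 = 5 */
    unsigned int applied = (SDF * P) >> DDS;       /* quantized delay: (5 * 10) >> 3 = 6 ns */
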
693
694/* Begin the I/O */
695void gpmi_begin(struct gpmi_nand_data *this)
696{
697 struct resources *r = &this->resources;
698 struct timing_threshod *nfc = &timing_default_threshold;
699 unsigned char *gpmi_regs = r->gpmi_regs;
700 unsigned int clock_period_in_ns;
701 uint32_t reg;
702 unsigned int dll_wait_time_in_us;
703 struct gpmi_nfc_hardware_timing hw;
704 int ret;
705
706 /* Enable the clock. */
707 ret = clk_enable(r->clock);
708 if (ret) {
709 pr_err("Failed to enable the clock\n");
710 goto err_out;
711 }
712
713 /* set ready/busy timeout */
714 writel(0x500 << BP_GPMI_TIMING1_BUSY_TIMEOUT,
715 gpmi_regs + HW_GPMI_TIMING1);
716
717 /* Get the timing information we need. */
718 nfc->clock_frequency_in_hz = clk_get_rate(r->clock);
719 clock_period_in_ns = 1000000000 / nfc->clock_frequency_in_hz;
720
721 gpmi_nfc_compute_hardware_timing(this, &hw);
722
723 /* Set up all the simple timing parameters. */
724 reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |
725 BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles) |
726 BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles) ;
727
728 writel(reg, gpmi_regs + HW_GPMI_TIMING0);
729
730 /*
731 * DLL_ENABLE must be set to 0 when setting RDN_DELAY or HALF_PERIOD.
732 */
733 writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR);
734
735 /* Clear out the DLL control fields. */
736 writel(BM_GPMI_CTRL1_RDN_DELAY, gpmi_regs + HW_GPMI_CTRL1_CLR);
737 writel(BM_GPMI_CTRL1_HALF_PERIOD, gpmi_regs + HW_GPMI_CTRL1_CLR);
738
739 /* If no sample delay is called for, return immediately. */
740 if (!hw.sample_delay_factor)
741 return;
742
743 /* Configure the HALF_PERIOD flag. */
744 if (hw.use_half_periods)
745 writel(BM_GPMI_CTRL1_HALF_PERIOD,
746 gpmi_regs + HW_GPMI_CTRL1_SET);
747
748 /* Set the delay factor. */
749 writel(BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor),
750 gpmi_regs + HW_GPMI_CTRL1_SET);
751
752 /* Enable the DLL. */
753 writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET);
754
755 /*
756 * After we enable the GPMI DLL, we have to wait 64 clock cycles before
757 * we can use the GPMI.
758 *
759 * Calculate the amount of time we need to wait, in microseconds.
760 */
761 dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000;
762
763 if (!dll_wait_time_in_us)
764 dll_wait_time_in_us = 1;
765
766 /* Wait for the DLL to settle. */
767 udelay(dll_wait_time_in_us);
768
769err_out:
770 return;
771}
772
773void gpmi_end(struct gpmi_nand_data *this)
774{
775 struct resources *r = &this->resources;
776 clk_disable(r->clock);
777}
778
779/* Clears a BCH interrupt. */
780void gpmi_clear_bch(struct gpmi_nand_data *this)
781{
782 struct resources *r = &this->resources;
783 writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
784}
785
786/* Returns the Ready/Busy status of the given chip. */
787int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
788{
789 struct resources *r = &this->resources;
790 uint32_t mask = 0;
791 uint32_t reg = 0;
792
793 if (GPMI_IS_MX23(this)) {
794 mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
795 reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
796 } else if (GPMI_IS_MX28(this)) {
797 mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
798 reg = readl(r->gpmi_regs + HW_GPMI_STAT);
799 } else
800 pr_err("unknown arch.\n");
801 return reg & mask;
802}
803
804static inline void set_dma_type(struct gpmi_nand_data *this,
805 enum dma_ops_type type)
806{
807 this->last_dma_type = this->dma_type;
808 this->dma_type = type;
809}
810
811int gpmi_send_command(struct gpmi_nand_data *this)
812{
813 struct dma_chan *channel = get_dma_chan(this);
814 struct dma_async_tx_descriptor *desc;
815 struct scatterlist *sgl;
816 int chip = this->current_chip;
817 u32 pio[3];
818
819 /* [1] send out the PIO words */
820 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
821 | BM_GPMI_CTRL0_WORD_LENGTH
822 | BF_GPMI_CTRL0_CS(chip, this)
823 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
824 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
825 | BM_GPMI_CTRL0_ADDRESS_INCREMENT
826 | BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
827 pio[1] = pio[2] = 0;
828 desc = channel->device->device_prep_slave_sg(channel,
829 (struct scatterlist *)pio,
830 ARRAY_SIZE(pio), DMA_NONE, 0);
831 if (!desc) {
832 pr_err("step 1 error\n");
833 return -1;
834 }
835
836 /* [2] send out the COMMAND + ADDRESS string stored in @buffer */
837 sgl = &this->cmd_sgl;
838
839 sg_init_one(sgl, this->cmd_buffer, this->command_length);
840 dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
841 desc = channel->device->device_prep_slave_sg(channel,
842 sgl, 1, DMA_TO_DEVICE, 1);
843 if (!desc) {
844 pr_err("step 2 error\n");
845 return -1;
846 }
847
848 /* [3] submit the DMA */
849 set_dma_type(this, DMA_FOR_COMMAND);
850 return start_dma_without_bch_irq(this, desc);
851}
852
853int gpmi_send_data(struct gpmi_nand_data *this)
854{
855 struct dma_async_tx_descriptor *desc;
856 struct dma_chan *channel = get_dma_chan(this);
857 int chip = this->current_chip;
858 uint32_t command_mode;
859 uint32_t address;
860 u32 pio[2];
861
862 /* [1] PIO */
863 command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
864 address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
865
866 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
867 | BM_GPMI_CTRL0_WORD_LENGTH
868 | BF_GPMI_CTRL0_CS(chip, this)
869 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
870 | BF_GPMI_CTRL0_ADDRESS(address)
871 | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
872 pio[1] = 0;
873 desc = channel->device->device_prep_slave_sg(channel,
874 (struct scatterlist *)pio,
875 ARRAY_SIZE(pio), DMA_NONE, 0);
876 if (!desc) {
877 pr_err("step 1 error\n");
878 return -1;
879 }
880
881 /* [2] send DMA request */
882 prepare_data_dma(this, DMA_TO_DEVICE);
883 desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
884 1, DMA_TO_DEVICE, 1);
885 if (!desc) {
886 pr_err("step 2 error\n");
887 return -1;
888 }
889 /* [3] submit the DMA */
890 set_dma_type(this, DMA_FOR_WRITE_DATA);
891 return start_dma_without_bch_irq(this, desc);
892}
893
894int gpmi_read_data(struct gpmi_nand_data *this)
895{
896 struct dma_async_tx_descriptor *desc;
897 struct dma_chan *channel = get_dma_chan(this);
898 int chip = this->current_chip;
899 u32 pio[2];
900
901 /* [1] : send PIO */
902 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
903 | BM_GPMI_CTRL0_WORD_LENGTH
904 | BF_GPMI_CTRL0_CS(chip, this)
905 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
906 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
907 | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
908 pio[1] = 0;
909 desc = channel->device->device_prep_slave_sg(channel,
910 (struct scatterlist *)pio,
911 ARRAY_SIZE(pio), DMA_NONE, 0);
912 if (!desc) {
913 pr_err("step 1 error\n");
914 return -1;
915 }
916
917 /* [2] : send DMA request */
918 prepare_data_dma(this, DMA_FROM_DEVICE);
919 desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
920 1, DMA_FROM_DEVICE, 1);
921 if (!desc) {
922 pr_err("step 2 error\n");
923 return -1;
924 }
925
926 /* [3] : submit the DMA */
927 set_dma_type(this, DMA_FOR_READ_DATA);
928 return start_dma_without_bch_irq(this, desc);
929}
930
931int gpmi_send_page(struct gpmi_nand_data *this,
932 dma_addr_t payload, dma_addr_t auxiliary)
933{
934 struct bch_geometry *geo = &this->bch_geometry;
935 uint32_t command_mode;
936 uint32_t address;
937 uint32_t ecc_command;
938 uint32_t buffer_mask;
939 struct dma_async_tx_descriptor *desc;
940 struct dma_chan *channel = get_dma_chan(this);
941 int chip = this->current_chip;
942 u32 pio[6];
943
944 /* A DMA descriptor that does an ECC page write. */
945 command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
946 address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
947 ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE;
948 buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
949 BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
950
951 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
952 | BM_GPMI_CTRL0_WORD_LENGTH
953 | BF_GPMI_CTRL0_CS(chip, this)
954 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
955 | BF_GPMI_CTRL0_ADDRESS(address)
956 | BF_GPMI_CTRL0_XFER_COUNT(0);
957 pio[1] = 0;
958 pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
959 | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
960 | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
961 pio[3] = geo->page_size;
962 pio[4] = payload;
963 pio[5] = auxiliary;
964
965 desc = channel->device->device_prep_slave_sg(channel,
966 (struct scatterlist *)pio,
967 ARRAY_SIZE(pio), DMA_NONE, 0);
968 if (!desc) {
969 pr_err("step 2 error\n");
970 return -1;
971 }
972 set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);
973 return start_dma_with_bch_irq(this, desc);
974}
975
976int gpmi_read_page(struct gpmi_nand_data *this,
977 dma_addr_t payload, dma_addr_t auxiliary)
978{
979 struct bch_geometry *geo = &this->bch_geometry;
980 uint32_t command_mode;
981 uint32_t address;
982 uint32_t ecc_command;
983 uint32_t buffer_mask;
984 struct dma_async_tx_descriptor *desc;
985 struct dma_chan *channel = get_dma_chan(this);
986 int chip = this->current_chip;
987 u32 pio[6];
988
989 /* [1] Wait for the chip to report ready. */
990 command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
991 address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
992
993 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
994 | BM_GPMI_CTRL0_WORD_LENGTH
995 | BF_GPMI_CTRL0_CS(chip, this)
996 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
997 | BF_GPMI_CTRL0_ADDRESS(address)
998 | BF_GPMI_CTRL0_XFER_COUNT(0);
999 pio[1] = 0;
1000 desc = channel->device->device_prep_slave_sg(channel,
1001 (struct scatterlist *)pio, 2, DMA_NONE, 0);
1002 if (!desc) {
1003 pr_err("step 1 error\n");
1004 return -1;
1005 }
1006
1007 /* [2] Enable the BCH block and read. */
1008 command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
1009 address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
1010 ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE;
1011 buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
1012 | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
1013
1014 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
1015 | BM_GPMI_CTRL0_WORD_LENGTH
1016 | BF_GPMI_CTRL0_CS(chip, this)
1017 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
1018 | BF_GPMI_CTRL0_ADDRESS(address)
1019 | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
1020
1021 pio[1] = 0;
1022 pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
1023 | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
1024 | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
1025 pio[3] = geo->page_size;
1026 pio[4] = payload;
1027 pio[5] = auxiliary;
1028 desc = channel->device->device_prep_slave_sg(channel,
1029 (struct scatterlist *)pio,
1030 ARRAY_SIZE(pio), DMA_NONE, 1);
1031 if (!desc) {
1032 pr_err("step 2 error\n");
1033 return -1;
1034 }
1035
1036 /* [3] Disable the BCH block */
1037 command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
1038 address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
1039
1040 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
1041 | BM_GPMI_CTRL0_WORD_LENGTH
1042 | BF_GPMI_CTRL0_CS(chip, this)
1043 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
1044 | BF_GPMI_CTRL0_ADDRESS(address)
1045 | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
1046 pio[1] = 0;
1047 desc = channel->device->device_prep_slave_sg(channel,
1048 (struct scatterlist *)pio, 2, DMA_NONE, 1);
1049 if (!desc) {
1050 pr_err("step 3 error\n");
1051 return -1;
1052 }
1053
1054 /* [4] submit the DMA */
1055 set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
1056 return start_dma_with_bch_irq(this, desc);
1057}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
new file mode 100644
index 000000000000..071b63420f0e
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -0,0 +1,1619 @@
1/*
2 * Freescale GPMI NAND Flash Driver
3 *
4 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
5 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21#include <linux/clk.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/mtd/gpmi-nand.h>
25#include <linux/mtd/partitions.h>
26
27#include "gpmi-nand.h"
28
29/* add our own bbt descriptor */
30static uint8_t scan_ff_pattern[] = { 0xff };
31static struct nand_bbt_descr gpmi_bbt_descr = {
32 .options = 0,
33 .offs = 0,
34 .len = 1,
35 .pattern = scan_ff_pattern
36};
37
38/* We will use all the (page + OOB). */
39static struct nand_ecclayout gpmi_hw_ecclayout = {
40 .eccbytes = 0,
41 .eccpos = { 0, },
42 .oobfree = { {.offset = 0, .length = 0} }
43};
44
45static irqreturn_t bch_irq(int irq, void *cookie)
46{
47 struct gpmi_nand_data *this = cookie;
48
49 gpmi_clear_bch(this);
50 complete(&this->bch_done);
51 return IRQ_HANDLED;
52}
53
54/*
55 * Calculate the ECC strength by hand:
56 * E : the ECC strength.
57 * G : the length of the Galois Field.
58 * N : the number of ECC chunks per page.
59 * O : the oobsize of the NAND chip.
60 * M : the metadata size per page.
61 *
62 * The formula is :
63 * E * G * N
64 * ------------ <= (O - M)
65 * 8
66 *
67 * So, we get E by:
68 * (O - M) * 8
69 * E <= -------------
70 * G * N
71 */
72static inline int get_ecc_strength(struct gpmi_nand_data *this)
73{
74 struct bch_geometry *geo = &this->bch_geometry;
75 struct mtd_info *mtd = &this->mtd;
76 int ecc_strength;
77
78 ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
79 / (geo->gf_len * geo->ecc_chunk_count);
80
81 /* Round down to the nearest even number. */
82 return round_down(ecc_strength, 2);
83}
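
As a worked instance of the formula in the comment above (values are assumed for illustration: a hypothetical 2048+64 page with the defaults chosen in common_nfc_set_geometry() below, i.e. 10-byte metadata, GF length 13 and four 512-byte chunks):

    unsigned int oobsize = 64, metadata = 10, gf_len = 13, chunks = 4;
    unsigned int e = ((oobsize - metadata) * 8) / (gf_len * chunks); /* 432 / 52 = 8 */
    unsigned int ecc_strength = e & ~1U;   /* round_down(e, 2): still 8, an even strength */
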
84
85int common_nfc_set_geometry(struct gpmi_nand_data *this)
86{
87 struct bch_geometry *geo = &this->bch_geometry;
88 struct mtd_info *mtd = &this->mtd;
89 unsigned int metadata_size;
90 unsigned int status_size;
91 unsigned int block_mark_bit_offset;
92
93 /*
94 * The size of the metadata can be changed, though we set it to 10
95 * bytes now. But it can't be too large, because we have to save
96 * enough space for BCH.
97 */
98 geo->metadata_size = 10;
99
100 /* The default for the length of Galois Field. */
101 geo->gf_len = 13;
102
103 /* The default chunk size. There is no oobsize greater than 512. */
104 geo->ecc_chunk_size = 512;
105 while (geo->ecc_chunk_size < mtd->oobsize)
106 geo->ecc_chunk_size *= 2; /* keep C >= O */
107
108 geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
109
110 /* We use the same ECC strength for all chunks. */
111 geo->ecc_strength = get_ecc_strength(this);
112 if (!geo->ecc_strength) {
113 pr_err("We get a wrong ECC strength.\n");
114 return -EINVAL;
115 }
116
117 geo->page_size = mtd->writesize + mtd->oobsize;
118 geo->payload_size = mtd->writesize;
119
120 /*
121 * The auxiliary buffer contains the metadata and the ECC status. The
122 * metadata is padded to the nearest 32-bit boundary. The ECC status
123 * contains one byte for every ECC chunk, and is also padded to the
124 * nearest 32-bit boundary.
125 */
126 metadata_size = ALIGN(geo->metadata_size, 4);
127 status_size = ALIGN(geo->ecc_chunk_count, 4);
128
129 geo->auxiliary_size = metadata_size + status_size;
130 geo->auxiliary_status_offset = metadata_size;
131
132 if (!this->swap_block_mark)
133 return 0;
134
135 /*
136 * We need to compute the byte and bit offsets of
137 * the physical block mark within the ECC-based view of the page.
138 *
139 * A NAND chip with a 2K page is shown below:
140 * (Block Mark)
141 * | |
142 * | D |
143 * |<---->|
144 * V V
145 * +---+----------+-+----------+-+----------+-+----------+-+
146 * | M | data |E| data |E| data |E| data |E|
147 * +---+----------+-+----------+-+----------+-+----------+-+
148 *
149 * The position of the block mark moves forward in the ECC-based view
150 * of the page, and the delta is:
151 *
152 * E * G * (N - 1)
153 * D = (---------------- + M)
154 * 8
155 *
156 * Using the formula for the ECC strength above and the condition
157 * C >= O (where C is the ECC chunk size),
158 *
159 * it is easy to deduce the following result:
160 *
161 * E * G (O - M) C - M C - M
162 * ----------- <= ------- <= -------- < ---------
163 * 8 N N (N - 1)
164 *
165 * So, we get:
166 *
167 * E * G * (N - 1)
168 * D = (---------------- + M) < C
169 * 8
170 *
171 * The above inequality means the position of block mark
172 * within the ECC-based view of the page is still in the data chunk,
173 * and it's NOT in the ECC bits of the chunk.
174 *
175 * Use the following to compute the bit position of the
176 * physical block mark within the ECC-based view of the page:
177 * (page_size - D) * 8
178 *
179 * --Huang Shijie
180 */
181 block_mark_bit_offset = mtd->writesize * 8 -
182 (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
183 + geo->metadata_size * 8);
184
185 geo->block_mark_byte_offset = block_mark_bit_offset / 8;
186 geo->block_mark_bit_offset = block_mark_bit_offset % 8;
187 return 0;
188}
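
To make the block-mark arithmetic above concrete, the same computation for a hypothetical 2048+64 page with E = 8, G = 13, N = 4 and M = 10 (illustrative numbers, not from a real chip) looks like this:

    unsigned int writesize = 2048, E = 8, G = 13, N = 4, M = 10;
    unsigned int bit_off  = writesize * 8 - (E * G * (N - 1) + M * 8); /* 16384 - 392 = 15992 */
    unsigned int byte_off = bit_off / 8; /* 1999: inside the last data chunk, not its ECC bits */
    unsigned int bit_rem  = bit_off % 8; /* 0 */
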
189
190struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
191{
192 int chipnr = this->current_chip;
193
194 return this->dma_chans[chipnr];
195}
196
197/* Can we use the upper layer's buffer directly for DMA? */
198void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
199{
200 struct scatterlist *sgl = &this->data_sgl;
201 int ret;
202
203 this->direct_dma_map_ok = true;
204
205 /* first try to map the upper buffer directly */
206 sg_init_one(sgl, this->upper_buf, this->upper_len);
207 ret = dma_map_sg(this->dev, sgl, 1, dr);
208 if (ret == 0) {
209 /* We have to use our own DMA buffer. */
210 sg_init_one(sgl, this->data_buffer_dma, PAGE_SIZE);
211
212 if (dr == DMA_TO_DEVICE)
213 memcpy(this->data_buffer_dma, this->upper_buf,
214 this->upper_len);
215
216 ret = dma_map_sg(this->dev, sgl, 1, dr);
217 if (ret == 0)
218 pr_err("map failed.\n");
219
220 this->direct_dma_map_ok = false;
221 }
222}
223
224/* This will be called after the DMA operation is finished. */
225static void dma_irq_callback(void *param)
226{
227 struct gpmi_nand_data *this = param;
228 struct completion *dma_c = &this->dma_done;
229
230 complete(dma_c);
231
232 switch (this->dma_type) {
233 case DMA_FOR_COMMAND:
234 dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
235 break;
236
237 case DMA_FOR_READ_DATA:
238 dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
239 if (this->direct_dma_map_ok == false)
240 memcpy(this->upper_buf, this->data_buffer_dma,
241 this->upper_len);
242 break;
243
244 case DMA_FOR_WRITE_DATA:
245 dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
246 break;
247
248 case DMA_FOR_READ_ECC_PAGE:
249 case DMA_FOR_WRITE_ECC_PAGE:
250 /* We have to wait for the BCH interrupt to finish. */
251 break;
252
253 default:
254 pr_err("unexpected DMA operation\n");
255 }
256}
257
258int start_dma_without_bch_irq(struct gpmi_nand_data *this,
259 struct dma_async_tx_descriptor *desc)
260{
261 struct completion *dma_c = &this->dma_done;
262 int err;
263
264 init_completion(dma_c);
265
266 desc->callback = dma_irq_callback;
267 desc->callback_param = this;
268 dmaengine_submit(desc);
269
270 /* Wait for the interrupt from the DMA block. */
271 err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
272 if (!err) {
273 pr_err("DMA timeout, last DMA :%d\n", this->last_dma_type);
274 gpmi_dump_info(this);
275 return -ETIMEDOUT;
276 }
277 return 0;
278}
279
280/*
281 * This function is used when reading or writing pages with BCH.
282 * It will wait for the BCH interrupt for as long as ONE second.
283 * Actually, we must wait for two interrupts:
284 * [1] firstly the DMA interrupt and
285 * [2] secondly the BCH interrupt.
286 */
287int start_dma_with_bch_irq(struct gpmi_nand_data *this,
288 struct dma_async_tx_descriptor *desc)
289{
290 struct completion *bch_c = &this->bch_done;
291 int err;
292
293 /* Prepare to receive an interrupt from the BCH block. */
294 init_completion(bch_c);
295
296 /* start the DMA */
297 start_dma_without_bch_irq(this, desc);
298
299 /* Wait for the interrupt from the BCH block. */
300 err = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
301 if (!err) {
302 pr_err("BCH timeout, last DMA :%d\n", this->last_dma_type);
303 gpmi_dump_info(this);
304 return -ETIMEDOUT;
305 }
306 return 0;
307}
308
309static int __devinit
310acquire_register_block(struct gpmi_nand_data *this, const char *res_name)
311{
312 struct platform_device *pdev = this->pdev;
313 struct resources *res = &this->resources;
314 struct resource *r;
315 void *p;
316
317 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
318 if (!r) {
319 pr_err("Can't get resource for %s\n", res_name);
320 return -ENXIO;
321 }
322
323 p = ioremap(r->start, resource_size(r));
324 if (!p) {
325 pr_err("Can't remap %s\n", res_name);
326 return -ENOMEM;
327 }
328
329 if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
330 res->gpmi_regs = p;
331 else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
332 res->bch_regs = p;
333 else
334 pr_err("unknown resource name : %s\n", res_name);
335
336 return 0;
337}
338
339static void release_register_block(struct gpmi_nand_data *this)
340{
341 struct resources *res = &this->resources;
342 if (res->gpmi_regs)
343 iounmap(res->gpmi_regs);
344 if (res->bch_regs)
345 iounmap(res->bch_regs);
346 res->gpmi_regs = NULL;
347 res->bch_regs = NULL;
348}
349
350static int __devinit
351acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
352{
353 struct platform_device *pdev = this->pdev;
354 struct resources *res = &this->resources;
355 const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
356 struct resource *r;
357 int err;
358
359 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
360 if (!r) {
361 pr_err("Can't get resource for %s\n", res_name);
362 return -ENXIO;
363 }
364
365 err = request_irq(r->start, irq_h, 0, res_name, this);
366 if (err) {
367 pr_err("Can't own %s\n", res_name);
368 return err;
369 }
370
371 res->bch_low_interrupt = r->start;
372 res->bch_high_interrupt = r->end;
373 return 0;
374}
375
376static void release_bch_irq(struct gpmi_nand_data *this)
377{
378 struct resources *res = &this->resources;
379 int i = res->bch_low_interrupt;
380
381 for (; i <= res->bch_high_interrupt; i++)
382 free_irq(i, this);
383}
384
385static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
386{
387 struct gpmi_nand_data *this = param;
388 struct resource *r = this->private;
389
390 if (!mxs_dma_is_apbh(chan))
391 return false;
392 /*
393 * only catch the GPMI dma channels :
394 * for mx23 : MX23_DMA_GPMI0 ~ MX23_DMA_GPMI3
395 * (These four channels share the same IRQ!)
396 *
397 * for mx28 : MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7
398 * (These eight channels share the same IRQ!)
399 */
400 if (r->start <= chan->chan_id && chan->chan_id <= r->end) {
401 chan->private = &this->dma_data;
402 return true;
403 }
404 return false;
405}
406
407static void release_dma_channels(struct gpmi_nand_data *this)
408{
409 unsigned int i;
410 for (i = 0; i < DMA_CHANS; i++)
411 if (this->dma_chans[i]) {
412 dma_release_channel(this->dma_chans[i]);
413 this->dma_chans[i] = NULL;
414 }
415}
416
417static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
418{
419 struct platform_device *pdev = this->pdev;
420 struct gpmi_nand_platform_data *pdata = this->pdata;
421 struct resources *res = &this->resources;
422 struct resource *r, *r_dma;
423 unsigned int i;
424
425 r = platform_get_resource_byname(pdev, IORESOURCE_DMA,
426 GPMI_NAND_DMA_CHANNELS_RES_NAME);
427 r_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
428 GPMI_NAND_DMA_INTERRUPT_RES_NAME);
429 if (!r || !r_dma) {
430 pr_err("Can't get resource for DMA\n");
431 return -ENXIO;
432 }
433
434 /* used in gpmi_dma_filter() */
435 this->private = r;
436
437 for (i = r->start; i <= r->end; i++) {
438 struct dma_chan *dma_chan;
439 dma_cap_mask_t mask;
440
441 if (i - r->start >= pdata->max_chip_count)
442 break;
443
444 dma_cap_zero(mask);
445 dma_cap_set(DMA_SLAVE, mask);
446
447 /* get the DMA interrupt */
448 if (r_dma->start == r_dma->end) {
449 /* only register the first. */
450 if (i == r->start)
451 this->dma_data.chan_irq = r_dma->start;
452 else
453 this->dma_data.chan_irq = NO_IRQ;
454 } else
455 this->dma_data.chan_irq = r_dma->start + (i - r->start);
456
457 dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
458 if (!dma_chan)
459 goto acquire_err;
460
461 /* fill the first empty item */
462 this->dma_chans[i - r->start] = dma_chan;
463 }
464
465 res->dma_low_channel = r->start;
466 res->dma_high_channel = i;
467 return 0;
468
469acquire_err:
470 pr_err("Can't acquire DMA channel %u\n", i);
471 release_dma_channels(this);
472 return -EINVAL;
473}
474
475static int __devinit acquire_resources(struct gpmi_nand_data *this)
476{
477 struct resources *res = &this->resources;
478 int ret;
479
480 ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
481 if (ret)
482 goto exit_regs;
483
484 ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
485 if (ret)
486 goto exit_regs;
487
488 ret = acquire_bch_irq(this, bch_irq);
489 if (ret)
490 goto exit_regs;
491
492 ret = acquire_dma_channels(this);
493 if (ret)
494 goto exit_dma_channels;
495
496 res->clock = clk_get(&this->pdev->dev, NULL);
497 if (IS_ERR(res->clock)) {
498 pr_err("can not get the clock\n");
499 ret = -ENOENT;
500 goto exit_clock;
501 }
502 return 0;
503
504exit_clock:
505 release_dma_channels(this);
506exit_dma_channels:
507 release_bch_irq(this);
508exit_regs:
509 release_register_block(this);
510 return ret;
511}
512
513static void release_resources(struct gpmi_nand_data *this)
514{
515 struct resources *r = &this->resources;
516
517 clk_put(r->clock);
518 release_register_block(this);
519 release_bch_irq(this);
520 release_dma_channels(this);
521}
522
523static int __devinit init_hardware(struct gpmi_nand_data *this)
524{
525 int ret;
526
527 /*
528 * This structure contains the "safe" GPMI timing that should succeed
529 * with any NAND Flash device
530 * (although with less-than-optimal performance).
531 */
532 struct nand_timing safe_timing = {
533 .data_setup_in_ns = 80,
534 .data_hold_in_ns = 60,
535 .address_setup_in_ns = 25,
536 .gpmi_sample_delay_in_ns = 6,
537 .tREA_in_ns = -1,
538 .tRLOH_in_ns = -1,
539 .tRHOH_in_ns = -1,
540 };
541
542 /* Initialize the hardware. */
543 ret = gpmi_init(this);
544 if (ret)
545 return ret;
546
547 this->timing = safe_timing;
548 return 0;
549}
550
551static int read_page_prepare(struct gpmi_nand_data *this,
552 void *destination, unsigned length,
553 void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
554 void **use_virt, dma_addr_t *use_phys)
555{
556 struct device *dev = this->dev;
557
558 if (virt_addr_valid(destination)) {
559 dma_addr_t dest_phys;
560
561 dest_phys = dma_map_single(dev, destination,
562 length, DMA_FROM_DEVICE);
563 if (dma_mapping_error(dev, dest_phys)) {
564 if (alt_size < length) {
565 pr_err("Alternate buffer is too small\n");
566 return -ENOMEM;
567 }
568 goto map_failed;
569 }
570 *use_virt = destination;
571 *use_phys = dest_phys;
572 this->direct_dma_map_ok = true;
573 return 0;
574 }
575
576map_failed:
577 *use_virt = alt_virt;
578 *use_phys = alt_phys;
579 this->direct_dma_map_ok = false;
580 return 0;
581}
582
583static inline void read_page_end(struct gpmi_nand_data *this,
584 void *destination, unsigned length,
585 void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
586 void *used_virt, dma_addr_t used_phys)
587{
588 if (this->direct_dma_map_ok)
589 dma_unmap_single(this->dev, used_phys, length, DMA_FROM_DEVICE);
590}
591
592static inline void read_page_swap_end(struct gpmi_nand_data *this,
593 void *destination, unsigned length,
594 void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
595 void *used_virt, dma_addr_t used_phys)
596{
597 if (!this->direct_dma_map_ok)
598 memcpy(destination, alt_virt, length);
599}
600
601static int send_page_prepare(struct gpmi_nand_data *this,
602 const void *source, unsigned length,
603 void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
604 const void **use_virt, dma_addr_t *use_phys)
605{
606 struct device *dev = this->dev;
607
608 if (virt_addr_valid(source)) {
609 dma_addr_t source_phys;
610
611 source_phys = dma_map_single(dev, (void *)source, length,
612 DMA_TO_DEVICE);
613 if (dma_mapping_error(dev, source_phys)) {
614 if (alt_size < length) {
615 pr_err("Alternate buffer is too small\n");
616 return -ENOMEM;
617 }
618 goto map_failed;
619 }
620 *use_virt = source;
621 *use_phys = source_phys;
622 return 0;
623 }
624map_failed:
625 /*
626 * Copy the content of the source buffer into the alternate
627 * buffer and set up the return values accordingly.
628 */
629 memcpy(alt_virt, source, length);
630
631 *use_virt = alt_virt;
632 *use_phys = alt_phys;
633 return 0;
634}
635
636static void send_page_end(struct gpmi_nand_data *this,
637 const void *source, unsigned length,
638 void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
639 const void *used_virt, dma_addr_t used_phys)
640{
641 struct device *dev = this->dev;
642 if (used_virt == source)
643 dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE);
644}
645
646static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
647{
648 struct device *dev = this->dev;
649
650 if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt))
651 dma_free_coherent(dev, this->page_buffer_size,
652 this->page_buffer_virt,
653 this->page_buffer_phys);
654 kfree(this->cmd_buffer);
655 kfree(this->data_buffer_dma);
656
657 this->cmd_buffer = NULL;
658 this->data_buffer_dma = NULL;
659 this->page_buffer_virt = NULL;
660 this->page_buffer_size = 0;
661}
662
663/* Allocate the DMA buffers */
664static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
665{
666 struct bch_geometry *geo = &this->bch_geometry;
667 struct device *dev = this->dev;
668
669 /* [1] Allocate a command buffer. PAGE_SIZE is enough. */
670 this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA);
671 if (this->cmd_buffer == NULL)
672 goto error_alloc;
673
674 /* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */
675 this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA);
676 if (this->data_buffer_dma == NULL)
677 goto error_alloc;
678
679 /*
680 * [3] Allocate the page buffer.
681 *
682 * Both the payload buffer and the auxiliary buffer must appear on
683 * 32-bit boundaries. We presume the size of the payload buffer is a
684 * power of two and is much larger than four, which guarantees the
685 * auxiliary buffer will appear on a 32-bit boundary.
686 */
687 this->page_buffer_size = geo->payload_size + geo->auxiliary_size;
688 this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size,
689 &this->page_buffer_phys, GFP_DMA);
690 if (!this->page_buffer_virt)
691 goto error_alloc;
692
693
694 /* Slice up the page buffer. */
695 this->payload_virt = this->page_buffer_virt;
696 this->payload_phys = this->page_buffer_phys;
697 this->auxiliary_virt = this->payload_virt + geo->payload_size;
698 this->auxiliary_phys = this->payload_phys + geo->payload_size;
699 return 0;
700
701error_alloc:
702 gpmi_free_dma_buffer(this);
703 pr_err("Error allocating DMA buffers\n");
704 return -ENOMEM;
705}
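
A quick arithmetic check of the slicing above, using hypothetical sizes (the real ones come from bch_set_geometry()): the auxiliary buffer starts immediately after the payload, so it stays on a 32-bit boundary whenever the payload size is a multiple of four.

#include <stdio.h>

int main(void)
{
	/* Hypothetical geometry; real values come from bch_set_geometry(). */
	unsigned int payload_size = 4096, auxiliary_size = 128;
	unsigned int page_buffer_size = payload_size + auxiliary_size;
	unsigned int auxiliary_offset = payload_size;

	printf("page buffer: %u bytes, auxiliary at offset %u (32-bit aligned: %s)\n",
	       page_buffer_size, auxiliary_offset,
	       (auxiliary_offset % 4) ? "no" : "yes");
	return 0;
}
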
706
707static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
708{
709 struct nand_chip *chip = mtd->priv;
710 struct gpmi_nand_data *this = chip->priv;
711 int ret;
712
713 /*
714 * Every operation begins with a command byte and a series of zero or
715 * more address bytes. These are distinguished by either the Address
716 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
717 * asserted. When MTD is ready to execute the command, it will deassert
718 * both latch enables.
719 *
720 * Rather than run a separate DMA operation for every single byte, we
721 * queue them up and run a single DMA operation for the entire series
722 * of command and data bytes. NAND_CMD_NONE means the END of the queue.
723 */
724 if ((ctrl & (NAND_ALE | NAND_CLE))) {
725 if (data != NAND_CMD_NONE)
726 this->cmd_buffer[this->command_length++] = data;
727 return;
728 }
729
730 if (!this->command_length)
731 return;
732
733 ret = gpmi_send_command(this);
734 if (ret)
735 pr_err("Chip: %u, Error %d\n", this->current_chip, ret);
736
737 this->command_length = 0;
738}
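
For illustration only, a small user-space sketch of the queue-then-flush behaviour described in the comment above; the NAND_* constants and the buffer are local stand-ins, not the driver's.

#include <stdio.h>

#define NAND_CLE      0x02
#define NAND_ALE      0x04
#define NAND_CMD_NONE -1

static unsigned char cmd_buffer[16];
static int command_length;

/* Mimics gpmi_cmd_ctrl(): queue bytes while ALE/CLE is asserted,
 * flush the whole series once both latch enables are deasserted. */
static void sketch_cmd_ctrl(int data, unsigned int ctrl)
{
	if (ctrl & (NAND_ALE | NAND_CLE)) {
		if (data != NAND_CMD_NONE)
			cmd_buffer[command_length++] = data;
		return;
	}
	if (!command_length)
		return;
	printf("flushing %d byte(s) in one DMA transfer\n", command_length);
	command_length = 0;
}

int main(void)
{
	sketch_cmd_ctrl(0x00, NAND_CLE);	/* command byte */
	sketch_cmd_ctrl(0x00, NAND_ALE);	/* address byte 1 */
	sketch_cmd_ctrl(0x01, NAND_ALE);	/* address byte 2 */
	sketch_cmd_ctrl(NAND_CMD_NONE, 0);	/* latches dropped: flush */
	return 0;
}
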
739
740static int gpmi_dev_ready(struct mtd_info *mtd)
741{
742 struct nand_chip *chip = mtd->priv;
743 struct gpmi_nand_data *this = chip->priv;
744
745 return gpmi_is_ready(this, this->current_chip);
746}
747
748static void gpmi_select_chip(struct mtd_info *mtd, int chipnr)
749{
750 struct nand_chip *chip = mtd->priv;
751 struct gpmi_nand_data *this = chip->priv;
752
753 if ((this->current_chip < 0) && (chipnr >= 0))
754 gpmi_begin(this);
755 else if ((this->current_chip >= 0) && (chipnr < 0))
756 gpmi_end(this);
757
758 this->current_chip = chipnr;
759}
760
761static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
762{
763 struct nand_chip *chip = mtd->priv;
764 struct gpmi_nand_data *this = chip->priv;
765
766 pr_debug("len is %d\n", len);
767 this->upper_buf = buf;
768 this->upper_len = len;
769
770 gpmi_read_data(this);
771}
772
773static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
774{
775 struct nand_chip *chip = mtd->priv;
776 struct gpmi_nand_data *this = chip->priv;
777
778 pr_debug("len is %d\n", len);
779 this->upper_buf = (uint8_t *)buf;
780 this->upper_len = len;
781
782 gpmi_send_data(this);
783}
784
785static uint8_t gpmi_read_byte(struct mtd_info *mtd)
786{
787 struct nand_chip *chip = mtd->priv;
788 struct gpmi_nand_data *this = chip->priv;
789 uint8_t *buf = this->data_buffer_dma;
790
791 gpmi_read_buf(mtd, buf, 1);
792 return buf[0];
793}
794
795/*
796 * Handles block mark swapping.
797 * It can be called either to swap the block mark or to swap it back,
798 * because the operations are the same.
799 */
800static void block_mark_swapping(struct gpmi_nand_data *this,
801 void *payload, void *auxiliary)
802{
803 struct bch_geometry *nfc_geo = &this->bch_geometry;
804 unsigned char *p;
805 unsigned char *a;
806 unsigned int bit;
807 unsigned char mask;
808 unsigned char from_data;
809 unsigned char from_oob;
810
811 if (!this->swap_block_mark)
812 return;
813
814 /*
815 * If control arrives here, we're swapping. Make some convenience
816 * variables.
817 */
818 bit = nfc_geo->block_mark_bit_offset;
819 p = payload + nfc_geo->block_mark_byte_offset;
820 a = auxiliary;
821
822 /*
823 * Get the byte from the data area that overlays the block mark. Since
824 * the ECC engine applies its own view to the bits in the page, the
825 * physical block mark won't (in general) appear on a byte boundary in
826 * the data.
827 */
828 from_data = (p[0] >> bit) | (p[1] << (8 - bit));
829
830 /* Get the byte from the OOB. */
831 from_oob = a[0];
832
833 /* Swap them. */
834 a[0] = from_data;
835
836 mask = (0x1 << bit) - 1;
837 p[0] = (p[0] & mask) | (from_oob << bit);
838
839 mask = ~0 << bit;
840 p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
841}
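
A stand-alone worked example of the byte-straddle arithmetic above, using a hypothetical bit offset of 3 and made-up byte values; it only demonstrates that the masked shifts exchange the straddled data byte with the first OOB byte.

#include <stdio.h>

int main(void)
{
	unsigned int bit = 3;			/* hypothetical block_mark_bit_offset */
	unsigned char p[2] = { 0xd5, 0x3c };	/* data bytes straddling the mark */
	unsigned char a0 = 0xff;		/* first byte of the auxiliary buffer */
	unsigned char mask, from_data, from_oob;

	/* Reassemble the physical mark byte from the two straddling bytes. */
	from_data = (p[0] >> bit) | (p[1] << (8 - bit));	/* 0x9a */
	from_oob = a0;

	a0 = from_data;

	mask = (0x1 << bit) - 1;
	p[0] = (p[0] & mask) | (from_oob << bit);

	mask = ~0 << bit;	/* mirrors the driver's expression */
	p[1] = (p[1] & mask) | (from_oob >> (8 - bit));

	printf("mark byte 0x%02x moved into the OOB, 0x%02x spread back into the data\n",
	       a0, from_oob);
	return 0;
}
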
842
843static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
844 uint8_t *buf, int page)
845{
846 struct gpmi_nand_data *this = chip->priv;
847 struct bch_geometry *nfc_geo = &this->bch_geometry;
848 void *payload_virt;
849 dma_addr_t payload_phys;
850 void *auxiliary_virt;
851 dma_addr_t auxiliary_phys;
852 unsigned int i;
853 unsigned char *status;
854 unsigned int failed;
855 unsigned int corrected;
856 int ret;
857
858 pr_debug("page number is : %d\n", page);
859 ret = read_page_prepare(this, buf, mtd->writesize,
860 this->payload_virt, this->payload_phys,
861 nfc_geo->payload_size,
862 &payload_virt, &payload_phys);
863 if (ret) {
864 pr_err("Inadequate DMA buffer\n");
865 ret = -ENOMEM;
866 return ret;
867 }
868 auxiliary_virt = this->auxiliary_virt;
869 auxiliary_phys = this->auxiliary_phys;
870
871 /* go! */
872 ret = gpmi_read_page(this, payload_phys, auxiliary_phys);
873 read_page_end(this, buf, mtd->writesize,
874 this->payload_virt, this->payload_phys,
875 nfc_geo->payload_size,
876 payload_virt, payload_phys);
877 if (ret) {
878 pr_err("Error in ECC-based read: %d\n", ret);
879 goto exit_nfc;
880 }
881
882 /* handle the block mark swapping */
883 block_mark_swapping(this, payload_virt, auxiliary_virt);
884
885 /* Loop over status bytes, accumulating ECC status. */
886 failed = 0;
887 corrected = 0;
888 status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
889
890 for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
891 if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
892 continue;
893
894 if (*status == STATUS_UNCORRECTABLE) {
895 failed++;
896 continue;
897 }
898 corrected += *status;
899 }
900
901 /*
902 * Propagate ECC status to the owning MTD only when a chunk failed or
903 * the number of corrected bitflips nearly reaches our ECC correction threshold.
904 */
905 if (failed || corrected >= (nfc_geo->ecc_strength - 1)) {
906 mtd->ecc_stats.failed += failed;
907 mtd->ecc_stats.corrected += corrected;
908 }
909
910 /*
911 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() for
912 * details about our policy for delivering the OOB.
913 *
914 * We fill the caller's buffer with set bits, and then copy the block
915 * mark to the caller's buffer. Note that, if block mark swapping was
916 * necessary, it has already been done, so we can rely on the first
917 * byte of the auxiliary buffer to contain the block mark.
918 */
919 memset(chip->oob_poi, ~0, mtd->oobsize);
920 chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
921
922 read_page_swap_end(this, buf, mtd->writesize,
923 this->payload_virt, this->payload_phys,
924 nfc_geo->payload_size,
925 payload_virt, payload_phys);
926exit_nfc:
927 return ret;
928}
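
The ECC status accumulation in the loop above can be tried in isolation. The sketch below reuses the completion codes from gpmi-nand.h with a made-up four-chunk status array; uncorrectable chunks count as failures and other nonzero values are per-chunk bitflip counts.

#include <stdio.h>

#define STATUS_GOOD          0x00
#define STATUS_ERASED        0xff
#define STATUS_UNCORRECTABLE 0xfe

int main(void)
{
	/* Hypothetical per-chunk ECC status bytes for a 4-chunk page. */
	unsigned char status[4] = { STATUS_GOOD, 0x03, STATUS_ERASED,
				    STATUS_UNCORRECTABLE };
	unsigned int failed = 0, corrected = 0, i;

	for (i = 0; i < 4; i++) {
		if (status[i] == STATUS_GOOD || status[i] == STATUS_ERASED)
			continue;
		if (status[i] == STATUS_UNCORRECTABLE) {
			failed++;
			continue;
		}
		corrected += status[i];	/* value is the bitflip count */
	}
	printf("failed=%u corrected=%u\n", failed, corrected);	/* 1 and 3 */
	return 0;
}
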
929
930static void gpmi_ecc_write_page(struct mtd_info *mtd,
931 struct nand_chip *chip, const uint8_t *buf)
932{
933 struct gpmi_nand_data *this = chip->priv;
934 struct bch_geometry *nfc_geo = &this->bch_geometry;
935 const void *payload_virt;
936 dma_addr_t payload_phys;
937 const void *auxiliary_virt;
938 dma_addr_t auxiliary_phys;
939 int ret;
940
941 pr_debug("ecc write page.\n");
942 if (this->swap_block_mark) {
943 /*
944 * If control arrives here, we're doing block mark swapping.
945 * Since we can't modify the caller's buffers, we must copy them
946 * into our own.
947 */
948 memcpy(this->payload_virt, buf, mtd->writesize);
949 payload_virt = this->payload_virt;
950 payload_phys = this->payload_phys;
951
952 memcpy(this->auxiliary_virt, chip->oob_poi,
953 nfc_geo->auxiliary_size);
954 auxiliary_virt = this->auxiliary_virt;
955 auxiliary_phys = this->auxiliary_phys;
956
957 /* Handle block mark swapping. */
958 block_mark_swapping(this,
959 (void *) payload_virt, (void *) auxiliary_virt);
960 } else {
961 /*
962 * If control arrives here, we're not doing block mark swapping,
963 * so we can try to use the caller's buffers.
964 */
965 ret = send_page_prepare(this,
966 buf, mtd->writesize,
967 this->payload_virt, this->payload_phys,
968 nfc_geo->payload_size,
969 &payload_virt, &payload_phys);
970 if (ret) {
971 pr_err("Inadequate payload DMA buffer\n");
972 return;
973 }
974
975 ret = send_page_prepare(this,
976 chip->oob_poi, mtd->oobsize,
977 this->auxiliary_virt, this->auxiliary_phys,
978 nfc_geo->auxiliary_size,
979 &auxiliary_virt, &auxiliary_phys);
980 if (ret) {
981 pr_err("Inadequate auxiliary DMA buffer\n");
982 goto exit_auxiliary;
983 }
984 }
985
986 /* Ask the NFC. */
987 ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
988 if (ret)
989 pr_err("Error in ECC-based write: %d\n", ret);
990
991 if (!this->swap_block_mark) {
992 send_page_end(this, chip->oob_poi, mtd->oobsize,
993 this->auxiliary_virt, this->auxiliary_phys,
994 nfc_geo->auxiliary_size,
995 auxiliary_virt, auxiliary_phys);
996exit_auxiliary:
997 send_page_end(this, buf, mtd->writesize,
998 this->payload_virt, this->payload_phys,
999 nfc_geo->payload_size,
1000 payload_virt, payload_phys);
1001 }
1002}
1003
1004/*
1005 * There are several places in this driver where we have to handle the OOB and
1006 * block marks. This is the function where things are the most complicated, so
1007 * this is where we try to explain it all. All the other places refer back to
1008 * here.
1009 *
1010 * These are the rules, in order of decreasing importance:
1011 *
1012 * 1) Nothing the caller does can be allowed to imperil the block mark.
1013 *
1014 * 2) In read operations, the first byte of the OOB we return must reflect the
1015 * true state of the block mark, no matter where that block mark appears in
1016 * the physical page.
1017 *
1018 * 3) ECC-based read operations return an OOB full of set bits (since we never
1019 * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
1020 * return).
1021 *
1022 * 4) "Raw" read operations return a direct view of the physical bytes in the
1023 * page, using the conventional definition of which bytes are data and which
1024 * are OOB. This gives the caller a way to see the actual, physical bytes
1025 * in the page, without the distortions applied by our ECC engine.
1026 *
1027 *
1028 * What we do for this specific read operation depends on two questions:
1029 *
1030 * 1) Are we doing a "raw" read, or an ECC-based read?
1031 *
1032 * 2) Are we using block mark swapping or transcription?
1033 *
1034 * There are four cases, illustrated by the following Karnaugh map:
1035 *
1036 * | Raw | ECC-based |
1037 * -------------+-------------------------+-------------------------+
1038 * | Read the conventional | |
1039 * | OOB at the end of the | |
1040 * Swapping | page and return it. It | |
1041 * | contains exactly what | |
1042 * | we want. | Read the block mark and |
1043 * -------------+-------------------------+ return it in a buffer |
1044 * | Read the conventional | full of set bits. |
1045 * | OOB at the end of the | |
1046 * | page and also the block | |
1047 * Transcribing | mark in the metadata. | |
1048 * | Copy the block mark | |
1049 * | into the first byte of | |
1050 * | the OOB. | |
1051 * -------------+-------------------------+-------------------------+
1052 *
1053 * Note that we break rule #4 in the Transcribing/Raw case because we're not
1054 * giving an accurate view of the actual, physical bytes in the page (we're
1055 * overwriting the block mark). That's OK because it's more important to follow
1056 * rule #2.
1057 *
1058 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
1059 * easy. When reading a page, for example, the NAND Flash MTD code calls our
1060 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
1061 * ECC-based or raw view of the page is implicit in which function it calls
1062 * (there is a similar pair of ECC-based/raw functions for writing).
1063 *
1064 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
1065 * ECC-based/raw functions for reading or writing the OOB. The fact that the
1066 * caller wants an ECC-based or raw view of the page is not propagated down to
1067 * this driver.
1068 */
1069static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1070 int page, int sndcmd)
1071{
1072 struct gpmi_nand_data *this = chip->priv;
1073
1074 pr_debug("page number is %d\n", page);
1075 /* clear the OOB buffer */
1076 memset(chip->oob_poi, ~0, mtd->oobsize);
1077
1078 /* Read out the conventional OOB. */
1079 chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
1080 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1081
1082 /*
1083 * Now, we want to make sure the block mark is correct. In the
1084 * Swapping/Raw case, we already have it. Otherwise, we need to
1085 * explicitly read it.
1086 */
1087 if (!this->swap_block_mark) {
1088 /* Read the block mark into the first byte of the OOB buffer. */
1089 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1090 chip->oob_poi[0] = chip->read_byte(mtd);
1091 }
1092
1093 /*
1094 * Return true, indicating that the next call to this function must send
1095 * a command.
1096 */
1097 return true;
1098}
1099
1100static int
1101gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
1102{
1103 /*
1104 * The BCH engine uses the whole page plus the OOB.
1105 * Our gpmi_hw_ecclayout only prevents JFFS2 from writing the OOB,
1106 * but it cannot stop ioctls such as MEMWRITEOOB, which use
1107 * MTD_OPS_PLACE_OOB. So we implement this function to reject
1108 * those ioctls as well.
1109 */
1110 return -EPERM;
1111}
1112
1113static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
1114{
1115 struct nand_chip *chip = mtd->priv;
1116 struct gpmi_nand_data *this = chip->priv;
1117 int block, ret = 0;
1118 uint8_t *block_mark;
1119 int column, page, status, chipnr;
1120
1121 /* Get block number */
1122 block = (int)(ofs >> chip->bbt_erase_shift);
1123 if (chip->bbt)
1124 chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
1125
1126 /* Do we have a flash based bad block table ? */
1127 if (chip->options & NAND_BBT_USE_FLASH)
1128 ret = nand_update_bbt(mtd, ofs);
1129 else {
1130 chipnr = (int)(ofs >> chip->chip_shift);
1131 chip->select_chip(mtd, chipnr);
1132
1133 column = this->swap_block_mark ? mtd->writesize : 0;
1134
1135 /* Write the block mark. */
1136 block_mark = this->data_buffer_dma;
1137 block_mark[0] = 0; /* bad block marker */
1138
1139 /* Shift to get page */
1140 page = (int)(ofs >> chip->page_shift);
1141
1142 chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
1143 chip->write_buf(mtd, block_mark, 1);
1144 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1145
1146 status = chip->waitfunc(mtd, chip);
1147 if (status & NAND_STATUS_FAIL)
1148 ret = -EIO;
1149
1150 chip->select_chip(mtd, -1);
1151 }
1152 if (!ret)
1153 mtd->ecc_stats.badblocks++;
1154
1155 return ret;
1156}
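
The in-memory BBT update near the top of gpmi_block_markbad() stores two status bits per block, i.e. four blocks per byte. A tiny sketch with a hypothetical block number shows where the bit lands:

#include <stdio.h>

int main(void)
{
	unsigned char bbt[4] = { 0 };
	int block = 5;	/* hypothetical block number */

	/* Same expression as the driver: byte block/4, bit pair (block%4)*2. */
	bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);

	printf("bbt[%d] = 0x%02x\n", block >> 2, bbt[block >> 2]);	/* bbt[1] = 0x04 */
	return 0;
}
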
1157
1158static int __devinit nand_boot_set_geometry(struct gpmi_nand_data *this)
1159{
1160 struct boot_rom_geometry *geometry = &this->rom_geometry;
1161
1162 /*
1163 * Set the boot block stride size.
1164 *
1165 * In principle, we should be reading this from the OTP bits, since
1166 * that's where the ROM is going to get it. In fact, we don't have any
1167 * way to read the OTP bits, so we go with the default and hope for the
1168 * best.
1169 */
1170 geometry->stride_size_in_pages = 64;
1171
1172 /*
1173 * Set the search area stride exponent.
1174 *
1175 * In principle, we should be reading this from the OTP bits, since
1176 * that's where the ROM is going to get it. In fact, we don't have any
1177 * way to read the OTP bits, so we go with the default and hope for the
1178 * best.
1179 */
1180 geometry->search_area_stride_exponent = 2;
1181 return 0;
1182}
1183
1184static const char *fingerprint = "STMP";
1185static int __devinit mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1186{
1187 struct boot_rom_geometry *rom_geo = &this->rom_geometry;
1188 struct device *dev = this->dev;
1189 struct mtd_info *mtd = &this->mtd;
1190 struct nand_chip *chip = &this->nand;
1191 unsigned int search_area_size_in_strides;
1192 unsigned int stride;
1193 unsigned int page;
1194 loff_t byte;
1195 uint8_t *buffer = chip->buffers->databuf;
1196 int saved_chip_number;
1197 int found_an_ncb_fingerprint = false;
1198
1199 /* Compute the number of strides in a search area. */
1200 search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
1201
1202 saved_chip_number = this->current_chip;
1203 chip->select_chip(mtd, 0);
1204
1205 /*
1206 * Loop through the first search area, looking for the NCB fingerprint.
1207 */
1208 dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
1209
1210 for (stride = 0; stride < search_area_size_in_strides; stride++) {
1211 /* Compute the page and byte addresses. */
1212 page = stride * rom_geo->stride_size_in_pages;
1213 byte = page * mtd->writesize;
1214
1215 dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
1216
1217 /*
1218 * Read the NCB fingerprint. The fingerprint is four bytes long
1219 * and starts in the 12th byte of the page.
1220 */
1221 chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page);
1222 chip->read_buf(mtd, buffer, strlen(fingerprint));
1223
1224 /* Look for the fingerprint. */
1225 if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
1226 found_an_ncb_fingerprint = true;
1227 break;
1228 }
1229
1230 }
1231
1232 chip->select_chip(mtd, saved_chip_number);
1233
1234 if (found_an_ncb_fingerprint)
1235 dev_dbg(dev, "\tFound a fingerprint\n");
1236 else
1237 dev_dbg(dev, "\tNo fingerprint found\n");
1238 return found_an_ncb_fingerprint;
1239}
1240
1241/* Writes a transcription stamp. */
1242static int __devinit mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1243{
1244 struct device *dev = this->dev;
1245 struct boot_rom_geometry *rom_geo = &this->rom_geometry;
1246 struct mtd_info *mtd = &this->mtd;
1247 struct nand_chip *chip = &this->nand;
1248 unsigned int block_size_in_pages;
1249 unsigned int search_area_size_in_strides;
1250 unsigned int search_area_size_in_pages;
1251 unsigned int search_area_size_in_blocks;
1252 unsigned int block;
1253 unsigned int stride;
1254 unsigned int page;
1255 loff_t byte;
1256 uint8_t *buffer = chip->buffers->databuf;
1257 int saved_chip_number;
1258 int status;
1259
1260 /* Compute the search area geometry. */
1261 block_size_in_pages = mtd->erasesize / mtd->writesize;
1262 search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
1263 search_area_size_in_pages = search_area_size_in_strides *
1264 rom_geo->stride_size_in_pages;
1265 search_area_size_in_blocks =
1266 (search_area_size_in_pages + (block_size_in_pages - 1)) /
1267 block_size_in_pages;
1268
1269 dev_dbg(dev, "Search Area Geometry :\n");
1270 dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
1271 dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
1272 dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages);
1273
1274 /* Select chip 0. */
1275 saved_chip_number = this->current_chip;
1276 chip->select_chip(mtd, 0);
1277
1278 /* Loop over blocks in the first search area, erasing them. */
1279 dev_dbg(dev, "Erasing the search area...\n");
1280
1281 for (block = 0; block < search_area_size_in_blocks; block++) {
1282 /* Compute the page address. */
1283 page = block * block_size_in_pages;
1284
1285 /* Erase this block. */
1286 dev_dbg(dev, "\tErasing block 0x%x\n", block);
1287 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
1288 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
1289
1290 /* Wait for the erase to finish. */
1291 status = chip->waitfunc(mtd, chip);
1292 if (status & NAND_STATUS_FAIL)
1293 dev_err(dev, "[%s] Erase failed.\n", __func__);
1294 }
1295
1296 /* Write the NCB fingerprint into the page buffer. */
1297 memset(buffer, ~0, mtd->writesize);
1298 memset(chip->oob_poi, ~0, mtd->oobsize);
1299 memcpy(buffer + 12, fingerprint, strlen(fingerprint));
1300
1301 /* Loop through the first search area, writing NCB fingerprints. */
1302 dev_dbg(dev, "Writing NCB fingerprints...\n");
1303 for (stride = 0; stride < search_area_size_in_strides; stride++) {
1304 /* Compute the page and byte addresses. */
1305 page = stride * rom_geo->stride_size_in_pages;
1306 byte = page * mtd->writesize;
1307
1308 /* Write the first page of the current stride. */
1309 dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
1310 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
1311 chip->ecc.write_page_raw(mtd, chip, buffer);
1312 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1313
1314 /* Wait for the write to finish. */
1315 status = chip->waitfunc(mtd, chip);
1316 if (status & NAND_STATUS_FAIL)
1317 dev_err(dev, "[%s] Write failed.\n", __func__);
1318 }
1319
1320 /* Deselect chip 0. */
1321 chip->select_chip(mtd, saved_chip_number);
1322 return 0;
1323}
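
The search-area geometry computed above is easy to verify by hand. The sketch below plugs in the defaults from nand_boot_set_geometry() (a stride of 64 pages, stride exponent 2) together with hypothetical 128 KiB blocks and 2 KiB pages:

#include <stdio.h>

int main(void)
{
	/* Defaults from nand_boot_set_geometry(); block/page sizes are
	 * hypothetical example values. */
	unsigned int stride_size_in_pages = 64;
	unsigned int search_area_stride_exponent = 2;
	unsigned int erasesize = 128 * 1024, writesize = 2048;

	unsigned int block_size_in_pages = erasesize / writesize;	/* 64 */
	unsigned int strides = 1 << search_area_stride_exponent;	/* 4 */
	unsigned int pages = strides * stride_size_in_pages;		/* 256 */
	unsigned int blocks = (pages + block_size_in_pages - 1) /
			      block_size_in_pages;			/* 4 */

	printf("search area: %u strides, %u pages, %u blocks\n",
	       strides, pages, blocks);
	return 0;
}
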
1324
1325static int __devinit mx23_boot_init(struct gpmi_nand_data *this)
1326{
1327 struct device *dev = this->dev;
1328 struct nand_chip *chip = &this->nand;
1329 struct mtd_info *mtd = &this->mtd;
1330 unsigned int block_count;
1331 unsigned int block;
1332 int chipnr;
1333 int page;
1334 loff_t byte;
1335 uint8_t block_mark;
1336 int ret = 0;
1337
1338 /*
1339 * If control arrives here, we can't use block mark swapping, which
1340 * means we're forced to use transcription. First, scan for the
1341 * transcription stamp. If we find it, then we don't have to do
1342 * anything -- the block marks are already transcribed.
1343 */
1344 if (mx23_check_transcription_stamp(this))
1345 return 0;
1346
1347 /*
1348 * If control arrives here, we couldn't find a transcription stamp, so
1349 * we presume the block marks are in the conventional location.
1350 */
1351 dev_dbg(dev, "Transcribing bad block marks...\n");
1352
1353 /* Compute the number of blocks in the entire medium. */
1354 block_count = chip->chipsize >> chip->phys_erase_shift;
1355
1356 /*
1357 * Loop over all the blocks in the medium, transcribing block marks as
1358 * we go.
1359 */
1360 for (block = 0; block < block_count; block++) {
1361 /*
1362 * Compute the chip, page and byte addresses for this block's
1363 * conventional mark.
1364 */
1365 chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
1366 page = block << (chip->phys_erase_shift - chip->page_shift);
1367 byte = block << chip->phys_erase_shift;
1368
1369 /* Send the command to read the conventional block mark. */
1370 chip->select_chip(mtd, chipnr);
1371 chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
1372 block_mark = chip->read_byte(mtd);
1373 chip->select_chip(mtd, -1);
1374
1375 /*
1376 * Check if the block is marked bad. If so, we need to mark it
1377 * again, but this time the result will be a mark in the
1378 * location where we transcribe block marks.
1379 */
1380 if (block_mark != 0xff) {
1381 dev_dbg(dev, "Transcribing mark in block %u\n", block);
1382 ret = chip->block_markbad(mtd, byte);
1383 if (ret)
1384 dev_err(dev, "Failed to mark block bad with "
1385 "ret %d\n", ret);
1386 }
1387 }
1388
1389 /* Write the stamp that indicates we've transcribed the block marks. */
1390 mx23_write_transcription_stamp(this);
1391 return 0;
1392}
1393
1394static int __devinit nand_boot_init(struct gpmi_nand_data *this)
1395{
1396 nand_boot_set_geometry(this);
1397
1398 /* This is ROM arch-specific initialization before the BBT scan. */
1399 if (GPMI_IS_MX23(this))
1400 return mx23_boot_init(this);
1401 return 0;
1402}
1403
1404static int __devinit gpmi_set_geometry(struct gpmi_nand_data *this)
1405{
1406 int ret;
1407
1408 /* Free the temporary DMA memory for reading ID. */
1409 gpmi_free_dma_buffer(this);
1410
1411 /* Set up the NFC geometry which is used by BCH. */
1412 ret = bch_set_geometry(this);
1413 if (ret) {
1414 pr_err("bch_set_geometry failed: %d\n", ret);
1415 return ret;
1416 }
1417
1418 /* Allocate new DMA buffers according to the page size and OOB size. */
1419 return gpmi_alloc_dma_buffer(this);
1420}
1421
1422static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this)
1423{
1424 int ret;
1425
1426 /* Set up swap_block_mark; it must be set before gpmi_set_geometry(). */
1427 if (GPMI_IS_MX23(this))
1428 this->swap_block_mark = false;
1429 else
1430 this->swap_block_mark = true;
1431
1432 /* Set up the medium geometry */
1433 ret = gpmi_set_geometry(this);
1434 if (ret)
1435 return ret;
1436
1437 /* NAND boot init; depends on gpmi_set_geometry(). */
1438 return nand_boot_init(this);
1439}
1440
1441static int gpmi_scan_bbt(struct mtd_info *mtd)
1442{
1443 struct nand_chip *chip = mtd->priv;
1444 struct gpmi_nand_data *this = chip->priv;
1445 int ret;
1446
1447 /* Prepare for the BBT scan. */
1448 ret = gpmi_pre_bbt_scan(this);
1449 if (ret)
1450 return ret;
1451
1452 /* use the default BBT implementation */
1453 return nand_default_bbt(mtd);
1454}
1455
1456void gpmi_nfc_exit(struct gpmi_nand_data *this)
1457{
1458 nand_release(&this->mtd);
1459 gpmi_free_dma_buffer(this);
1460}
1461
1462static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
1463{
1464 struct gpmi_nand_platform_data *pdata = this->pdata;
1465 struct mtd_info *mtd = &this->mtd;
1466 struct nand_chip *chip = &this->nand;
1467 int ret;
1468
1469 /* init current chip */
1470 this->current_chip = -1;
1471
1472 /* init the MTD data structures */
1473 mtd->priv = chip;
1474 mtd->name = "gpmi-nand";
1475 mtd->owner = THIS_MODULE;
1476
1477 /* init the nand_chip{}; we don't support a 16-bit NAND Flash bus. */
1478 chip->priv = this;
1479 chip->select_chip = gpmi_select_chip;
1480 chip->cmd_ctrl = gpmi_cmd_ctrl;
1481 chip->dev_ready = gpmi_dev_ready;
1482 chip->read_byte = gpmi_read_byte;
1483 chip->read_buf = gpmi_read_buf;
1484 chip->write_buf = gpmi_write_buf;
1485 chip->ecc.read_page = gpmi_ecc_read_page;
1486 chip->ecc.write_page = gpmi_ecc_write_page;
1487 chip->ecc.read_oob = gpmi_ecc_read_oob;
1488 chip->ecc.write_oob = gpmi_ecc_write_oob;
1489 chip->scan_bbt = gpmi_scan_bbt;
1490 chip->badblock_pattern = &gpmi_bbt_descr;
1491 chip->block_markbad = gpmi_block_markbad;
1492 chip->options |= NAND_NO_SUBPAGE_WRITE;
1493 chip->ecc.mode = NAND_ECC_HW;
1494 chip->ecc.size = 1;
1495 chip->ecc.layout = &gpmi_hw_ecclayout;
1496
1497 /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */
1498 this->bch_geometry.payload_size = 1024;
1499 this->bch_geometry.auxiliary_size = 128;
1500 ret = gpmi_alloc_dma_buffer(this);
1501 if (ret)
1502 goto err_out;
1503
1504 ret = nand_scan(mtd, pdata->max_chip_count);
1505 if (ret) {
1506 pr_err("Chip scan failed\n");
1507 goto err_out;
1508 }
1509
1510 ret = mtd_device_parse_register(mtd, NULL, NULL,
1511 pdata->partitions, pdata->partition_count);
1512 if (ret)
1513 goto err_out;
1514 return 0;
1515
1516err_out:
1517 gpmi_nfc_exit(this);
1518 return ret;
1519}
1520
1521static int __devinit gpmi_nand_probe(struct platform_device *pdev)
1522{
1523 struct gpmi_nand_platform_data *pdata = pdev->dev.platform_data;
1524 struct gpmi_nand_data *this;
1525 int ret;
1526
1527 this = kzalloc(sizeof(*this), GFP_KERNEL);
1528 if (!this) {
1529 pr_err("Failed to allocate per-device memory\n");
1530 return -ENOMEM;
1531 }
1532
1533 platform_set_drvdata(pdev, this);
1534 this->pdev = pdev;
1535 this->dev = &pdev->dev;
1536 this->pdata = pdata;
1537
1538 if (pdata->platform_init) {
1539 ret = pdata->platform_init();
1540 if (ret)
1541 goto platform_init_error;
1542 }
1543
1544 ret = acquire_resources(this);
1545 if (ret)
1546 goto exit_acquire_resources;
1547
1548 ret = init_hardware(this);
1549 if (ret)
1550 goto exit_nfc_init;
1551
1552 ret = gpmi_nfc_init(this);
1553 if (ret)
1554 goto exit_nfc_init;
1555
1556 return 0;
1557
1558exit_nfc_init:
1559 release_resources(this);
1560platform_init_error:
1561exit_acquire_resources:
1562 platform_set_drvdata(pdev, NULL);
1563 kfree(this);
1564 return ret;
1565}
1566
1567static int __exit gpmi_nand_remove(struct platform_device *pdev)
1568{
1569 struct gpmi_nand_data *this = platform_get_drvdata(pdev);
1570
1571 gpmi_nfc_exit(this);
1572 release_resources(this);
1573 platform_set_drvdata(pdev, NULL);
1574 kfree(this);
1575 return 0;
1576}
1577
1578static const struct platform_device_id gpmi_ids[] = {
1579 {
1580 .name = "imx23-gpmi-nand",
1581 .driver_data = IS_MX23,
1582 }, {
1583 .name = "imx28-gpmi-nand",
1584 .driver_data = IS_MX28,
1585 }, {},
1586};
1587
1588static struct platform_driver gpmi_nand_driver = {
1589 .driver = {
1590 .name = "gpmi-nand",
1591 },
1592 .probe = gpmi_nand_probe,
1593 .remove = __exit_p(gpmi_nand_remove),
1594 .id_table = gpmi_ids,
1595};
1596
1597static int __init gpmi_nand_init(void)
1598{
1599 int err;
1600
1601 err = platform_driver_register(&gpmi_nand_driver);
1602 if (err == 0)
1603 printk(KERN_INFO "GPMI NAND driver registered. (IMX)\n");
1604 else
1605 pr_err("i.MX GPMI NAND driver registration failed\n");
1606 return err;
1607}
1608
1609static void __exit gpmi_nand_exit(void)
1610{
1611 platform_driver_unregister(&gpmi_nand_driver);
1612}
1613
1614module_init(gpmi_nand_init);
1615module_exit(gpmi_nand_exit);
1616
1617MODULE_AUTHOR("Freescale Semiconductor, Inc.");
1618MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
1619MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
new file mode 100644
index 000000000000..e023bccb7781
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -0,0 +1,273 @@
1/*
2 * Freescale GPMI NAND Flash Driver
3 *
4 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
5 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17#ifndef __DRIVERS_MTD_NAND_GPMI_NAND_H
18#define __DRIVERS_MTD_NAND_GPMI_NAND_H
19
20#include <linux/mtd/nand.h>
21#include <linux/platform_device.h>
22#include <linux/dma-mapping.h>
23#include <mach/dma.h>
24
25struct resources {
26 void *gpmi_regs;
27 void *bch_regs;
28 unsigned int bch_low_interrupt;
29 unsigned int bch_high_interrupt;
30 unsigned int dma_low_channel;
31 unsigned int dma_high_channel;
32 struct clk *clock;
33};
34
35/**
36 * struct bch_geometry - BCH geometry description.
37 * @gf_len: The length of the Galois field (e.g., 13 or 14).
38 * @ecc_strength: A number that describes the strength of the ECC
39 * algorithm.
40 * @page_size: The size, in bytes, of a physical page, including
41 * both data and OOB.
42 * @metadata_size: The size, in bytes, of the metadata.
43 * @ecc_chunk_size: The size, in bytes, of a single ECC chunk. Note
44 * the first chunk in the page includes both data and
45 * metadata, so it's a bit larger than this value.
46 * @ecc_chunk_count: The number of ECC chunks in the page.
47 * @payload_size: The size, in bytes, of the payload buffer.
48 * @auxiliary_size: The size, in bytes, of the auxiliary buffer.
49 * @auxiliary_status_offset: The offset into the auxiliary buffer at which
50 * the ECC status appears.
51 * @block_mark_byte_offset: The byte offset in the ECC-based page view at
52 * which the underlying physical block mark appears.
53 * @block_mark_bit_offset: The bit offset into the ECC-based page view at
54 * which the underlying physical block mark appears.
55 */
56struct bch_geometry {
57 unsigned int gf_len;
58 unsigned int ecc_strength;
59 unsigned int page_size;
60 unsigned int metadata_size;
61 unsigned int ecc_chunk_size;
62 unsigned int ecc_chunk_count;
63 unsigned int payload_size;
64 unsigned int auxiliary_size;
65 unsigned int auxiliary_status_offset;
66 unsigned int block_mark_byte_offset;
67 unsigned int block_mark_bit_offset;
68};
69
70/**
71 * struct boot_rom_geometry - Boot ROM geometry description.
72 * @stride_size_in_pages: The size of a boot block stride, in pages.
73 * @search_area_stride_exponent: The logarithm to base 2 of the size of a
74 * search area in boot block strides.
75 */
76struct boot_rom_geometry {
77 unsigned int stride_size_in_pages;
78 unsigned int search_area_stride_exponent;
79};
80
81/* DMA operations types */
82enum dma_ops_type {
83 DMA_FOR_COMMAND = 1,
84 DMA_FOR_READ_DATA,
85 DMA_FOR_WRITE_DATA,
86 DMA_FOR_READ_ECC_PAGE,
87 DMA_FOR_WRITE_ECC_PAGE
88};
89
90/**
91 * struct nand_timing - Fundamental timing attributes for NAND.
92 * @data_setup_in_ns: The data setup time, in nanoseconds. Usually the
93 * maximum of tDS and tWP. A negative value
94 * indicates this characteristic isn't known.
95 * @data_hold_in_ns: The data hold time, in nanoseconds. Usually the
96 * maximum of tDH, tWH and tREH. A negative value
97 * indicates this characteristic isn't known.
98 * @address_setup_in_ns: The address setup time, in nanoseconds. Usually
99 * the maximum of tCLS, tCS and tALS. A negative
100 * value indicates this characteristic isn't known.
101 * @gpmi_sample_delay_in_ns: A GPMI-specific timing parameter. A negative value
102 * indicates this characteristic isn't known.
103 * @tREA_in_ns: tREA, in nanoseconds, from the data sheet. A
104 * negative value indicates this characteristic isn't
105 * known.
106 * @tRLOH_in_ns: tRLOH, in nanoseconds, from the data sheet. A
107 * negative value indicates this characteristic isn't
108 * known.
109 * @tRHOH_in_ns: tRHOH, in nanoseconds, from the data sheet. A
110 * negative value indicates this characteristic isn't
111 * known.
112 */
113struct nand_timing {
114 int8_t data_setup_in_ns;
115 int8_t data_hold_in_ns;
116 int8_t address_setup_in_ns;
117 int8_t gpmi_sample_delay_in_ns;
118 int8_t tREA_in_ns;
119 int8_t tRLOH_in_ns;
120 int8_t tRHOH_in_ns;
121};
122
123struct gpmi_nand_data {
124 /* System Interface */
125 struct device *dev;
126 struct platform_device *pdev;
127 struct gpmi_nand_platform_data *pdata;
128
129 /* Resources */
130 struct resources resources;
131
132 /* Flash Hardware */
133 struct nand_timing timing;
134
135 /* BCH */
136 struct bch_geometry bch_geometry;
137 struct completion bch_done;
138
139 /* NAND Boot issue */
140 bool swap_block_mark;
141 struct boot_rom_geometry rom_geometry;
142
143 /* MTD / NAND */
144 struct nand_chip nand;
145 struct mtd_info mtd;
146
147 /* General-use Variables */
148 int current_chip;
149 unsigned int command_length;
150
151 /* passed from upper layer */
152 uint8_t *upper_buf;
153 int upper_len;
154
155 /* for DMA operations */
156 bool direct_dma_map_ok;
157
158 struct scatterlist cmd_sgl;
159 char *cmd_buffer;
160
161 struct scatterlist data_sgl;
162 char *data_buffer_dma;
163
164 void *page_buffer_virt;
165 dma_addr_t page_buffer_phys;
166 unsigned int page_buffer_size;
167
168 void *payload_virt;
169 dma_addr_t payload_phys;
170
171 void *auxiliary_virt;
172 dma_addr_t auxiliary_phys;
173
174 /* DMA channels */
175#define DMA_CHANS 8
176 struct dma_chan *dma_chans[DMA_CHANS];
177 struct mxs_dma_data dma_data;
178 enum dma_ops_type last_dma_type;
179 enum dma_ops_type dma_type;
180 struct completion dma_done;
181
182 /* private */
183 void *private;
184};
185
186/**
187 * struct gpmi_nfc_hardware_timing - GPMI hardware timing parameters.
188 * @data_setup_in_cycles: The data setup time, in cycles.
189 * @data_hold_in_cycles: The data hold time, in cycles.
190 * @address_setup_in_cycles: The address setup time, in cycles.
191 * @use_half_periods: Indicates the clock is running slowly, so the
192 * NFC DLL should use half-periods.
193 * @sample_delay_factor: The sample delay factor.
194 */
195struct gpmi_nfc_hardware_timing {
196 uint8_t data_setup_in_cycles;
197 uint8_t data_hold_in_cycles;
198 uint8_t address_setup_in_cycles;
199 bool use_half_periods;
200 uint8_t sample_delay_factor;
201};
202
203/**
204 * struct timing_threshod - Timing threshold
205 * @max_data_setup_cycles: The maximum number of data setup cycles that
206 * can be expressed in the hardware.
207 * @internal_data_setup_in_ns: The time, in ns, that the NFC hardware requires
208 * for data read internal setup. In the Reference
209 * Manual, see the chapter "High-Speed NAND
210 * Timing" for more details.
211 * @max_sample_delay_factor: The maximum sample delay factor that can be
212 * expressed in the hardware.
213 * @max_dll_clock_period_in_ns: The maximum period of the GPMI clock that the
214 * sample delay DLL hardware can possibly work
215 * with (the DLL is unusable with longer periods).
216 * If the full-cycle period is greater than HALF
217 * this value, the DLL must be configured to use
218 * half-periods.
219 * @max_dll_delay_in_ns: The maximum amount of delay, in ns, that the
220 * DLL can implement.
221 * @clock_frequency_in_hz: The clock frequency, in Hz, during the current
222 * I/O transaction. If no I/O transaction is in
223 * progress, this is the clock frequency during
224 * the most recent I/O transaction.
225 */
226struct timing_threshod {
227 const unsigned int max_chip_count;
228 const unsigned int max_data_setup_cycles;
229 const unsigned int internal_data_setup_in_ns;
230 const unsigned int max_sample_delay_factor;
231 const unsigned int max_dll_clock_period_in_ns;
232 const unsigned int max_dll_delay_in_ns;
233 unsigned long clock_frequency_in_hz;
234
235};
236
237/* Common Services */
238extern int common_nfc_set_geometry(struct gpmi_nand_data *);
239extern struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
240extern void prepare_data_dma(struct gpmi_nand_data *,
241 enum dma_data_direction dr);
242extern int start_dma_without_bch_irq(struct gpmi_nand_data *,
243 struct dma_async_tx_descriptor *);
244extern int start_dma_with_bch_irq(struct gpmi_nand_data *,
245 struct dma_async_tx_descriptor *);
246
247/* GPMI-NAND helper function library */
248extern int gpmi_init(struct gpmi_nand_data *);
249extern void gpmi_clear_bch(struct gpmi_nand_data *);
250extern void gpmi_dump_info(struct gpmi_nand_data *);
251extern int bch_set_geometry(struct gpmi_nand_data *);
252extern int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
253extern int gpmi_send_command(struct gpmi_nand_data *);
254extern void gpmi_begin(struct gpmi_nand_data *);
255extern void gpmi_end(struct gpmi_nand_data *);
256extern int gpmi_read_data(struct gpmi_nand_data *);
257extern int gpmi_send_data(struct gpmi_nand_data *);
258extern int gpmi_send_page(struct gpmi_nand_data *,
259 dma_addr_t payload, dma_addr_t auxiliary);
260extern int gpmi_read_page(struct gpmi_nand_data *,
261 dma_addr_t payload, dma_addr_t auxiliary);
262
263/* BCH : Status Block Completion Codes */
264#define STATUS_GOOD 0x00
265#define STATUS_ERASED 0xff
266#define STATUS_UNCORRECTABLE 0xfe
267
268/* Use the platform_id to distinguish different Archs. */
269#define IS_MX23 0x1
270#define IS_MX28 0x2
271#define GPMI_IS_MX23(x) ((x)->pdev->id_entry->driver_data == IS_MX23)
272#define GPMI_IS_MX28(x) ((x)->pdev->id_entry->driver_data == IS_MX28)
273#endif
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
new file mode 100644
index 000000000000..83431240e2f2
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
@@ -0,0 +1,172 @@
1/*
2 * Freescale GPMI NAND Flash Driver
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 * Copyright 2008 Embedded Alley Solutions, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21#ifndef __GPMI_NAND_GPMI_REGS_H
22#define __GPMI_NAND_GPMI_REGS_H
23
24#define HW_GPMI_CTRL0 0x00000000
25#define HW_GPMI_CTRL0_SET 0x00000004
26#define HW_GPMI_CTRL0_CLR 0x00000008
27#define HW_GPMI_CTRL0_TOG 0x0000000c
28
29#define BP_GPMI_CTRL0_COMMAND_MODE 24
30#define BM_GPMI_CTRL0_COMMAND_MODE (3 << BP_GPMI_CTRL0_COMMAND_MODE)
31#define BF_GPMI_CTRL0_COMMAND_MODE(v) \
32 (((v) << BP_GPMI_CTRL0_COMMAND_MODE) & BM_GPMI_CTRL0_COMMAND_MODE)
33#define BV_GPMI_CTRL0_COMMAND_MODE__WRITE 0x0
34#define BV_GPMI_CTRL0_COMMAND_MODE__READ 0x1
35#define BV_GPMI_CTRL0_COMMAND_MODE__READ_AND_COMPARE 0x2
36#define BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY 0x3
37
38#define BM_GPMI_CTRL0_WORD_LENGTH (1 << 23)
39#define BV_GPMI_CTRL0_WORD_LENGTH__16_BIT 0x0
40#define BV_GPMI_CTRL0_WORD_LENGTH__8_BIT 0x1
41
42/*
43 * Difference in LOCK_CS between imx23 and imx28:
44 * this bit may impact _POWER_ consumption, so some chips
45 * do not set it.
46 */
47#define MX23_BP_GPMI_CTRL0_LOCK_CS 22
48#define MX28_BP_GPMI_CTRL0_LOCK_CS 27
49#define LOCK_CS_ENABLE 0x1
50#define BF_GPMI_CTRL0_LOCK_CS(v, x) 0x0
51
52/* Difference in CS between imx23 and imx28 */
53#define BP_GPMI_CTRL0_CS 20
54#define MX23_BM_GPMI_CTRL0_CS (3 << BP_GPMI_CTRL0_CS)
55#define MX28_BM_GPMI_CTRL0_CS (7 << BP_GPMI_CTRL0_CS)
56#define BF_GPMI_CTRL0_CS(v, x) (((v) << BP_GPMI_CTRL0_CS) & \
57 (GPMI_IS_MX23((x)) \
58 ? MX23_BM_GPMI_CTRL0_CS \
59 : MX28_BM_GPMI_CTRL0_CS))
60
61#define BP_GPMI_CTRL0_ADDRESS 17
62#define BM_GPMI_CTRL0_ADDRESS (3 << BP_GPMI_CTRL0_ADDRESS)
63#define BF_GPMI_CTRL0_ADDRESS(v) \
64 (((v) << BP_GPMI_CTRL0_ADDRESS) & BM_GPMI_CTRL0_ADDRESS)
65#define BV_GPMI_CTRL0_ADDRESS__NAND_DATA 0x0
66#define BV_GPMI_CTRL0_ADDRESS__NAND_CLE 0x1
67#define BV_GPMI_CTRL0_ADDRESS__NAND_ALE 0x2
68
69#define BM_GPMI_CTRL0_ADDRESS_INCREMENT (1 << 16)
70#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__DISABLED 0x0
71#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__ENABLED 0x1
72
73#define BP_GPMI_CTRL0_XFER_COUNT 0
74#define BM_GPMI_CTRL0_XFER_COUNT (0xffff << BP_GPMI_CTRL0_XFER_COUNT)
75#define BF_GPMI_CTRL0_XFER_COUNT(v) \
76 (((v) << BP_GPMI_CTRL0_XFER_COUNT) & BM_GPMI_CTRL0_XFER_COUNT)
77
78#define HW_GPMI_COMPARE 0x00000010
79
80#define HW_GPMI_ECCCTRL 0x00000020
81#define HW_GPMI_ECCCTRL_SET 0x00000024
82#define HW_GPMI_ECCCTRL_CLR 0x00000028
83#define HW_GPMI_ECCCTRL_TOG 0x0000002c
84
85#define BP_GPMI_ECCCTRL_ECC_CMD 13
86#define BM_GPMI_ECCCTRL_ECC_CMD (3 << BP_GPMI_ECCCTRL_ECC_CMD)
87#define BF_GPMI_ECCCTRL_ECC_CMD(v) \
88 (((v) << BP_GPMI_ECCCTRL_ECC_CMD) & BM_GPMI_ECCCTRL_ECC_CMD)
89#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE 0x0
90#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE 0x1
91
92#define BM_GPMI_ECCCTRL_ENABLE_ECC (1 << 12)
93#define BV_GPMI_ECCCTRL_ENABLE_ECC__ENABLE 0x1
94#define BV_GPMI_ECCCTRL_ENABLE_ECC__DISABLE 0x0
95
96#define BP_GPMI_ECCCTRL_BUFFER_MASK 0
97#define BM_GPMI_ECCCTRL_BUFFER_MASK (0x1ff << BP_GPMI_ECCCTRL_BUFFER_MASK)
98#define BF_GPMI_ECCCTRL_BUFFER_MASK(v) \
99 (((v) << BP_GPMI_ECCCTRL_BUFFER_MASK) & BM_GPMI_ECCCTRL_BUFFER_MASK)
100#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY 0x100
101#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE 0x1FF
102
103#define HW_GPMI_ECCCOUNT 0x00000030
104#define HW_GPMI_PAYLOAD 0x00000040
105#define HW_GPMI_AUXILIARY 0x00000050
106#define HW_GPMI_CTRL1 0x00000060
107#define HW_GPMI_CTRL1_SET 0x00000064
108#define HW_GPMI_CTRL1_CLR 0x00000068
109#define HW_GPMI_CTRL1_TOG 0x0000006c
110
111#define BM_GPMI_CTRL1_BCH_MODE (1 << 18)
112
113#define BP_GPMI_CTRL1_DLL_ENABLE 17
114#define BM_GPMI_CTRL1_DLL_ENABLE (1 << BP_GPMI_CTRL1_DLL_ENABLE)
115
116#define BP_GPMI_CTRL1_HALF_PERIOD 16
117#define BM_GPMI_CTRL1_HALF_PERIOD (1 << BP_GPMI_CTRL1_HALF_PERIOD)
118
119#define BP_GPMI_CTRL1_RDN_DELAY 12
120#define BM_GPMI_CTRL1_RDN_DELAY (0xf << BP_GPMI_CTRL1_RDN_DELAY)
121#define BF_GPMI_CTRL1_RDN_DELAY(v) \
122 (((v) << BP_GPMI_CTRL1_RDN_DELAY) & BM_GPMI_CTRL1_RDN_DELAY)
123
124#define BM_GPMI_CTRL1_DEV_RESET (1 << 3)
125#define BV_GPMI_CTRL1_DEV_RESET__ENABLED 0x0
126#define BV_GPMI_CTRL1_DEV_RESET__DISABLED 0x1
127
128#define BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY (1 << 2)
129#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVELOW 0x0
130#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVEHIGH 0x1
131
132#define BM_GPMI_CTRL1_CAMERA_MODE (1 << 1)
133#define BV_GPMI_CTRL1_GPMI_MODE__NAND 0x0
134#define BV_GPMI_CTRL1_GPMI_MODE__ATA 0x1
135
136#define BM_GPMI_CTRL1_GPMI_MODE (1 << 0)
137
138#define HW_GPMI_TIMING0 0x00000070
139
140#define BP_GPMI_TIMING0_ADDRESS_SETUP 16
141#define BM_GPMI_TIMING0_ADDRESS_SETUP (0xff << BP_GPMI_TIMING0_ADDRESS_SETUP)
142#define BF_GPMI_TIMING0_ADDRESS_SETUP(v) \
143 (((v) << BP_GPMI_TIMING0_ADDRESS_SETUP) & BM_GPMI_TIMING0_ADDRESS_SETUP)
144
145#define BP_GPMI_TIMING0_DATA_HOLD 8
146#define BM_GPMI_TIMING0_DATA_HOLD (0xff << BP_GPMI_TIMING0_DATA_HOLD)
147#define BF_GPMI_TIMING0_DATA_HOLD(v) \
148 (((v) << BP_GPMI_TIMING0_DATA_HOLD) & BM_GPMI_TIMING0_DATA_HOLD)
149
150#define BP_GPMI_TIMING0_DATA_SETUP 0
151#define BM_GPMI_TIMING0_DATA_SETUP (0xff << BP_GPMI_TIMING0_DATA_SETUP)
152#define BF_GPMI_TIMING0_DATA_SETUP(v) \
153 (((v) << BP_GPMI_TIMING0_DATA_SETUP) & BM_GPMI_TIMING0_DATA_SETUP)
154
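
As an illustration of how the BP_/BM_/BF_ helpers above compose a register value (hypothetical cycle counts, and assuming this header is on the include path), a HW_GPMI_TIMING0 word could be built like this:

#include <stdio.h>
#include "gpmi-regs.h"	/* the header listed above */

int main(void)
{
	/* Hypothetical cycle counts, purely to show how the helpers compose. */
	unsigned int timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(2) |
			       BF_GPMI_TIMING0_DATA_HOLD(2) |
			       BF_GPMI_TIMING0_DATA_SETUP(3);

	printf("HW_GPMI_TIMING0 value: 0x%08x\n", timing0);	/* 0x00020203 */
	return 0;
}
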
155#define HW_GPMI_TIMING1 0x00000080
156#define BP_GPMI_TIMING1_BUSY_TIMEOUT 16
157
158#define HW_GPMI_TIMING2 0x00000090
159#define HW_GPMI_DATA 0x000000a0
160
161/* MX28 uses this to detect READY. */
162#define HW_GPMI_STAT 0x000000b0
163#define MX28_BP_GPMI_STAT_READY_BUSY 24
164#define MX28_BM_GPMI_STAT_READY_BUSY (0xff << MX28_BP_GPMI_STAT_READY_BUSY)
165#define MX28_BF_GPMI_STAT_READY_BUSY(v) \
166 (((v) << MX28_BP_GPMI_STAT_READY_BUSY) & MX28_BM_GPMI_STAT_READY_BUSY)
167
168/* MX23 uses this to detect READY. */
169#define HW_GPMI_DEBUG 0x000000c0
170#define MX23_BP_GPMI_DEBUG_READY0 28
171#define MX23_BM_GPMI_DEBUG_READY0 (1 << MX23_BP_GPMI_DEBUG_READY0)
172#endif
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index 02a03e67109c..5dc6f0d92f1a 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -81,9 +81,6 @@ static int h1910_device_ready(struct mtd_info *mtd)
81static int __init h1910_init(void) 81static int __init h1910_init(void)
82{ 82{
83 struct nand_chip *this; 83 struct nand_chip *this;
84 const char *part_type = 0;
85 int mtd_parts_nb = 0;
86 struct mtd_partition *mtd_parts = 0;
87 void __iomem *nandaddr; 84 void __iomem *nandaddr;
88 85
89 if (!machine_is_h1900()) 86 if (!machine_is_h1900())
@@ -136,22 +133,10 @@ static int __init h1910_init(void)
136 iounmap((void *)nandaddr); 133 iounmap((void *)nandaddr);
137 return -ENXIO; 134 return -ENXIO;
138 } 135 }
139#ifdef CONFIG_MTD_CMDLINE_PARTS
140 mtd_parts_nb = parse_cmdline_partitions(h1910_nand_mtd, &mtd_parts, "h1910-nand");
141 if (mtd_parts_nb > 0)
142 part_type = "command line";
143 else
144 mtd_parts_nb = 0;
145#endif
146 if (mtd_parts_nb == 0) {
147 mtd_parts = partition_info;
148 mtd_parts_nb = NUM_PARTITIONS;
149 part_type = "static";
150 }
151 136
152 /* Register the partitions */ 137 /* Register the partitions */
153 printk(KERN_NOTICE "Using %s partition definition\n", part_type); 138 mtd_device_parse_register(h1910_nand_mtd, NULL, 0,
154 mtd_device_register(h1910_nand_mtd, mtd_parts, mtd_parts_nb); 139 partition_info, NUM_PARTITIONS);
155 140
156 /* Return happy */ 141 /* Return happy */
157 return 0; 142 return 0;
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index 6e813daed068..e2664073a89b 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -251,10 +251,6 @@ static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat,
251 return 0; 251 return 0;
252} 252}
253 253
254#ifdef CONFIG_MTD_CMDLINE_PARTS
255static const char *part_probes[] = {"cmdline", NULL};
256#endif
257
258static int jz_nand_ioremap_resource(struct platform_device *pdev, 254static int jz_nand_ioremap_resource(struct platform_device *pdev,
259 const char *name, struct resource **res, void __iomem **base) 255 const char *name, struct resource **res, void __iomem **base)
260{ 256{
@@ -299,8 +295,6 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
299 struct nand_chip *chip; 295 struct nand_chip *chip;
300 struct mtd_info *mtd; 296 struct mtd_info *mtd;
301 struct jz_nand_platform_data *pdata = pdev->dev.platform_data; 297 struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
302 struct mtd_partition *partition_info;
303 int num_partitions = 0;
304 298
305 nand = kzalloc(sizeof(*nand), GFP_KERNEL); 299 nand = kzalloc(sizeof(*nand), GFP_KERNEL);
306 if (!nand) { 300 if (!nand) {
@@ -373,15 +367,9 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
373 goto err_gpio_free; 367 goto err_gpio_free;
374 } 368 }
375 369
376#ifdef CONFIG_MTD_CMDLINE_PARTS 370 ret = mtd_device_parse_register(mtd, NULL, 0,
377 num_partitions = parse_mtd_partitions(mtd, part_probes, 371 pdata ? pdata->partitions : NULL,
378 &partition_info, 0); 372 pdata ? pdata->num_partitions : 0);
379#endif
380 if (num_partitions <= 0 && pdata) {
381 num_partitions = pdata->num_partitions;
382 partition_info = pdata->partitions;
383 }
384 ret = mtd_device_register(mtd, partition_info, num_partitions);
385 373
386 if (ret) { 374 if (ret) {
387 dev_err(&pdev->dev, "Failed to add mtd device\n"); 375 dev_err(&pdev->dev, "Failed to add mtd device\n");
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index eb1fbac63eb6..5ede64706346 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -131,8 +131,6 @@ struct mpc5121_nfc_prv {
131 131
132static void mpc5121_nfc_done(struct mtd_info *mtd); 132static void mpc5121_nfc_done(struct mtd_info *mtd);
133 133
134static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL };
135
136/* Read NFC register */ 134/* Read NFC register */
137static inline u16 nfc_read(struct mtd_info *mtd, uint reg) 135static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
138{ 136{
@@ -656,13 +654,13 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
656 struct mpc5121_nfc_prv *prv; 654 struct mpc5121_nfc_prv *prv;
657 struct resource res; 655 struct resource res;
658 struct mtd_info *mtd; 656 struct mtd_info *mtd;
659 struct mtd_partition *parts;
660 struct nand_chip *chip; 657 struct nand_chip *chip;
661 unsigned long regs_paddr, regs_size; 658 unsigned long regs_paddr, regs_size;
662 const __be32 *chips_no; 659 const __be32 *chips_no;
663 int resettime = 0; 660 int resettime = 0;
664 int retval = 0; 661 int retval = 0;
665 int rev, len; 662 int rev, len;
663 struct mtd_part_parser_data ppdata;
666 664
667 /* 665 /*
668 * Check SoC revision. This driver supports only NFC 666 * Check SoC revision. This driver supports only NFC
@@ -727,6 +725,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
727 } 725 }
728 726
729 mtd->name = "MPC5121 NAND"; 727 mtd->name = "MPC5121 NAND";
728 ppdata.of_node = dn;
730 chip->dev_ready = mpc5121_nfc_dev_ready; 729 chip->dev_ready = mpc5121_nfc_dev_ready;
731 chip->cmdfunc = mpc5121_nfc_command; 730 chip->cmdfunc = mpc5121_nfc_command;
732 chip->read_byte = mpc5121_nfc_read_byte; 731 chip->read_byte = mpc5121_nfc_read_byte;
@@ -735,7 +734,8 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
735 chip->write_buf = mpc5121_nfc_write_buf; 734 chip->write_buf = mpc5121_nfc_write_buf;
736 chip->verify_buf = mpc5121_nfc_verify_buf; 735 chip->verify_buf = mpc5121_nfc_verify_buf;
737 chip->select_chip = mpc5121_nfc_select_chip; 736 chip->select_chip = mpc5121_nfc_select_chip;
738 chip->options = NAND_NO_AUTOINCR | NAND_USE_FLASH_BBT; 737 chip->options = NAND_NO_AUTOINCR;
738 chip->bbt_options = NAND_BBT_USE_FLASH;
739 chip->ecc.mode = NAND_ECC_SOFT; 739 chip->ecc.mode = NAND_ECC_SOFT;
740 740
741 /* Support external chip-select logic on ADS5121 board */ 741 /* Support external chip-select logic on ADS5121 board */
@@ -837,19 +837,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
837 dev_set_drvdata(dev, mtd); 837 dev_set_drvdata(dev, mtd);
838 838
839 /* Register device in MTD */ 839 /* Register device in MTD */
840 retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0); 840 retval = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
841#ifdef CONFIG_MTD_OF_PARTS
842 if (retval == 0)
843 retval = of_mtd_parse_partitions(dev, dn, &parts);
844#endif
845 if (retval < 0) {
846 dev_err(dev, "Error parsing MTD partitions!\n");
847 devm_free_irq(dev, prv->irq, mtd);
848 retval = -EINVAL;
849 goto error;
850 }
851
852 retval = mtd_device_register(mtd, parts, retval);
853 if (retval) { 841 if (retval) {
854 dev_err(dev, "Error adding MTD device!\n"); 842 dev_err(dev, "Error adding MTD device!\n");
855 devm_free_irq(dev, prv->irq, mtd); 843 devm_free_irq(dev, prv->irq, mtd);
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 90df34c4d26c..74a43b818d0e 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -41,7 +41,7 @@
 
 #define nfc_is_v21()		(cpu_is_mx25() || cpu_is_mx35())
 #define nfc_is_v1()		(cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21())
-#define nfc_is_v3_2()		cpu_is_mx51()
+#define nfc_is_v3_2()		(cpu_is_mx51() || cpu_is_mx53())
 #define nfc_is_v3()		nfc_is_v3_2()
 
 /* Addresses for NFC registers */
@@ -143,7 +143,6 @@
143struct mxc_nand_host { 143struct mxc_nand_host {
144 struct mtd_info mtd; 144 struct mtd_info mtd;
145 struct nand_chip nand; 145 struct nand_chip nand;
146 struct mtd_partition *parts;
147 struct device *dev; 146 struct device *dev;
148 147
149 void *spare0; 148 void *spare0;
@@ -350,8 +349,7 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq)
350 udelay(1); 349 udelay(1);
351 } 350 }
352 if (max_retries < 0) 351 if (max_retries < 0)
353 DEBUG(MTD_DEBUG_LEVEL0, "%s: INT not set\n", 352 pr_debug("%s: INT not set\n", __func__);
354 __func__);
355 } 353 }
356} 354}
357 355
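
The two-line deletions here and in the similar hunks below are the mechanical switch from the MTD-private DEBUG()/MTD_DEBUG_LEVEL* macros, gated on CONFIG_MTD_DEBUG and its verbosity level, to the generic pr_debug() helper, which is controlled per call site by dynamic debug or a -DDEBUG build. A before/after sketch of the pattern, reusing the message from the hunk above:

/* Before: MTD-private macro, compiled in only with CONFIG_MTD_DEBUG. */
DEBUG(MTD_DEBUG_LEVEL0, "%s: INT not set\n", __func__);

/* After: standard kernel helper, enabled per site via dynamic debug. */
pr_debug("%s: INT not set\n", __func__);
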
@@ -371,7 +369,7 @@ static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
371 * waits for completion. */ 369 * waits for completion. */
372static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq) 370static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
373{ 371{
374 DEBUG(MTD_DEBUG_LEVEL3, "send_cmd(host, 0x%x, %d)\n", cmd, useirq); 372 pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq);
375 373
376 writew(cmd, NFC_V1_V2_FLASH_CMD); 374 writew(cmd, NFC_V1_V2_FLASH_CMD);
377 writew(NFC_CMD, NFC_V1_V2_CONFIG2); 375 writew(NFC_CMD, NFC_V1_V2_CONFIG2);
@@ -387,8 +385,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
387 udelay(1); 385 udelay(1);
388 } 386 }
389 if (max_retries < 0) 387 if (max_retries < 0)
390 DEBUG(MTD_DEBUG_LEVEL0, "%s: RESET failed\n", 388 pr_debug("%s: RESET failed\n", __func__);
391 __func__);
392 } else { 389 } else {
393 /* Wait for operation to complete */ 390 /* Wait for operation to complete */
394 wait_op_done(host, useirq); 391 wait_op_done(host, useirq);
@@ -411,7 +408,7 @@ static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
411 * a NAND command. */ 408 * a NAND command. */
412static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast) 409static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
413{ 410{
414 DEBUG(MTD_DEBUG_LEVEL3, "send_addr(host, 0x%x %d)\n", addr, islast); 411 pr_debug("send_addr(host, 0x%x %d)\n", addr, islast);
415 412
416 writew(addr, NFC_V1_V2_FLASH_ADDR); 413 writew(addr, NFC_V1_V2_FLASH_ADDR);
417 writew(NFC_ADDR, NFC_V1_V2_CONFIG2); 414 writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
@@ -561,8 +558,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
561 uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT); 558 uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT);
562 559
563 if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) { 560 if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
564 DEBUG(MTD_DEBUG_LEVEL0, 561 pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
565 "MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
566 return -1; 562 return -1;
567 } 563 }
568 564
@@ -849,7 +845,7 @@ static void preset_v1_v2(struct mtd_info *mtd)
 		writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
 	} else if (nfc_is_v1()) {
 		writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
-		writew(0x4000, NFC_V1_UNLOCKEND_BLKADDR);
+		writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
 	} else
 		BUG();
 
@@ -932,8 +928,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
932 struct nand_chip *nand_chip = mtd->priv; 928 struct nand_chip *nand_chip = mtd->priv;
933 struct mxc_nand_host *host = nand_chip->priv; 929 struct mxc_nand_host *host = nand_chip->priv;
934 930
935 DEBUG(MTD_DEBUG_LEVEL3, 931 pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
936 "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
937 command, column, page_addr); 932 command, column, page_addr);
938 933
939 /* Reset command state information */ 934 /* Reset command state information */
@@ -1044,7 +1039,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
 	struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
 	struct mxc_nand_host *host;
 	struct resource *res;
-	int err = 0, __maybe_unused nr_parts = 0;
+	int err = 0;
 	struct nand_ecclayout *oob_smallpage, *oob_largepage;
 
 	/* Allocate memory for MTD device structure and private data */
@@ -1179,7 +1174,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
 		this->bbt_td = &bbt_main_descr;
 		this->bbt_md = &bbt_mirror_descr;
 		/* update flash based bbt */
-		this->options |= NAND_USE_FLASH_BBT;
+		this->bbt_options |= NAND_BBT_USE_FLASH;
 	}
 
 	init_completion(&host->op_completion);
@@ -1231,16 +1226,8 @@ static int __init mxcnd_probe(struct platform_device *pdev)
 	}
 
 	/* Register the partitions */
-	nr_parts =
-		parse_mtd_partitions(mtd, part_probes, &host->parts, 0);
-	if (nr_parts > 0)
-		mtd_device_register(mtd, host->parts, nr_parts);
-	else if (pdata->parts)
-		mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
-	else {
-		pr_info("Registering %s as whole device\n", mtd->name);
-		mtd_device_register(mtd, NULL, 0);
-	}
+	mtd_device_parse_register(mtd, part_probes, 0,
+			pdata->parts, pdata->nr_parts);
 
 	platform_set_drvdata(pdev, host);
 
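
The flag moves in this file recur throughout nand_base.c and nand_bbt.c below: bad-block-table behaviour is split out of chip->options into a new chip->bbt_options field, with NAND_USE_FLASH_BBT becoming NAND_BBT_USE_FLASH. A minimal sketch of a driver's chip setup after the split; the helper name is invented and the exact flag combination is illustrative.

#include <linux/mtd/nand.h>

/* Illustrative helper: keep the bad block table in flash. */
static void example_setup_chip(struct nand_chip *chip)
{
	/* Chip/controller behaviour flags stay in ->options ... */
	chip->options |= NAND_NO_AUTOINCR;
	/* ... while BBT handling now lives in ->bbt_options. */
	chip->bbt_options |= NAND_BBT_USE_FLASH;
	chip->ecc.mode = NAND_ECC_SOFT;
}
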
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index a46e9bb847bd..3ed9c5e4d34e 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -21,7 +21,7 @@
21 * TODO: 21 * TODO:
22 * Enable cached programming for 2k page size chips 22 * Enable cached programming for 2k page size chips
23 * Check, if mtd->ecctype should be set to MTD_ECC_HW 23 * Check, if mtd->ecctype should be set to MTD_ECC_HW
24 * if we have HW ecc support. 24 * if we have HW ECC support.
25 * The AG-AND chips have nice features for speed improvement, 25 * The AG-AND chips have nice features for speed improvement,
26 * which are not supported yet. Read / program 4 pages in one go. 26 * which are not supported yet. Read / program 4 pages in one go.
27 * BBT table is not serialized, has to be fixed 27 * BBT table is not serialized, has to be fixed
@@ -113,21 +113,19 @@ static int check_offs_len(struct mtd_info *mtd,
113 113
114 /* Start address must align on block boundary */ 114 /* Start address must align on block boundary */
115 if (ofs & ((1 << chip->phys_erase_shift) - 1)) { 115 if (ofs & ((1 << chip->phys_erase_shift) - 1)) {
116 DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__); 116 pr_debug("%s: unaligned address\n", __func__);
117 ret = -EINVAL; 117 ret = -EINVAL;
118 } 118 }
119 119
120 /* Length must align on block boundary */ 120 /* Length must align on block boundary */
121 if (len & ((1 << chip->phys_erase_shift) - 1)) { 121 if (len & ((1 << chip->phys_erase_shift) - 1)) {
122 DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n", 122 pr_debug("%s: length not block aligned\n", __func__);
123 __func__);
124 ret = -EINVAL; 123 ret = -EINVAL;
125 } 124 }
126 125
127 /* Do not allow past end of device */ 126 /* Do not allow past end of device */
128 if (ofs + len > mtd->size) { 127 if (ofs + len > mtd->size) {
129 DEBUG(MTD_DEBUG_LEVEL0, "%s: Past end of device\n", 128 pr_debug("%s: past end of device\n", __func__);
130 __func__);
131 ret = -EINVAL; 129 ret = -EINVAL;
132 } 130 }
133 131
@@ -136,9 +134,9 @@ static int check_offs_len(struct mtd_info *mtd,
136 134
137/** 135/**
138 * nand_release_device - [GENERIC] release chip 136 * nand_release_device - [GENERIC] release chip
139 * @mtd: MTD device structure 137 * @mtd: MTD device structure
140 * 138 *
141 * Deselect, release chip lock and wake up anyone waiting on the device 139 * Deselect, release chip lock and wake up anyone waiting on the device.
142 */ 140 */
143static void nand_release_device(struct mtd_info *mtd) 141static void nand_release_device(struct mtd_info *mtd)
144{ 142{
@@ -157,9 +155,9 @@ static void nand_release_device(struct mtd_info *mtd)
157 155
158/** 156/**
159 * nand_read_byte - [DEFAULT] read one byte from the chip 157 * nand_read_byte - [DEFAULT] read one byte from the chip
160 * @mtd: MTD device structure 158 * @mtd: MTD device structure
161 * 159 *
162 * Default read function for 8bit buswith 160 * Default read function for 8bit buswidth
163 */ 161 */
164static uint8_t nand_read_byte(struct mtd_info *mtd) 162static uint8_t nand_read_byte(struct mtd_info *mtd)
165{ 163{
@@ -169,10 +167,11 @@ static uint8_t nand_read_byte(struct mtd_info *mtd)
169 167
170/** 168/**
171 * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip 169 * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip
172 * @mtd: MTD device structure 170 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
171 * @mtd: MTD device structure
172 *
173 * Default read function for 16bit buswidth with endianness conversion.
173 * 174 *
174 * Default read function for 16bit buswith with
175 * endianess conversion
176 */ 175 */
177static uint8_t nand_read_byte16(struct mtd_info *mtd) 176static uint8_t nand_read_byte16(struct mtd_info *mtd)
178{ 177{
@@ -182,10 +181,9 @@ static uint8_t nand_read_byte16(struct mtd_info *mtd)
182 181
183/** 182/**
184 * nand_read_word - [DEFAULT] read one word from the chip 183 * nand_read_word - [DEFAULT] read one word from the chip
185 * @mtd: MTD device structure 184 * @mtd: MTD device structure
186 * 185 *
187 * Default read function for 16bit buswith without 186 * Default read function for 16bit buswidth without endianness conversion.
188 * endianess conversion
189 */ 187 */
190static u16 nand_read_word(struct mtd_info *mtd) 188static u16 nand_read_word(struct mtd_info *mtd)
191{ 189{
@@ -195,8 +193,8 @@ static u16 nand_read_word(struct mtd_info *mtd)
195 193
196/** 194/**
197 * nand_select_chip - [DEFAULT] control CE line 195 * nand_select_chip - [DEFAULT] control CE line
198 * @mtd: MTD device structure 196 * @mtd: MTD device structure
199 * @chipnr: chipnumber to select, -1 for deselect 197 * @chipnr: chipnumber to select, -1 for deselect
200 * 198 *
201 * Default select function for 1 chip devices. 199 * Default select function for 1 chip devices.
202 */ 200 */
@@ -218,11 +216,11 @@ static void nand_select_chip(struct mtd_info *mtd, int chipnr)
218 216
219/** 217/**
220 * nand_write_buf - [DEFAULT] write buffer to chip 218 * nand_write_buf - [DEFAULT] write buffer to chip
221 * @mtd: MTD device structure 219 * @mtd: MTD device structure
222 * @buf: data buffer 220 * @buf: data buffer
223 * @len: number of bytes to write 221 * @len: number of bytes to write
224 * 222 *
225 * Default write function for 8bit buswith 223 * Default write function for 8bit buswidth.
226 */ 224 */
227static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 225static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
228{ 226{
@@ -235,11 +233,11 @@ static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
235 233
236/** 234/**
237 * nand_read_buf - [DEFAULT] read chip data into buffer 235 * nand_read_buf - [DEFAULT] read chip data into buffer
238 * @mtd: MTD device structure 236 * @mtd: MTD device structure
239 * @buf: buffer to store date 237 * @buf: buffer to store date
240 * @len: number of bytes to read 238 * @len: number of bytes to read
241 * 239 *
242 * Default read function for 8bit buswith 240 * Default read function for 8bit buswidth.
243 */ 241 */
244static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 242static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
245{ 243{
@@ -252,11 +250,11 @@ static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
252 250
253/** 251/**
254 * nand_verify_buf - [DEFAULT] Verify chip data against buffer 252 * nand_verify_buf - [DEFAULT] Verify chip data against buffer
255 * @mtd: MTD device structure 253 * @mtd: MTD device structure
256 * @buf: buffer containing the data to compare 254 * @buf: buffer containing the data to compare
257 * @len: number of bytes to compare 255 * @len: number of bytes to compare
258 * 256 *
259 * Default verify function for 8bit buswith 257 * Default verify function for 8bit buswidth.
260 */ 258 */
261static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 259static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
262{ 260{
@@ -271,11 +269,11 @@ static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
271 269
272/** 270/**
273 * nand_write_buf16 - [DEFAULT] write buffer to chip 271 * nand_write_buf16 - [DEFAULT] write buffer to chip
274 * @mtd: MTD device structure 272 * @mtd: MTD device structure
275 * @buf: data buffer 273 * @buf: data buffer
276 * @len: number of bytes to write 274 * @len: number of bytes to write
277 * 275 *
278 * Default write function for 16bit buswith 276 * Default write function for 16bit buswidth.
279 */ 277 */
280static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) 278static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
281{ 279{
@@ -291,11 +289,11 @@ static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
291 289
292/** 290/**
293 * nand_read_buf16 - [DEFAULT] read chip data into buffer 291 * nand_read_buf16 - [DEFAULT] read chip data into buffer
294 * @mtd: MTD device structure 292 * @mtd: MTD device structure
295 * @buf: buffer to store date 293 * @buf: buffer to store date
296 * @len: number of bytes to read 294 * @len: number of bytes to read
297 * 295 *
298 * Default read function for 16bit buswith 296 * Default read function for 16bit buswidth.
299 */ 297 */
300static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) 298static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
301{ 299{
@@ -310,11 +308,11 @@ static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
310 308
311/** 309/**
312 * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer 310 * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer
313 * @mtd: MTD device structure 311 * @mtd: MTD device structure
314 * @buf: buffer containing the data to compare 312 * @buf: buffer containing the data to compare
315 * @len: number of bytes to compare 313 * @len: number of bytes to compare
316 * 314 *
317 * Default verify function for 16bit buswith 315 * Default verify function for 16bit buswidth.
318 */ 316 */
319static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) 317static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
320{ 318{
@@ -332,9 +330,9 @@ static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
332 330
333/** 331/**
334 * nand_block_bad - [DEFAULT] Read bad block marker from the chip 332 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
335 * @mtd: MTD device structure 333 * @mtd: MTD device structure
336 * @ofs: offset from device start 334 * @ofs: offset from device start
337 * @getchip: 0, if the chip is already selected 335 * @getchip: 0, if the chip is already selected
338 * 336 *
339 * Check, if the block is bad. 337 * Check, if the block is bad.
340 */ 338 */
@@ -344,7 +342,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
344 struct nand_chip *chip = mtd->priv; 342 struct nand_chip *chip = mtd->priv;
345 u16 bad; 343 u16 bad;
346 344
347 if (chip->options & NAND_BBT_SCANLASTPAGE) 345 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
348 ofs += mtd->erasesize - mtd->writesize; 346 ofs += mtd->erasesize - mtd->writesize;
349 347
350 page = (int)(ofs >> chip->page_shift) & chip->pagemask; 348 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
@@ -384,11 +382,11 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
384 382
385/** 383/**
386 * nand_default_block_markbad - [DEFAULT] mark a block bad 384 * nand_default_block_markbad - [DEFAULT] mark a block bad
387 * @mtd: MTD device structure 385 * @mtd: MTD device structure
388 * @ofs: offset from device start 386 * @ofs: offset from device start
389 * 387 *
390 * This is the default implementation, which can be overridden by 388 * This is the default implementation, which can be overridden by a hardware
391 * a hardware specific driver. 389 * specific driver.
392*/ 390*/
393static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) 391static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
394{ 392{
@@ -396,7 +394,7 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
396 uint8_t buf[2] = { 0, 0 }; 394 uint8_t buf[2] = { 0, 0 };
397 int block, ret, i = 0; 395 int block, ret, i = 0;
398 396
399 if (chip->options & NAND_BBT_SCANLASTPAGE) 397 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
400 ofs += mtd->erasesize - mtd->writesize; 398 ofs += mtd->erasesize - mtd->writesize;
401 399
402 /* Get block number */ 400 /* Get block number */
@@ -404,33 +402,31 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
 	if (chip->bbt)
 		chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
 
-	/* Do we have a flash based bad block table ? */
-	if (chip->options & NAND_USE_FLASH_BBT)
+	/* Do we have a flash based bad block table? */
+	if (chip->bbt_options & NAND_BBT_USE_FLASH)
 		ret = nand_update_bbt(mtd, ofs);
 	else {
+		struct mtd_oob_ops ops;
+
 		nand_get_device(chip, mtd, FL_WRITING);
 
-		/* Write to first two pages and to byte 1 and 6 if necessary.
-		 * If we write to more than one location, the first error
-		 * encountered quits the procedure. We write two bytes per
-		 * location, so we dont have to mess with 16 bit access.
+		/*
+		 * Write to first two pages if necessary. If we write to more
+		 * than one location, the first error encountered quits the
+		 * procedure. We write two bytes per location, so we dont have
+		 * to mess with 16 bit access.
 		 */
+		ops.len = ops.ooblen = 2;
+		ops.datbuf = NULL;
+		ops.oobbuf = buf;
+		ops.ooboffs = chip->badblockpos & ~0x01;
+		ops.mode = MTD_OPS_PLACE_OOB;
 		do {
-			chip->ops.len = chip->ops.ooblen = 2;
-			chip->ops.datbuf = NULL;
-			chip->ops.oobbuf = buf;
-			chip->ops.ooboffs = chip->badblockpos & ~0x01;
-
-			ret = nand_do_write_oob(mtd, ofs, &chip->ops);
+			ret = nand_do_write_oob(mtd, ofs, &ops);
 
-			if (!ret && (chip->options & NAND_BBT_SCANBYTE1AND6)) {
-				chip->ops.ooboffs = NAND_SMALL_BADBLOCK_POS
-					& ~0x01;
-				ret = nand_do_write_oob(mtd, ofs, &chip->ops);
-			}
 			i++;
 			ofs += mtd->writesize;
-		} while (!ret && (chip->options & NAND_BBT_SCAN2NDPAGE) &&
+		} while (!ret && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE) &&
 				i < 2);
 
 		nand_release_device(mtd);
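
Besides the flag rename, the hunk above replaces the long-lived chip->ops scratch structure with a struct mtd_oob_ops built on the stack, so callers no longer share scratch state through the chip structure; the read path below gets the same treatment. A hedged sketch of the same pattern from a hypothetical caller's side, using only fields visible in the hunk (the function name is invented; in this kernel generation the call goes through the mtd->write_oob method):

#include <linux/mtd/mtd.h>

/* Hypothetical caller: write a two-byte bad block marker at 'ofs'. */
static int example_write_marker(struct mtd_info *mtd, loff_t ofs)
{
	uint8_t marker[2] = { 0, 0 };
	struct mtd_oob_ops ops = {
		.mode	 = MTD_OPS_PLACE_OOB,	/* write at the given OOB offset */
		.ooblen	 = 2,
		.oobbuf	 = marker,
		.ooboffs = 0,
		.datbuf	 = NULL,		/* OOB only, no main-area data */
	};

	return mtd->write_oob(mtd, ofs, &ops);
}
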
@@ -443,16 +439,16 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
443 439
444/** 440/**
445 * nand_check_wp - [GENERIC] check if the chip is write protected 441 * nand_check_wp - [GENERIC] check if the chip is write protected
446 * @mtd: MTD device structure 442 * @mtd: MTD device structure
447 * Check, if the device is write protected
448 * 443 *
449 * The function expects, that the device is already selected 444 * Check, if the device is write protected. The function expects, that the
445 * device is already selected.
450 */ 446 */
451static int nand_check_wp(struct mtd_info *mtd) 447static int nand_check_wp(struct mtd_info *mtd)
452{ 448{
453 struct nand_chip *chip = mtd->priv; 449 struct nand_chip *chip = mtd->priv;
454 450
455 /* broken xD cards report WP despite being writable */ 451 /* Broken xD cards report WP despite being writable */
456 if (chip->options & NAND_BROKEN_XD) 452 if (chip->options & NAND_BROKEN_XD)
457 return 0; 453 return 0;
458 454
@@ -463,10 +459,10 @@ static int nand_check_wp(struct mtd_info *mtd)
463 459
464/** 460/**
465 * nand_block_checkbad - [GENERIC] Check if a block is marked bad 461 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
466 * @mtd: MTD device structure 462 * @mtd: MTD device structure
467 * @ofs: offset from device start 463 * @ofs: offset from device start
468 * @getchip: 0, if the chip is already selected 464 * @getchip: 0, if the chip is already selected
469 * @allowbbt: 1, if its allowed to access the bbt area 465 * @allowbbt: 1, if its allowed to access the bbt area
470 * 466 *
471 * Check, if the block is bad. Either by reading the bad block table or 467 * Check, if the block is bad. Either by reading the bad block table or
472 * calling of the scan function. 468 * calling of the scan function.
@@ -485,8 +481,8 @@ static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
485 481
486/** 482/**
487 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands. 483 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
488 * @mtd: MTD device structure 484 * @mtd: MTD device structure
489 * @timeo: Timeout 485 * @timeo: Timeout
490 * 486 *
491 * Helper function for nand_wait_ready used when needing to wait in interrupt 487 * Helper function for nand_wait_ready used when needing to wait in interrupt
492 * context. 488 * context.
@@ -505,10 +501,7 @@ static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
505 } 501 }
506} 502}
507 503
508/* 504/* Wait for the ready pin, after a command. The timeout is caught later. */
509 * Wait for the ready pin, after a command
510 * The timeout is catched later.
511 */
512void nand_wait_ready(struct mtd_info *mtd) 505void nand_wait_ready(struct mtd_info *mtd)
513{ 506{
514 struct nand_chip *chip = mtd->priv; 507 struct nand_chip *chip = mtd->priv;
@@ -519,7 +512,7 @@ void nand_wait_ready(struct mtd_info *mtd)
519 return panic_nand_wait_ready(mtd, 400); 512 return panic_nand_wait_ready(mtd, 400);
520 513
521 led_trigger_event(nand_led_trigger, LED_FULL); 514 led_trigger_event(nand_led_trigger, LED_FULL);
522 /* wait until command is processed or timeout occures */ 515 /* Wait until command is processed or timeout occurs */
523 do { 516 do {
524 if (chip->dev_ready(mtd)) 517 if (chip->dev_ready(mtd))
525 break; 518 break;
@@ -531,13 +524,13 @@ EXPORT_SYMBOL_GPL(nand_wait_ready);
531 524
532/** 525/**
533 * nand_command - [DEFAULT] Send command to NAND device 526 * nand_command - [DEFAULT] Send command to NAND device
534 * @mtd: MTD device structure 527 * @mtd: MTD device structure
535 * @command: the command to be sent 528 * @command: the command to be sent
536 * @column: the column address for this command, -1 if none 529 * @column: the column address for this command, -1 if none
537 * @page_addr: the page address for this command, -1 if none 530 * @page_addr: the page address for this command, -1 if none
538 * 531 *
539 * Send command to NAND device. This function is used for small page 532 * Send command to NAND device. This function is used for small page devices
540 * devices (256/512 Bytes per page) 533 * (256/512 Bytes per page).
541 */ 534 */
542static void nand_command(struct mtd_info *mtd, unsigned int command, 535static void nand_command(struct mtd_info *mtd, unsigned int command,
543 int column, int page_addr) 536 int column, int page_addr)
@@ -545,9 +538,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
545 register struct nand_chip *chip = mtd->priv; 538 register struct nand_chip *chip = mtd->priv;
546 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE; 539 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
547 540
548 /* 541 /* Write out the command to the device */
549 * Write out the command to the device.
550 */
551 if (command == NAND_CMD_SEQIN) { 542 if (command == NAND_CMD_SEQIN) {
552 int readcmd; 543 int readcmd;
553 544
@@ -567,9 +558,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
567 } 558 }
568 chip->cmd_ctrl(mtd, command, ctrl); 559 chip->cmd_ctrl(mtd, command, ctrl);
569 560
570 /* 561 /* Address cycle, when necessary */
571 * Address cycle, when necessary
572 */
573 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE; 562 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
574 /* Serially input address */ 563 /* Serially input address */
575 if (column != -1) { 564 if (column != -1) {
@@ -590,8 +579,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
590 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); 579 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
591 580
592 /* 581 /*
593 * program and erase have their own busy handlers 582 * Program and erase have their own busy handlers status and sequential
594 * status and sequential in needs no delay 583 * in needs no delay
595 */ 584 */
596 switch (command) { 585 switch (command) {
597 586
@@ -625,8 +614,10 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
625 return; 614 return;
626 } 615 }
627 } 616 }
628 /* Apply this short delay always to ensure that we do wait tWB in 617 /*
629 * any case on any machine. */ 618 * Apply this short delay always to ensure that we do wait tWB in
619 * any case on any machine.
620 */
630 ndelay(100); 621 ndelay(100);
631 622
632 nand_wait_ready(mtd); 623 nand_wait_ready(mtd);
@@ -634,14 +625,14 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
634 625
635/** 626/**
636 * nand_command_lp - [DEFAULT] Send command to NAND large page device 627 * nand_command_lp - [DEFAULT] Send command to NAND large page device
637 * @mtd: MTD device structure 628 * @mtd: MTD device structure
638 * @command: the command to be sent 629 * @command: the command to be sent
639 * @column: the column address for this command, -1 if none 630 * @column: the column address for this command, -1 if none
640 * @page_addr: the page address for this command, -1 if none 631 * @page_addr: the page address for this command, -1 if none
641 * 632 *
642 * Send command to NAND device. This is the version for the new large page 633 * Send command to NAND device. This is the version for the new large page
643 * devices We dont have the separate regions as we have in the small page 634 * devices. We don't have the separate regions as we have in the small page
644 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible. 635 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
645 */ 636 */
646static void nand_command_lp(struct mtd_info *mtd, unsigned int command, 637static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
647 int column, int page_addr) 638 int column, int page_addr)
@@ -683,8 +674,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
683 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); 674 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
684 675
685 /* 676 /*
686 * program and erase have their own busy handlers 677 * Program and erase have their own busy handlers status, sequential
687 * status, sequential in, and deplete1 need no delay 678 * in, and deplete1 need no delay.
688 */ 679 */
689 switch (command) { 680 switch (command) {
690 681
@@ -698,14 +689,12 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
698 case NAND_CMD_DEPLETE1: 689 case NAND_CMD_DEPLETE1:
699 return; 690 return;
700 691
701 /*
702 * read error status commands require only a short delay
703 */
704 case NAND_CMD_STATUS_ERROR: 692 case NAND_CMD_STATUS_ERROR:
705 case NAND_CMD_STATUS_ERROR0: 693 case NAND_CMD_STATUS_ERROR0:
706 case NAND_CMD_STATUS_ERROR1: 694 case NAND_CMD_STATUS_ERROR1:
707 case NAND_CMD_STATUS_ERROR2: 695 case NAND_CMD_STATUS_ERROR2:
708 case NAND_CMD_STATUS_ERROR3: 696 case NAND_CMD_STATUS_ERROR3:
697 /* Read error status commands require only a short delay */
709 udelay(chip->chip_delay); 698 udelay(chip->chip_delay);
710 return; 699 return;
711 700
@@ -739,7 +728,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
739 default: 728 default:
740 /* 729 /*
741 * If we don't have access to the busy pin, we apply the given 730 * If we don't have access to the busy pin, we apply the given
742 * command delay 731 * command delay.
743 */ 732 */
744 if (!chip->dev_ready) { 733 if (!chip->dev_ready) {
745 udelay(chip->chip_delay); 734 udelay(chip->chip_delay);
@@ -747,8 +736,10 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
747 } 736 }
748 } 737 }
749 738
750 /* Apply this short delay always to ensure that we do wait tWB in 739 /*
751 * any case on any machine. */ 740 * Apply this short delay always to ensure that we do wait tWB in
741 * any case on any machine.
742 */
752 ndelay(100); 743 ndelay(100);
753 744
754 nand_wait_ready(mtd); 745 nand_wait_ready(mtd);
@@ -756,25 +747,25 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
756 747
757/** 748/**
758 * panic_nand_get_device - [GENERIC] Get chip for selected access 749 * panic_nand_get_device - [GENERIC] Get chip for selected access
759 * @chip: the nand chip descriptor 750 * @chip: the nand chip descriptor
760 * @mtd: MTD device structure 751 * @mtd: MTD device structure
761 * @new_state: the state which is requested 752 * @new_state: the state which is requested
762 * 753 *
763 * Used when in panic, no locks are taken. 754 * Used when in panic, no locks are taken.
764 */ 755 */
765static void panic_nand_get_device(struct nand_chip *chip, 756static void panic_nand_get_device(struct nand_chip *chip,
766 struct mtd_info *mtd, int new_state) 757 struct mtd_info *mtd, int new_state)
767{ 758{
768 /* Hardware controller shared among independend devices */ 759 /* Hardware controller shared among independent devices */
769 chip->controller->active = chip; 760 chip->controller->active = chip;
770 chip->state = new_state; 761 chip->state = new_state;
771} 762}
772 763
773/** 764/**
774 * nand_get_device - [GENERIC] Get chip for selected access 765 * nand_get_device - [GENERIC] Get chip for selected access
775 * @chip: the nand chip descriptor 766 * @chip: the nand chip descriptor
776 * @mtd: MTD device structure 767 * @mtd: MTD device structure
777 * @new_state: the state which is requested 768 * @new_state: the state which is requested
778 * 769 *
779 * Get the device and lock it for exclusive access 770 * Get the device and lock it for exclusive access
780 */ 771 */
@@ -812,10 +803,10 @@ retry:
812} 803}
813 804
814/** 805/**
815 * panic_nand_wait - [GENERIC] wait until the command is done 806 * panic_nand_wait - [GENERIC] wait until the command is done
816 * @mtd: MTD device structure 807 * @mtd: MTD device structure
817 * @chip: NAND chip structure 808 * @chip: NAND chip structure
818 * @timeo: Timeout 809 * @timeo: timeout
819 * 810 *
820 * Wait for command done. This is a helper function for nand_wait used when 811 * Wait for command done. This is a helper function for nand_wait used when
821 * we are in interrupt context. May happen when in panic and trying to write 812 * we are in interrupt context. May happen when in panic and trying to write
@@ -838,13 +829,13 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
838} 829}
839 830
840/** 831/**
841 * nand_wait - [DEFAULT] wait until the command is done 832 * nand_wait - [DEFAULT] wait until the command is done
842 * @mtd: MTD device structure 833 * @mtd: MTD device structure
843 * @chip: NAND chip structure 834 * @chip: NAND chip structure
844 * 835 *
845 * Wait for command done. This applies to erase and program only 836 * Wait for command done. This applies to erase and program only. Erase can
846 * Erase can take up to 400ms and program up to 20ms according to 837 * take up to 400ms and program up to 20ms according to general NAND and
847 * general NAND and SmartMedia specs 838 * SmartMedia specs.
848 */ 839 */
849static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) 840static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
850{ 841{
@@ -859,8 +850,10 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
859 850
860 led_trigger_event(nand_led_trigger, LED_FULL); 851 led_trigger_event(nand_led_trigger, LED_FULL);
861 852
862 /* Apply this short delay always to ensure that we do wait tWB in 853 /*
863 * any case on any machine. */ 854 * Apply this short delay always to ensure that we do wait tWB in any
855 * case on any machine.
856 */
864 ndelay(100); 857 ndelay(100);
865 858
866 if ((state == FL_ERASING) && (chip->options & NAND_IS_AND)) 859 if ((state == FL_ERASING) && (chip->options & NAND_IS_AND))
@@ -890,16 +883,15 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
890 883
891/** 884/**
892 * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks 885 * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
893 *
894 * @mtd: mtd info 886 * @mtd: mtd info
895 * @ofs: offset to start unlock from 887 * @ofs: offset to start unlock from
896 * @len: length to unlock 888 * @len: length to unlock
897 * @invert: when = 0, unlock the range of blocks within the lower and 889 * @invert: when = 0, unlock the range of blocks within the lower and
898 * upper boundary address 890 * upper boundary address
899 * when = 1, unlock the range of blocks outside the boundaries 891 * when = 1, unlock the range of blocks outside the boundaries
900 * of the lower and upper boundary address 892 * of the lower and upper boundary address
901 * 893 *
902 * return - unlock status 894 * Returs unlock status.
903 */ 895 */
904static int __nand_unlock(struct mtd_info *mtd, loff_t ofs, 896static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
905 uint64_t len, int invert) 897 uint64_t len, int invert)
@@ -919,10 +911,9 @@ static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
919 911
920 /* Call wait ready function */ 912 /* Call wait ready function */
921 status = chip->waitfunc(mtd, chip); 913 status = chip->waitfunc(mtd, chip);
922 udelay(1000);
923 /* See if device thinks it succeeded */ 914 /* See if device thinks it succeeded */
924 if (status & 0x01) { 915 if (status & 0x01) {
925 DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n", 916 pr_debug("%s: error status = 0x%08x\n",
926 __func__, status); 917 __func__, status);
927 ret = -EIO; 918 ret = -EIO;
928 } 919 }
@@ -932,12 +923,11 @@ static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
932 923
933/** 924/**
934 * nand_unlock - [REPLACEABLE] unlocks specified locked blocks 925 * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
935 *
936 * @mtd: mtd info 926 * @mtd: mtd info
937 * @ofs: offset to start unlock from 927 * @ofs: offset to start unlock from
938 * @len: length to unlock 928 * @len: length to unlock
939 * 929 *
940 * return - unlock status 930 * Returns unlock status.
941 */ 931 */
942int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 932int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
943{ 933{
@@ -945,7 +935,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
945 int chipnr; 935 int chipnr;
946 struct nand_chip *chip = mtd->priv; 936 struct nand_chip *chip = mtd->priv;
947 937
948 DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n", 938 pr_debug("%s: start = 0x%012llx, len = %llu\n",
949 __func__, (unsigned long long)ofs, len); 939 __func__, (unsigned long long)ofs, len);
950 940
951 if (check_offs_len(mtd, ofs, len)) 941 if (check_offs_len(mtd, ofs, len))
@@ -964,7 +954,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
964 954
965 /* Check, if it is write protected */ 955 /* Check, if it is write protected */
966 if (nand_check_wp(mtd)) { 956 if (nand_check_wp(mtd)) {
967 DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n", 957 pr_debug("%s: device is write protected!\n",
968 __func__); 958 __func__);
969 ret = -EIO; 959 ret = -EIO;
970 goto out; 960 goto out;
@@ -981,18 +971,16 @@ EXPORT_SYMBOL(nand_unlock);
981 971
982/** 972/**
983 * nand_lock - [REPLACEABLE] locks all blocks present in the device 973 * nand_lock - [REPLACEABLE] locks all blocks present in the device
984 *
985 * @mtd: mtd info 974 * @mtd: mtd info
986 * @ofs: offset to start unlock from 975 * @ofs: offset to start unlock from
987 * @len: length to unlock 976 * @len: length to unlock
988 * 977 *
989 * return - lock status 978 * This feature is not supported in many NAND parts. 'Micron' NAND parts do
979 * have this feature, but it allows only to lock all blocks, not for specified
980 * range for block. Implementing 'lock' feature by making use of 'unlock', for
981 * now.
990 * 982 *
991 * This feature is not supported in many NAND parts. 'Micron' NAND parts 983 * Returns lock status.
992 * do have this feature, but it allows only to lock all blocks, not for
993 * specified range for block.
994 *
995 * Implementing 'lock' feature by making use of 'unlock', for now.
996 */ 984 */
997int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 985int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
998{ 986{
@@ -1000,7 +988,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1000 int chipnr, status, page; 988 int chipnr, status, page;
1001 struct nand_chip *chip = mtd->priv; 989 struct nand_chip *chip = mtd->priv;
1002 990
1003 DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n", 991 pr_debug("%s: start = 0x%012llx, len = %llu\n",
1004 __func__, (unsigned long long)ofs, len); 992 __func__, (unsigned long long)ofs, len);
1005 993
1006 if (check_offs_len(mtd, ofs, len)) 994 if (check_offs_len(mtd, ofs, len))
@@ -1015,7 +1003,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1015 1003
1016 /* Check, if it is write protected */ 1004 /* Check, if it is write protected */
1017 if (nand_check_wp(mtd)) { 1005 if (nand_check_wp(mtd)) {
1018 DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n", 1006 pr_debug("%s: device is write protected!\n",
1019 __func__); 1007 __func__);
1020 status = MTD_ERASE_FAILED; 1008 status = MTD_ERASE_FAILED;
1021 ret = -EIO; 1009 ret = -EIO;
@@ -1028,10 +1016,9 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1028 1016
1029 /* Call wait ready function */ 1017 /* Call wait ready function */
1030 status = chip->waitfunc(mtd, chip); 1018 status = chip->waitfunc(mtd, chip);
1031 udelay(1000);
1032 /* See if device thinks it succeeded */ 1019 /* See if device thinks it succeeded */
1033 if (status & 0x01) { 1020 if (status & 0x01) {
1034 DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n", 1021 pr_debug("%s: error status = 0x%08x\n",
1035 __func__, status); 1022 __func__, status);
1036 ret = -EIO; 1023 ret = -EIO;
1037 goto out; 1024 goto out;
@@ -1047,13 +1034,13 @@ out:
1047EXPORT_SYMBOL(nand_lock); 1034EXPORT_SYMBOL(nand_lock);
1048 1035
1049/** 1036/**
1050 * nand_read_page_raw - [Intern] read raw page data without ecc 1037 * nand_read_page_raw - [INTERN] read raw page data without ecc
1051 * @mtd: mtd info structure 1038 * @mtd: mtd info structure
1052 * @chip: nand chip info structure 1039 * @chip: nand chip info structure
1053 * @buf: buffer to store read data 1040 * @buf: buffer to store read data
1054 * @page: page number to read 1041 * @page: page number to read
1055 * 1042 *
1056 * Not for syndrome calculating ecc controllers, which use a special oob layout 1043 * Not for syndrome calculating ECC controllers, which use a special oob layout.
1057 */ 1044 */
1058static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1045static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1059 uint8_t *buf, int page) 1046 uint8_t *buf, int page)
@@ -1064,11 +1051,11 @@ static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1064} 1051}
1065 1052
1066/** 1053/**
1067 * nand_read_page_raw_syndrome - [Intern] read raw page data without ecc 1054 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
1068 * @mtd: mtd info structure 1055 * @mtd: mtd info structure
1069 * @chip: nand chip info structure 1056 * @chip: nand chip info structure
1070 * @buf: buffer to store read data 1057 * @buf: buffer to store read data
1071 * @page: page number to read 1058 * @page: page number to read
1072 * 1059 *
1073 * We need a special oob layout and handling even when OOB isn't used. 1060 * We need a special oob layout and handling even when OOB isn't used.
1074 */ 1061 */
@@ -1107,11 +1094,11 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1107} 1094}
1108 1095
1109/** 1096/**
1110 * nand_read_page_swecc - [REPLACABLE] software ecc based page read function 1097 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
1111 * @mtd: mtd info structure 1098 * @mtd: mtd info structure
1112 * @chip: nand chip info structure 1099 * @chip: nand chip info structure
1113 * @buf: buffer to store read data 1100 * @buf: buffer to store read data
1114 * @page: page number to read 1101 * @page: page number to read
1115 */ 1102 */
1116static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, 1103static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1117 uint8_t *buf, int page) 1104 uint8_t *buf, int page)
@@ -1148,12 +1135,12 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1148} 1135}
1149 1136
1150/** 1137/**
1151 * nand_read_subpage - [REPLACABLE] software ecc based sub-page read function 1138 * nand_read_subpage - [REPLACEABLE] software ECC based sub-page read function
1152 * @mtd: mtd info structure 1139 * @mtd: mtd info structure
1153 * @chip: nand chip info structure 1140 * @chip: nand chip info structure
1154 * @data_offs: offset of requested data within the page 1141 * @data_offs: offset of requested data within the page
1155 * @readlen: data length 1142 * @readlen: data length
1156 * @bufpoi: buffer to store read data 1143 * @bufpoi: buffer to store read data
1157 */ 1144 */
1158static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, 1145static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1159 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi) 1146 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi)
@@ -1166,12 +1153,12 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1166 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1; 1153 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
1167 int index = 0; 1154 int index = 0;
1168 1155
1169 /* Column address wihin the page aligned to ECC size (256bytes). */ 1156 /* Column address within the page aligned to ECC size (256bytes) */
1170 start_step = data_offs / chip->ecc.size; 1157 start_step = data_offs / chip->ecc.size;
1171 end_step = (data_offs + readlen - 1) / chip->ecc.size; 1158 end_step = (data_offs + readlen - 1) / chip->ecc.size;
1172 num_steps = end_step - start_step + 1; 1159 num_steps = end_step - start_step + 1;
1173 1160
1174 /* Data size aligned to ECC ecc.size*/ 1161 /* Data size aligned to ECC ecc.size */
1175 datafrag_len = num_steps * chip->ecc.size; 1162 datafrag_len = num_steps * chip->ecc.size;
1176 eccfrag_len = num_steps * chip->ecc.bytes; 1163 eccfrag_len = num_steps * chip->ecc.bytes;
1177 1164
@@ -1183,13 +1170,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1183 p = bufpoi + data_col_addr; 1170 p = bufpoi + data_col_addr;
1184 chip->read_buf(mtd, p, datafrag_len); 1171 chip->read_buf(mtd, p, datafrag_len);
1185 1172
1186 /* Calculate ECC */ 1173 /* Calculate ECC */
1187 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) 1174 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
1188 chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]); 1175 chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
1189 1176
1190 /* The performance is faster if to position offsets 1177 /*
1191 according to ecc.pos. Let make sure here that 1178 * The performance is faster if we position offsets according to
1192 there are no gaps in ecc positions */ 1179 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
1180 */
1193 for (i = 0; i < eccfrag_len - 1; i++) { 1181 for (i = 0; i < eccfrag_len - 1; i++) {
1194 if (eccpos[i + start_step * chip->ecc.bytes] + 1 != 1182 if (eccpos[i + start_step * chip->ecc.bytes] + 1 !=
1195 eccpos[i + start_step * chip->ecc.bytes + 1]) { 1183 eccpos[i + start_step * chip->ecc.bytes + 1]) {
@@ -1201,8 +1189,10 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1201 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1); 1189 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
1202 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 1190 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1203 } else { 1191 } else {
1204 /* send the command to read the particular ecc bytes */ 1192 /*
1205 /* take care about buswidth alignment in read_buf */ 1193 * Send the command to read the particular ECC bytes take care
1194 * about buswidth alignment in read_buf.
1195 */
1206 index = start_step * chip->ecc.bytes; 1196 index = start_step * chip->ecc.bytes;
1207 1197
1208 aligned_pos = eccpos[index] & ~(busw - 1); 1198 aligned_pos = eccpos[index] & ~(busw - 1);
@@ -1235,13 +1225,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1235} 1225}
1236 1226
1237/** 1227/**
1238 * nand_read_page_hwecc - [REPLACABLE] hardware ecc based page read function 1228 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
1239 * @mtd: mtd info structure 1229 * @mtd: mtd info structure
1240 * @chip: nand chip info structure 1230 * @chip: nand chip info structure
1241 * @buf: buffer to store read data 1231 * @buf: buffer to store read data
1242 * @page: page number to read 1232 * @page: page number to read
1243 * 1233 *
1244 * Not for syndrome calculating ecc controllers which need a special oob layout 1234 * Not for syndrome calculating ECC controllers which need a special oob layout.
1245 */ 1235 */
1246static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 1236static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1247 uint8_t *buf, int page) 1237 uint8_t *buf, int page)
@@ -1280,18 +1270,17 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1280} 1270}
1281 1271
1282/** 1272/**
1283 * nand_read_page_hwecc_oob_first - [REPLACABLE] hw ecc, read oob first 1273 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
1284 * @mtd: mtd info structure 1274 * @mtd: mtd info structure
1285 * @chip: nand chip info structure 1275 * @chip: nand chip info structure
1286 * @buf: buffer to store read data 1276 * @buf: buffer to store read data
1287 * @page: page number to read 1277 * @page: page number to read
1288 * 1278 *
1289 * Hardware ECC for large page chips, require OOB to be read first. 1279 * Hardware ECC for large page chips, require OOB to be read first. For this
1290 * For this ECC mode, the write_page method is re-used from ECC_HW. 1280 * ECC mode, the write_page method is re-used from ECC_HW. These methods
1291 * These methods read/write ECC from the OOB area, unlike the 1281 * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
1292 * ECC_HW_SYNDROME support with multiple ECC steps, follows the 1282 * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from
1293 * "infix ECC" scheme and reads/writes ECC from the data area, by 1283 * the data area, by overwriting the NAND manufacturer bad block markings.
1294 * overwriting the NAND manufacturer bad block markings.
1295 */ 1284 */
1296static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd, 1285static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1297 struct nand_chip *chip, uint8_t *buf, int page) 1286 struct nand_chip *chip, uint8_t *buf, int page)
@@ -1329,14 +1318,14 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1329} 1318}
1330 1319
1331/** 1320/**
1332 * nand_read_page_syndrome - [REPLACABLE] hardware ecc syndrom based page read 1321 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
1333 * @mtd: mtd info structure 1322 * @mtd: mtd info structure
1334 * @chip: nand chip info structure 1323 * @chip: nand chip info structure
1335 * @buf: buffer to store read data 1324 * @buf: buffer to store read data
1336 * @page: page number to read 1325 * @page: page number to read
1337 * 1326 *
1338 * The hw generator calculates the error syndrome automatically. Therefor 1327 * The hw generator calculates the error syndrome automatically. Therefore we
1339 * we need a special oob layout and handling. 1328 * need a special oob layout and handling.
1340 */ 1329 */
1341static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1330static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1342 uint8_t *buf, int page) 1331 uint8_t *buf, int page)
@@ -1384,29 +1373,29 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1384} 1373}
1385 1374
1386/** 1375/**
1387 * nand_transfer_oob - [Internal] Transfer oob to client buffer 1376 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
1388 * @chip: nand chip structure 1377 * @chip: nand chip structure
1389 * @oob: oob destination address 1378 * @oob: oob destination address
1390 * @ops: oob ops structure 1379 * @ops: oob ops structure
1391 * @len: size of oob to transfer 1380 * @len: size of oob to transfer
1392 */ 1381 */
1393static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob, 1382static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
1394 struct mtd_oob_ops *ops, size_t len) 1383 struct mtd_oob_ops *ops, size_t len)
1395{ 1384{
1396 switch (ops->mode) { 1385 switch (ops->mode) {
1397 1386
1398 case MTD_OOB_PLACE: 1387 case MTD_OPS_PLACE_OOB:
1399 case MTD_OOB_RAW: 1388 case MTD_OPS_RAW:
1400 memcpy(oob, chip->oob_poi + ops->ooboffs, len); 1389 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
1401 return oob + len; 1390 return oob + len;
1402 1391
1403 case MTD_OOB_AUTO: { 1392 case MTD_OPS_AUTO_OOB: {
1404 struct nand_oobfree *free = chip->ecc.layout->oobfree; 1393 struct nand_oobfree *free = chip->ecc.layout->oobfree;
1405 uint32_t boffs = 0, roffs = ops->ooboffs; 1394 uint32_t boffs = 0, roffs = ops->ooboffs;
1406 size_t bytes = 0; 1395 size_t bytes = 0;
1407 1396
1408 for (; free->length && len; free++, len -= bytes) { 1397 for (; free->length && len; free++, len -= bytes) {
1409 /* Read request not from offset 0 ? */ 1398 /* Read request not from offset 0? */
1410 if (unlikely(roffs)) { 1399 if (unlikely(roffs)) {
1411 if (roffs >= free->length) { 1400 if (roffs >= free->length) {
1412 roffs -= free->length; 1401 roffs -= free->length;
@@ -1432,11 +1421,10 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
1432} 1421}
1433 1422
1434/** 1423/**
1435 * nand_do_read_ops - [Internal] Read data with ECC 1424 * nand_do_read_ops - [INTERN] Read data with ECC
1436 * 1425 * @mtd: MTD device structure
1437 * @mtd: MTD device structure 1426 * @from: offset to read from
1438 * @from: offset to read from 1427 * @ops: oob ops structure
1439 * @ops: oob ops structure
1440 * 1428 *
1441 * Internal function. Called with chip held. 1429 * Internal function. Called with chip held.
1442 */ 1430 */
@@ -1451,7 +1439,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1451 int ret = 0; 1439 int ret = 0;
1452 uint32_t readlen = ops->len; 1440 uint32_t readlen = ops->len;
1453 uint32_t oobreadlen = ops->ooblen; 1441 uint32_t oobreadlen = ops->ooblen;
1454 uint32_t max_oobsize = ops->mode == MTD_OOB_AUTO ? 1442 uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ?
1455 mtd->oobavail : mtd->oobsize; 1443 mtd->oobavail : mtd->oobsize;
1456 1444
1457 uint8_t *bufpoi, *oob, *buf; 1445 uint8_t *bufpoi, *oob, *buf;
@@ -1473,7 +1461,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1473 bytes = min(mtd->writesize - col, readlen); 1461 bytes = min(mtd->writesize - col, readlen);
1474 aligned = (bytes == mtd->writesize); 1462 aligned = (bytes == mtd->writesize);
1475 1463
1476 /* Is the current page in the buffer ? */ 1464 /* Is the current page in the buffer? */
1477 if (realpage != chip->pagebuf || oob) { 1465 if (realpage != chip->pagebuf || oob) {
1478 bufpoi = aligned ? buf : chip->buffers->databuf; 1466 bufpoi = aligned ? buf : chip->buffers->databuf;
1479 1467
@@ -1483,7 +1471,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1483 } 1471 }
1484 1472
1485 /* Now read the page into the buffer */ 1473 /* Now read the page into the buffer */
1486 if (unlikely(ops->mode == MTD_OOB_RAW)) 1474 if (unlikely(ops->mode == MTD_OPS_RAW))
1487 ret = chip->ecc.read_page_raw(mtd, chip, 1475 ret = chip->ecc.read_page_raw(mtd, chip,
1488 bufpoi, page); 1476 bufpoi, page);
1489 else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob) 1477 else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
@@ -1492,14 +1480,22 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
 			else
 				ret = chip->ecc.read_page(mtd, chip, bufpoi,
 							  page);
-			if (ret < 0)
+			if (ret < 0) {
+				if (!aligned)
+					/* Invalidate page cache */
+					chip->pagebuf = -1;
 				break;
+			}
 
 			/* Transfer not aligned data */
 			if (!aligned) {
 				if (!NAND_SUBPAGE_READ(chip) && !oob &&
-				    !(mtd->ecc_stats.failed - stats.failed))
+				    !(mtd->ecc_stats.failed - stats.failed) &&
+				    (ops->mode != MTD_OPS_RAW))
 					chip->pagebuf = realpage;
+				else
+					/* Invalidate page cache */
+					chip->pagebuf = -1;
 				memcpy(buf, chip->buffers->databuf + col, bytes);
 			}
 
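
This is the "invalidate cache on unaligned reads" fix from the merge shortlog: chip->pagebuf records which page is currently held in chip->buffers->databuf, so a failed read or a raw-mode read that leaves that buffer questionable must forget it. A small sketch of the invariant, with invented helper and parameter names:

#include <linux/mtd/nand.h>

/*
 * Hypothetical helper illustrating the rule the hunk enforces:
 * chip->pagebuf == page implies chip->buffers->databuf holds an
 * ECC-corrected copy of that page.
 */
static void example_update_page_cache(struct nand_chip *chip, int page,
				      int ret, int raw)
{
	if (ret < 0 || raw)
		chip->pagebuf = -1;	/* -1: nothing cached */
	else
		chip->pagebuf = page;
}
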
@@ -1539,7 +1535,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1539 if (!readlen) 1535 if (!readlen)
1540 break; 1536 break;
1541 1537
1542 /* For subsequent reads align to page boundary. */ 1538 /* For subsequent reads align to page boundary */
1543 col = 0; 1539 col = 0;
1544 /* Increment page address */ 1540 /* Increment page address */
1545 realpage++; 1541 realpage++;
@@ -1552,8 +1548,9 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1552 chip->select_chip(mtd, chipnr); 1548 chip->select_chip(mtd, chipnr);
1553 } 1549 }
1554 1550
1555 /* Check, if the chip supports auto page increment 1551 /*
1556 * or if we have hit a block boundary. 1552 * Check, if the chip supports auto page increment or if we
1553 * have hit a block boundary.
1557 */ 1554 */
1558 if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck)) 1555 if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
1559 sndcmd = 1; 1556 sndcmd = 1;
@@ -1574,18 +1571,19 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1574 1571
1575/** 1572/**
1576 * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc 1573 * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
1577 * @mtd: MTD device structure 1574 * @mtd: MTD device structure
1578 * @from: offset to read from 1575 * @from: offset to read from
1579 * @len: number of bytes to read 1576 * @len: number of bytes to read
1580 * @retlen: pointer to variable to store the number of read bytes 1577 * @retlen: pointer to variable to store the number of read bytes
1581 * @buf: the databuffer to put data 1578 * @buf: the databuffer to put data
1582 * 1579 *
1583 * Get hold of the chip and call nand_do_read 1580 * Get hold of the chip and call nand_do_read.
1584 */ 1581 */
1585static int nand_read(struct mtd_info *mtd, loff_t from, size_t len, 1582static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
1586 size_t *retlen, uint8_t *buf) 1583 size_t *retlen, uint8_t *buf)
1587{ 1584{
1588 struct nand_chip *chip = mtd->priv; 1585 struct nand_chip *chip = mtd->priv;
1586 struct mtd_oob_ops ops;
1589 int ret; 1587 int ret;
1590 1588
1591 /* Do not allow reads past end of device */ 1589 /* Do not allow reads past end of device */
@@ -1596,13 +1594,14 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
1596 1594
1597 nand_get_device(chip, mtd, FL_READING); 1595 nand_get_device(chip, mtd, FL_READING);
1598 1596
1599 chip->ops.len = len; 1597 ops.len = len;
1600 chip->ops.datbuf = buf; 1598 ops.datbuf = buf;
1601 chip->ops.oobbuf = NULL; 1599 ops.oobbuf = NULL;
1600 ops.mode = 0;
1602 1601
1603 ret = nand_do_read_ops(mtd, from, &chip->ops); 1602 ret = nand_do_read_ops(mtd, from, &ops);
1604 1603
1605 *retlen = chip->ops.retlen; 1604 *retlen = ops.retlen;
1606 1605
1607 nand_release_device(mtd); 1606 nand_release_device(mtd);
1608 1607
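Switching from the shared chip->ops descriptor to a stack-local struct mtd_oob_ops makes nand_read() self-contained: nothing of the request outlives the call, and it no longer collides with other users of chip->ops. The bare ops.mode = 0 relies on MTD_OPS_PLACE_OOB being defined as 0 in mtd-abi.h; written out explicitly, the same initialization would read:

	struct mtd_oob_ops ops = {
		.mode   = MTD_OPS_PLACE_OOB,    /* numerically 0, which is what 'ops.mode = 0' means here */
		.len    = len,
		.datbuf = buf,
		.oobbuf = NULL,                 /* data-only read, no OOB transfer */
	};
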
@@ -1610,11 +1609,11 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
1610} 1609}
1611 1610
1612/** 1611/**
1613 * nand_read_oob_std - [REPLACABLE] the most common OOB data read function 1612 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
1614 * @mtd: mtd info structure 1613 * @mtd: mtd info structure
1615 * @chip: nand chip info structure 1614 * @chip: nand chip info structure
1616 * @page: page number to read 1615 * @page: page number to read
1617 * @sndcmd: flag whether to issue read command or not 1616 * @sndcmd: flag whether to issue read command or not
1618 */ 1617 */
1619static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, 1618static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1620 int page, int sndcmd) 1619 int page, int sndcmd)
@@ -1628,12 +1627,12 @@ static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1628} 1627}
1629 1628
1630/** 1629/**
1631 * nand_read_oob_syndrome - [REPLACABLE] OOB data read function for HW ECC 1630 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
1632 * with syndromes 1631 * with syndromes
1633 * @mtd: mtd info structure 1632 * @mtd: mtd info structure
1634 * @chip: nand chip info structure 1633 * @chip: nand chip info structure
1635 * @page: page number to read 1634 * @page: page number to read
1636 * @sndcmd: flag whether to issue read command or not 1635 * @sndcmd: flag whether to issue read command or not
1637 */ 1636 */
1638static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1637static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1639 int page, int sndcmd) 1638 int page, int sndcmd)
@@ -1667,10 +1666,10 @@ static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1667} 1666}
1668 1667
1669/** 1668/**
1670 * nand_write_oob_std - [REPLACABLE] the most common OOB data write function 1669 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
1671 * @mtd: mtd info structure 1670 * @mtd: mtd info structure
1672 * @chip: nand chip info structure 1671 * @chip: nand chip info structure
1673 * @page: page number to write 1672 * @page: page number to write
1674 */ 1673 */
1675static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, 1674static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1676 int page) 1675 int page)
@@ -1690,11 +1689,11 @@ static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1690} 1689}
1691 1690
1692/** 1691/**
1693 * nand_write_oob_syndrome - [REPLACABLE] OOB data write function for HW ECC 1692 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
1694 * with syndrome - only for large page flash ! 1693 * with syndrome - only for large page flash
1695 * @mtd: mtd info structure 1694 * @mtd: mtd info structure
1696 * @chip: nand chip info structure 1695 * @chip: nand chip info structure
1697 * @page: page number to write 1696 * @page: page number to write
1698 */ 1697 */
1699static int nand_write_oob_syndrome(struct mtd_info *mtd, 1698static int nand_write_oob_syndrome(struct mtd_info *mtd,
1700 struct nand_chip *chip, int page) 1699 struct nand_chip *chip, int page)
@@ -1749,34 +1748,37 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd,
1749} 1748}
1750 1749
1751/** 1750/**
1752 * nand_do_read_oob - [Intern] NAND read out-of-band 1751 * nand_do_read_oob - [INTERN] NAND read out-of-band
1753 * @mtd: MTD device structure 1752 * @mtd: MTD device structure
1754 * @from: offset to read from 1753 * @from: offset to read from
1755 * @ops: oob operations description structure 1754 * @ops: oob operations description structure
1756 * 1755 *
1757 * NAND read out-of-band data from the spare area 1756 * NAND read out-of-band data from the spare area.
1758 */ 1757 */
1759static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, 1758static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1760 struct mtd_oob_ops *ops) 1759 struct mtd_oob_ops *ops)
1761{ 1760{
1762 int page, realpage, chipnr, sndcmd = 1; 1761 int page, realpage, chipnr, sndcmd = 1;
1763 struct nand_chip *chip = mtd->priv; 1762 struct nand_chip *chip = mtd->priv;
1763 struct mtd_ecc_stats stats;
1764 int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1; 1764 int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
1765 int readlen = ops->ooblen; 1765 int readlen = ops->ooblen;
1766 int len; 1766 int len;
1767 uint8_t *buf = ops->oobbuf; 1767 uint8_t *buf = ops->oobbuf;
1768 1768
1769 DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08Lx, len = %i\n", 1769 pr_debug("%s: from = 0x%08Lx, len = %i\n",
1770 __func__, (unsigned long long)from, readlen); 1770 __func__, (unsigned long long)from, readlen);
1771 1771
1772 if (ops->mode == MTD_OOB_AUTO) 1772 stats = mtd->ecc_stats;
1773
1774 if (ops->mode == MTD_OPS_AUTO_OOB)
1773 len = chip->ecc.layout->oobavail; 1775 len = chip->ecc.layout->oobavail;
1774 else 1776 else
1775 len = mtd->oobsize; 1777 len = mtd->oobsize;
1776 1778
1777 if (unlikely(ops->ooboffs >= len)) { 1779 if (unlikely(ops->ooboffs >= len)) {
1778 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start read " 1780 pr_debug("%s: attempt to start read outside oob\n",
1779 "outside oob\n", __func__); 1781 __func__);
1780 return -EINVAL; 1782 return -EINVAL;
1781 } 1783 }
1782 1784
@@ -1784,8 +1786,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1784 if (unlikely(from >= mtd->size || 1786 if (unlikely(from >= mtd->size ||
1785 ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) - 1787 ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
1786 (from >> chip->page_shift)) * len)) { 1788 (from >> chip->page_shift)) * len)) {
1787 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read beyond end " 1789 pr_debug("%s: attempt to read beyond end of device\n",
1788 "of device\n", __func__); 1790 __func__);
1789 return -EINVAL; 1791 return -EINVAL;
1790 } 1792 }
1791 1793
@@ -1797,7 +1799,10 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1797 page = realpage & chip->pagemask; 1799 page = realpage & chip->pagemask;
1798 1800
1799 while (1) { 1801 while (1) {
1800 sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd); 1802 if (ops->mode == MTD_OPS_RAW)
1803 sndcmd = chip->ecc.read_oob_raw(mtd, chip, page, sndcmd);
1804 else
1805 sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
1801 1806
1802 len = min(len, readlen); 1807 len = min(len, readlen);
1803 buf = nand_transfer_oob(chip, buf, ops, len); 1808 buf = nand_transfer_oob(chip, buf, ops, len);
@@ -1830,24 +1835,29 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1830 chip->select_chip(mtd, chipnr); 1835 chip->select_chip(mtd, chipnr);
1831 } 1836 }
1832 1837
1833 /* Check, if the chip supports auto page increment 1838 /*
1834 * or if we have hit a block boundary. 1839 * Check, if the chip supports auto page increment or if we
1840 * have hit a block boundary.
1835 */ 1841 */
1836 if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck)) 1842 if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
1837 sndcmd = 1; 1843 sndcmd = 1;
1838 } 1844 }
1839 1845
1840 ops->oobretlen = ops->ooblen; 1846 ops->oobretlen = ops->ooblen;
1841 return 0; 1847
1848 if (mtd->ecc_stats.failed - stats.failed)
1849 return -EBADMSG;
1850
1851 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
1842} 1852}
1843 1853
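By snapshotting mtd->ecc_stats on entry and comparing it at the end, nand_do_read_oob() now reports ECC events the same way the main-data path does: -EBADMSG when uncorrectable errors were recorded and -EUCLEAN when bitflips were corrected, instead of an unconditional 0. Callers that treated any non-zero return as fatal should separate the soft case; a hedged sketch, with ops and offset assumed to be set up elsewhere:

	int err = mtd->read_oob(mtd, offset, &ops);
	if (err == -EUCLEAN) {
		err = 0;        /* data is good, but bitflips were corrected: consider scrubbing */
	} else if (err == -EBADMSG) {
		/* uncorrectable ECC error: the returned bytes are suspect */
	}
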
1844/** 1854/**
1845 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band 1855 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
1846 * @mtd: MTD device structure 1856 * @mtd: MTD device structure
1847 * @from: offset to read from 1857 * @from: offset to read from
1848 * @ops: oob operation description structure 1858 * @ops: oob operation description structure
1849 * 1859 *
1850 * NAND read data and/or out-of-band data 1860 * NAND read data and/or out-of-band data.
1851 */ 1861 */
1852static int nand_read_oob(struct mtd_info *mtd, loff_t from, 1862static int nand_read_oob(struct mtd_info *mtd, loff_t from,
1853 struct mtd_oob_ops *ops) 1863 struct mtd_oob_ops *ops)
@@ -1859,17 +1869,17 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
1859 1869
1860 /* Do not allow reads past end of device */ 1870 /* Do not allow reads past end of device */
1861 if (ops->datbuf && (from + ops->len) > mtd->size) { 1871 if (ops->datbuf && (from + ops->len) > mtd->size) {
1862 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read " 1872 pr_debug("%s: attempt to read beyond end of device\n",
1863 "beyond end of device\n", __func__); 1873 __func__);
1864 return -EINVAL; 1874 return -EINVAL;
1865 } 1875 }
1866 1876
1867 nand_get_device(chip, mtd, FL_READING); 1877 nand_get_device(chip, mtd, FL_READING);
1868 1878
1869 switch (ops->mode) { 1879 switch (ops->mode) {
1870 case MTD_OOB_PLACE: 1880 case MTD_OPS_PLACE_OOB:
1871 case MTD_OOB_AUTO: 1881 case MTD_OPS_AUTO_OOB:
1872 case MTD_OOB_RAW: 1882 case MTD_OPS_RAW:
1873 break; 1883 break;
1874 1884
1875 default: 1885 default:
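The switch now accepts the renamed request modes: MTD_OOB_PLACE, MTD_OOB_AUTO and MTD_OOB_RAW become MTD_OPS_PLACE_OOB, MTD_OPS_AUTO_OOB and MTD_OPS_RAW, with unchanged semantics. An OOB-only read that lets the core scatter the data through the ECC layout's free bytes looks roughly as follows (oob_buf is an illustrative caller buffer):

	struct mtd_oob_ops ops = {
		.mode   = MTD_OPS_AUTO_OOB,     /* use only the ecc.layout->oobfree regions */
		.ooblen = mtd->oobavail,        /* free OOB bytes available per page */
		.oobbuf = oob_buf,
		.datbuf = NULL,                 /* no main-area data requested */
	};
	int err = mtd->read_oob(mtd, offset, &ops);
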
@@ -1888,12 +1898,12 @@ out:
1888 1898
1889 1899
1890/** 1900/**
1891 * nand_write_page_raw - [Intern] raw page write function 1901 * nand_write_page_raw - [INTERN] raw page write function
1892 * @mtd: mtd info structure 1902 * @mtd: mtd info structure
1893 * @chip: nand chip info structure 1903 * @chip: nand chip info structure
1894 * @buf: data buffer 1904 * @buf: data buffer
1895 * 1905 *
1896 * Not for syndrome calculating ecc controllers, which use a special oob layout 1906 * Not for syndrome calculating ECC controllers, which use a special oob layout.
1897 */ 1907 */
1898static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1908static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1899 const uint8_t *buf) 1909 const uint8_t *buf)
@@ -1903,10 +1913,10 @@ static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1903} 1913}
1904 1914
1905/** 1915/**
1906 * nand_write_page_raw_syndrome - [Intern] raw page write function 1916 * nand_write_page_raw_syndrome - [INTERN] raw page write function
1907 * @mtd: mtd info structure 1917 * @mtd: mtd info structure
1908 * @chip: nand chip info structure 1918 * @chip: nand chip info structure
1909 * @buf: data buffer 1919 * @buf: data buffer
1910 * 1920 *
1911 * We need a special oob layout and handling even when ECC isn't checked. 1921 * We need a special oob layout and handling even when ECC isn't checked.
1912 */ 1922 */
@@ -1942,10 +1952,10 @@ static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
1942 chip->write_buf(mtd, oob, size); 1952 chip->write_buf(mtd, oob, size);
1943} 1953}
1944/** 1954/**
1945 * nand_write_page_swecc - [REPLACABLE] software ecc based page write function 1955 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
1946 * @mtd: mtd info structure 1956 * @mtd: mtd info structure
1947 * @chip: nand chip info structure 1957 * @chip: nand chip info structure
1948 * @buf: data buffer 1958 * @buf: data buffer
1949 */ 1959 */
1950static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, 1960static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1951 const uint8_t *buf) 1961 const uint8_t *buf)
@@ -1957,7 +1967,7 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1957 const uint8_t *p = buf; 1967 const uint8_t *p = buf;
1958 uint32_t *eccpos = chip->ecc.layout->eccpos; 1968 uint32_t *eccpos = chip->ecc.layout->eccpos;
1959 1969
1960 /* Software ecc calculation */ 1970 /* Software ECC calculation */
1961 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) 1971 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
1962 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 1972 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1963 1973
@@ -1968,10 +1978,10 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1968} 1978}
1969 1979
1970/** 1980/**
1971 * nand_write_page_hwecc - [REPLACABLE] hardware ecc based page write function 1981 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
1972 * @mtd: mtd info structure 1982 * @mtd: mtd info structure
1973 * @chip: nand chip info structure 1983 * @chip: nand chip info structure
1974 * @buf: data buffer 1984 * @buf: data buffer
1975 */ 1985 */
1976static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 1986static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1977 const uint8_t *buf) 1987 const uint8_t *buf)
@@ -1996,13 +2006,13 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1996} 2006}
1997 2007
1998/** 2008/**
1999 * nand_write_page_syndrome - [REPLACABLE] hardware ecc syndrom based page write 2009 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
2000 * @mtd: mtd info structure 2010 * @mtd: mtd info structure
2001 * @chip: nand chip info structure 2011 * @chip: nand chip info structure
2002 * @buf: data buffer 2012 * @buf: data buffer
2003 * 2013 *
2004 * The hw generator calculates the error syndrome automatically. Therefor 2014 * The hw generator calculates the error syndrome automatically. Therefore we
2005 * we need a special oob layout and handling. 2015 * need a special oob layout and handling.
2006 */ 2016 */
2007static void nand_write_page_syndrome(struct mtd_info *mtd, 2017static void nand_write_page_syndrome(struct mtd_info *mtd,
2008 struct nand_chip *chip, const uint8_t *buf) 2018 struct nand_chip *chip, const uint8_t *buf)
@@ -2041,12 +2051,12 @@ static void nand_write_page_syndrome(struct mtd_info *mtd,
2041 2051
2042/** 2052/**
2043 * nand_write_page - [REPLACEABLE] write one page 2053 * nand_write_page - [REPLACEABLE] write one page
2044 * @mtd: MTD device structure 2054 * @mtd: MTD device structure
2045 * @chip: NAND chip descriptor 2055 * @chip: NAND chip descriptor
2046 * @buf: the data to write 2056 * @buf: the data to write
2047 * @page: page number to write 2057 * @page: page number to write
2048 * @cached: cached programming 2058 * @cached: cached programming
2049 * @raw: use _raw version of write_page 2059 * @raw: use _raw version of write_page
2050 */ 2060 */
2051static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, 2061static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2052 const uint8_t *buf, int page, int cached, int raw) 2062 const uint8_t *buf, int page, int cached, int raw)
@@ -2061,8 +2071,8 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2061 chip->ecc.write_page(mtd, chip, buf); 2071 chip->ecc.write_page(mtd, chip, buf);
2062 2072
2063 /* 2073 /*
2064 * Cached progamming disabled for now, Not sure if its worth the 2074 * Cached programming disabled for now. Not sure if it's worth the
2065 * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s) 2075 * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s).
2066 */ 2076 */
2067 cached = 0; 2077 cached = 0;
2068 2078
@@ -2072,7 +2082,7 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2072 status = chip->waitfunc(mtd, chip); 2082 status = chip->waitfunc(mtd, chip);
2073 /* 2083 /*
2074 * See if operation failed and additional status checks are 2084 * See if operation failed and additional status checks are
2075 * available 2085 * available.
2076 */ 2086 */
2077 if ((status & NAND_STATUS_FAIL) && (chip->errstat)) 2087 if ((status & NAND_STATUS_FAIL) && (chip->errstat))
2078 status = chip->errstat(mtd, chip, FL_WRITING, status, 2088 status = chip->errstat(mtd, chip, FL_WRITING, status,
@@ -2096,29 +2106,37 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2096} 2106}
2097 2107
2098/** 2108/**
2099 * nand_fill_oob - [Internal] Transfer client buffer to oob 2109 * nand_fill_oob - [INTERN] Transfer client buffer to oob
2100 * @chip: nand chip structure 2110 * @mtd: MTD device structure
2101 * @oob: oob data buffer 2111 * @oob: oob data buffer
2102 * @len: oob data write length 2112 * @len: oob data write length
2103 * @ops: oob ops structure 2113 * @ops: oob ops structure
2104 */ 2114 */
2105static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len, 2115static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
2106 struct mtd_oob_ops *ops) 2116 struct mtd_oob_ops *ops)
2107{ 2117{
2118 struct nand_chip *chip = mtd->priv;
2119
2120 /*
2121 * Initialise to all 0xFF, to avoid the possibility of left over OOB
2122 * data from a previous OOB read.
2123 */
2124 memset(chip->oob_poi, 0xff, mtd->oobsize);
2125
2108 switch (ops->mode) { 2126 switch (ops->mode) {
2109 2127
2110 case MTD_OOB_PLACE: 2128 case MTD_OPS_PLACE_OOB:
2111 case MTD_OOB_RAW: 2129 case MTD_OPS_RAW:
2112 memcpy(chip->oob_poi + ops->ooboffs, oob, len); 2130 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
2113 return oob + len; 2131 return oob + len;
2114 2132
2115 case MTD_OOB_AUTO: { 2133 case MTD_OPS_AUTO_OOB: {
2116 struct nand_oobfree *free = chip->ecc.layout->oobfree; 2134 struct nand_oobfree *free = chip->ecc.layout->oobfree;
2117 uint32_t boffs = 0, woffs = ops->ooboffs; 2135 uint32_t boffs = 0, woffs = ops->ooboffs;
2118 size_t bytes = 0; 2136 size_t bytes = 0;
2119 2137
2120 for (; free->length && len; free++, len -= bytes) { 2138 for (; free->length && len; free++, len -= bytes) {
2121 /* Write request not from offset 0 ? */ 2139 /* Write request not from offset 0? */
2122 if (unlikely(woffs)) { 2140 if (unlikely(woffs)) {
2123 if (woffs >= free->length) { 2141 if (woffs >= free->length) {
2124 woffs -= free->length; 2142 woffs -= free->length;
@@ -2146,12 +2164,12 @@ static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
2146#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0) 2164#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
2147 2165
2148/** 2166/**
2149 * nand_do_write_ops - [Internal] NAND write with ECC 2167 * nand_do_write_ops - [INTERN] NAND write with ECC
2150 * @mtd: MTD device structure 2168 * @mtd: MTD device structure
2151 * @to: offset to write to 2169 * @to: offset to write to
2152 * @ops: oob operations description structure 2170 * @ops: oob operations description structure
2153 * 2171 *
2154 * NAND write with ECC 2172 * NAND write with ECC.
2155 */ 2173 */
2156static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, 2174static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2157 struct mtd_oob_ops *ops) 2175 struct mtd_oob_ops *ops)
@@ -2161,7 +2179,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2161 uint32_t writelen = ops->len; 2179 uint32_t writelen = ops->len;
2162 2180
2163 uint32_t oobwritelen = ops->ooblen; 2181 uint32_t oobwritelen = ops->ooblen;
2164 uint32_t oobmaxlen = ops->mode == MTD_OOB_AUTO ? 2182 uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ?
2165 mtd->oobavail : mtd->oobsize; 2183 mtd->oobavail : mtd->oobsize;
2166 2184
2167 uint8_t *oob = ops->oobbuf; 2185 uint8_t *oob = ops->oobbuf;
@@ -2172,10 +2190,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2172 if (!writelen) 2190 if (!writelen)
2173 return 0; 2191 return 0;
2174 2192
2175 /* reject writes, which are not page aligned */ 2193 /* Reject writes, which are not page aligned */
2176 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) { 2194 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
2177 printk(KERN_NOTICE "%s: Attempt to write not " 2195 pr_notice("%s: attempt to write non page aligned data\n",
2178 "page aligned data\n", __func__); 2196 __func__);
2179 return -EINVAL; 2197 return -EINVAL;
2180 } 2198 }
2181 2199
@@ -2201,10 +2219,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2201 (chip->pagebuf << chip->page_shift) < (to + ops->len)) 2219 (chip->pagebuf << chip->page_shift) < (to + ops->len))
2202 chip->pagebuf = -1; 2220 chip->pagebuf = -1;
2203 2221
2204 /* If we're not given explicit OOB data, let it be 0xFF */
2205 if (likely(!oob))
2206 memset(chip->oob_poi, 0xff, mtd->oobsize);
2207
2208 /* Don't allow multipage oob writes with offset */ 2222 /* Don't allow multipage oob writes with offset */
2209 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) 2223 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
2210 return -EINVAL; 2224 return -EINVAL;
@@ -2214,7 +2228,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2214 int cached = writelen > bytes && page != blockmask; 2228 int cached = writelen > bytes && page != blockmask;
2215 uint8_t *wbuf = buf; 2229 uint8_t *wbuf = buf;
2216 2230
2217 /* Partial page write ? */ 2231 /* Partial page write? */
2218 if (unlikely(column || writelen < (mtd->writesize - 1))) { 2232 if (unlikely(column || writelen < (mtd->writesize - 1))) {
2219 cached = 0; 2233 cached = 0;
2220 bytes = min_t(int, bytes - column, (int) writelen); 2234 bytes = min_t(int, bytes - column, (int) writelen);
@@ -2226,12 +2240,15 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2226 2240
2227 if (unlikely(oob)) { 2241 if (unlikely(oob)) {
2228 size_t len = min(oobwritelen, oobmaxlen); 2242 size_t len = min(oobwritelen, oobmaxlen);
2229 oob = nand_fill_oob(chip, oob, len, ops); 2243 oob = nand_fill_oob(mtd, oob, len, ops);
2230 oobwritelen -= len; 2244 oobwritelen -= len;
2245 } else {
2246 /* We still need to erase leftover OOB data */
2247 memset(chip->oob_poi, 0xff, mtd->oobsize);
2231 } 2248 }
2232 2249
2233 ret = chip->write_page(mtd, chip, wbuf, page, cached, 2250 ret = chip->write_page(mtd, chip, wbuf, page, cached,
2234 (ops->mode == MTD_OOB_RAW)); 2251 (ops->mode == MTD_OPS_RAW));
2235 if (ret) 2252 if (ret)
2236 break; 2253 break;
2237 2254
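Two related changes land in the write loop: nand_fill_oob() now takes the mtd pointer and pre-fills chip->oob_poi with 0xff before copying the client bytes in, and when no OOB data is supplied the buffer is memset to 0xff on every iteration. Either way a program operation can no longer pick up stale spare-area bytes left over from an earlier OOB read. A combined data-plus-OOB write from a caller, sketched with auto placement (page_buf and oob_buf are illustrative; offset is assumed page-aligned):

	struct mtd_oob_ops ops = {
		.mode   = MTD_OPS_AUTO_OOB,
		.len    = mtd->writesize,       /* one page of main-area data */
		.datbuf = page_buf,
		.ooblen = mtd->oobavail,
		.oobbuf = oob_buf,              /* spare bytes not covered here are programmed as 0xff */
	};
	int err = mtd->write_oob(mtd, offset, &ops);
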
@@ -2260,11 +2277,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2260 2277
2261/** 2278/**
2262 * panic_nand_write - [MTD Interface] NAND write with ECC 2279 * panic_nand_write - [MTD Interface] NAND write with ECC
2263 * @mtd: MTD device structure 2280 * @mtd: MTD device structure
2264 * @to: offset to write to 2281 * @to: offset to write to
2265 * @len: number of bytes to write 2282 * @len: number of bytes to write
2266 * @retlen: pointer to variable to store the number of written bytes 2283 * @retlen: pointer to variable to store the number of written bytes
2267 * @buf: the data to write 2284 * @buf: the data to write
2268 * 2285 *
2269 * NAND write with ECC. Used when performing writes in interrupt context, this 2286 * NAND write with ECC. Used when performing writes in interrupt context, this
2270 * may for example be called by mtdoops when writing an oops while in panic. 2287 * may for example be called by mtdoops when writing an oops while in panic.
@@ -2273,6 +2290,7 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2273 size_t *retlen, const uint8_t *buf) 2290 size_t *retlen, const uint8_t *buf)
2274{ 2291{
2275 struct nand_chip *chip = mtd->priv; 2292 struct nand_chip *chip = mtd->priv;
2293 struct mtd_oob_ops ops;
2276 int ret; 2294 int ret;
2277 2295
2278 /* Do not allow reads past end of device */ 2296 /* Do not allow reads past end of device */
@@ -2281,36 +2299,38 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2281 if (!len) 2299 if (!len)
2282 return 0; 2300 return 0;
2283 2301
2284 /* Wait for the device to get ready. */ 2302 /* Wait for the device to get ready */
2285 panic_nand_wait(mtd, chip, 400); 2303 panic_nand_wait(mtd, chip, 400);
2286 2304
2287 /* Grab the device. */ 2305 /* Grab the device */
2288 panic_nand_get_device(chip, mtd, FL_WRITING); 2306 panic_nand_get_device(chip, mtd, FL_WRITING);
2289 2307
2290 chip->ops.len = len; 2308 ops.len = len;
2291 chip->ops.datbuf = (uint8_t *)buf; 2309 ops.datbuf = (uint8_t *)buf;
2292 chip->ops.oobbuf = NULL; 2310 ops.oobbuf = NULL;
2311 ops.mode = 0;
2293 2312
2294 ret = nand_do_write_ops(mtd, to, &chip->ops); 2313 ret = nand_do_write_ops(mtd, to, &ops);
2295 2314
2296 *retlen = chip->ops.retlen; 2315 *retlen = ops.retlen;
2297 return ret; 2316 return ret;
2298} 2317}
2299 2318
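panic_nand_write() gets the same stack-local mtd_oob_ops treatment, and it matters most here: mtdoops can invoke it while another thread is in the middle of an operation that populated chip->ops, and scribbling over that shared descriptor from panic context would corrupt the interrupted request. The hook itself is reached through the panic_write method that the NAND core installs in nand_scan_tail(); roughly, with oops_buf and offset as illustrative names:

	size_t retlen;
	int err = mtd->panic_write(mtd, offset, mtd->writesize, &retlen, oops_buf);
	/* no locking, no sleeping: only intended for the panic/oops path */
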
2300/** 2319/**
2301 * nand_write - [MTD Interface] NAND write with ECC 2320 * nand_write - [MTD Interface] NAND write with ECC
2302 * @mtd: MTD device structure 2321 * @mtd: MTD device structure
2303 * @to: offset to write to 2322 * @to: offset to write to
2304 * @len: number of bytes to write 2323 * @len: number of bytes to write
2305 * @retlen: pointer to variable to store the number of written bytes 2324 * @retlen: pointer to variable to store the number of written bytes
2306 * @buf: the data to write 2325 * @buf: the data to write
2307 * 2326 *
2308 * NAND write with ECC 2327 * NAND write with ECC.
2309 */ 2328 */
2310static int nand_write(struct mtd_info *mtd, loff_t to, size_t len, 2329static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2311 size_t *retlen, const uint8_t *buf) 2330 size_t *retlen, const uint8_t *buf)
2312{ 2331{
2313 struct nand_chip *chip = mtd->priv; 2332 struct nand_chip *chip = mtd->priv;
2333 struct mtd_oob_ops ops;
2314 int ret; 2334 int ret;
2315 2335
2316 /* Do not allow reads past end of device */ 2336 /* Do not allow reads past end of device */
@@ -2321,13 +2341,14 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2321 2341
2322 nand_get_device(chip, mtd, FL_WRITING); 2342 nand_get_device(chip, mtd, FL_WRITING);
2323 2343
2324 chip->ops.len = len; 2344 ops.len = len;
2325 chip->ops.datbuf = (uint8_t *)buf; 2345 ops.datbuf = (uint8_t *)buf;
2326 chip->ops.oobbuf = NULL; 2346 ops.oobbuf = NULL;
2347 ops.mode = 0;
2327 2348
2328 ret = nand_do_write_ops(mtd, to, &chip->ops); 2349 ret = nand_do_write_ops(mtd, to, &ops);
2329 2350
2330 *retlen = chip->ops.retlen; 2351 *retlen = ops.retlen;
2331 2352
2332 nand_release_device(mtd); 2353 nand_release_device(mtd);
2333 2354
@@ -2336,11 +2357,11 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2336 2357
2337/** 2358/**
2338 * nand_do_write_oob - [MTD Interface] NAND write out-of-band 2359 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
2339 * @mtd: MTD device structure 2360 * @mtd: MTD device structure
2340 * @to: offset to write to 2361 * @to: offset to write to
2341 * @ops: oob operation description structure 2362 * @ops: oob operation description structure
2342 * 2363 *
2343 * NAND write out-of-band 2364 * NAND write out-of-band.
2344 */ 2365 */
2345static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, 2366static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2346 struct mtd_oob_ops *ops) 2367 struct mtd_oob_ops *ops)
@@ -2348,24 +2369,24 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2348 int chipnr, page, status, len; 2369 int chipnr, page, status, len;
2349 struct nand_chip *chip = mtd->priv; 2370 struct nand_chip *chip = mtd->priv;
2350 2371
2351 DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n", 2372 pr_debug("%s: to = 0x%08x, len = %i\n",
2352 __func__, (unsigned int)to, (int)ops->ooblen); 2373 __func__, (unsigned int)to, (int)ops->ooblen);
2353 2374
2354 if (ops->mode == MTD_OOB_AUTO) 2375 if (ops->mode == MTD_OPS_AUTO_OOB)
2355 len = chip->ecc.layout->oobavail; 2376 len = chip->ecc.layout->oobavail;
2356 else 2377 else
2357 len = mtd->oobsize; 2378 len = mtd->oobsize;
2358 2379
2359 /* Do not allow write past end of page */ 2380 /* Do not allow write past end of page */
2360 if ((ops->ooboffs + ops->ooblen) > len) { 2381 if ((ops->ooboffs + ops->ooblen) > len) {
2361 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to write " 2382 pr_debug("%s: attempt to write past end of page\n",
2362 "past end of page\n", __func__); 2383 __func__);
2363 return -EINVAL; 2384 return -EINVAL;
2364 } 2385 }
2365 2386
2366 if (unlikely(ops->ooboffs >= len)) { 2387 if (unlikely(ops->ooboffs >= len)) {
2367 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start " 2388 pr_debug("%s: attempt to start write outside oob\n",
2368 "write outside oob\n", __func__); 2389 __func__);
2369 return -EINVAL; 2390 return -EINVAL;
2370 } 2391 }
2371 2392
@@ -2374,8 +2395,8 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2374 ops->ooboffs + ops->ooblen > 2395 ops->ooboffs + ops->ooblen >
2375 ((mtd->size >> chip->page_shift) - 2396 ((mtd->size >> chip->page_shift) -
2376 (to >> chip->page_shift)) * len)) { 2397 (to >> chip->page_shift)) * len)) {
2377 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond " 2398 pr_debug("%s: attempt to write beyond end of device\n",
2378 "end of device\n", __func__); 2399 __func__);
2379 return -EINVAL; 2400 return -EINVAL;
2380 } 2401 }
2381 2402
@@ -2401,10 +2422,12 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2401 if (page == chip->pagebuf) 2422 if (page == chip->pagebuf)
2402 chip->pagebuf = -1; 2423 chip->pagebuf = -1;
2403 2424
2404 memset(chip->oob_poi, 0xff, mtd->oobsize); 2425 nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
2405 nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops); 2426
2406 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask); 2427 if (ops->mode == MTD_OPS_RAW)
2407 memset(chip->oob_poi, 0xff, mtd->oobsize); 2428 status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
2429 else
2430 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
2408 2431
2409 if (status) 2432 if (status)
2410 return status; 2433 return status;
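For OOB-only writes the explicit memset of chip->oob_poi before and after the ECC hook disappears; the 0xff pre-fill now lives inside nand_fill_oob(), and MTD_OPS_RAW requests are routed to the driver's ecc.write_oob_raw hook so the spare area can be programmed without the usual layout handling. An OOB-only raw write from a caller, sketched with an illustrative oob_buf (ooblen may be anything up to mtd->oobsize in this mode):

	struct mtd_oob_ops ops = {
		.mode   = MTD_OPS_RAW,          /* no ECC layout applied to the spare area */
		.ooblen = mtd->oobsize,
		.oobbuf = oob_buf,
	};
	int err = mtd->write_oob(mtd, offset, &ops);
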
@@ -2416,9 +2439,9 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2416 2439
2417/** 2440/**
2418 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band 2441 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
2419 * @mtd: MTD device structure 2442 * @mtd: MTD device structure
2420 * @to: offset to write to 2443 * @to: offset to write to
2421 * @ops: oob operation description structure 2444 * @ops: oob operation description structure
2422 */ 2445 */
2423static int nand_write_oob(struct mtd_info *mtd, loff_t to, 2446static int nand_write_oob(struct mtd_info *mtd, loff_t to,
2424 struct mtd_oob_ops *ops) 2447 struct mtd_oob_ops *ops)
@@ -2430,17 +2453,17 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
2430 2453
2431 /* Do not allow writes past end of device */ 2454 /* Do not allow writes past end of device */
2432 if (ops->datbuf && (to + ops->len) > mtd->size) { 2455 if (ops->datbuf && (to + ops->len) > mtd->size) {
2433 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond " 2456 pr_debug("%s: attempt to write beyond end of device\n",
2434 "end of device\n", __func__); 2457 __func__);
2435 return -EINVAL; 2458 return -EINVAL;
2436 } 2459 }
2437 2460
2438 nand_get_device(chip, mtd, FL_WRITING); 2461 nand_get_device(chip, mtd, FL_WRITING);
2439 2462
2440 switch (ops->mode) { 2463 switch (ops->mode) {
2441 case MTD_OOB_PLACE: 2464 case MTD_OPS_PLACE_OOB:
2442 case MTD_OOB_AUTO: 2465 case MTD_OPS_AUTO_OOB:
2443 case MTD_OOB_RAW: 2466 case MTD_OPS_RAW:
2444 break; 2467 break;
2445 2468
2446 default: 2469 default:
@@ -2458,11 +2481,11 @@ out:
2458} 2481}
2459 2482
2460/** 2483/**
2461 * single_erease_cmd - [GENERIC] NAND standard block erase command function 2484 * single_erase_cmd - [GENERIC] NAND standard block erase command function
2462 * @mtd: MTD device structure 2485 * @mtd: MTD device structure
2463 * @page: the page address of the block which will be erased 2486 * @page: the page address of the block which will be erased
2464 * 2487 *
2465 * Standard erase command for NAND chips 2488 * Standard erase command for NAND chips.
2466 */ 2489 */
2467static void single_erase_cmd(struct mtd_info *mtd, int page) 2490static void single_erase_cmd(struct mtd_info *mtd, int page)
2468{ 2491{
@@ -2473,12 +2496,11 @@ static void single_erase_cmd(struct mtd_info *mtd, int page)
2473} 2496}
2474 2497
2475/** 2498/**
2476 * multi_erease_cmd - [GENERIC] AND specific block erase command function 2499 * multi_erase_cmd - [GENERIC] AND specific block erase command function
2477 * @mtd: MTD device structure 2500 * @mtd: MTD device structure
2478 * @page: the page address of the block which will be erased 2501 * @page: the page address of the block which will be erased
2479 * 2502 *
2480 * AND multi block erase command function 2503 * AND multi block erase command function. Erase 4 consecutive blocks.
2481 * Erase 4 consecutive blocks
2482 */ 2504 */
2483static void multi_erase_cmd(struct mtd_info *mtd, int page) 2505static void multi_erase_cmd(struct mtd_info *mtd, int page)
2484{ 2506{
@@ -2493,10 +2515,10 @@ static void multi_erase_cmd(struct mtd_info *mtd, int page)
2493 2515
2494/** 2516/**
2495 * nand_erase - [MTD Interface] erase block(s) 2517 * nand_erase - [MTD Interface] erase block(s)
2496 * @mtd: MTD device structure 2518 * @mtd: MTD device structure
2497 * @instr: erase instruction 2519 * @instr: erase instruction
2498 * 2520 *
2499 * Erase one ore more blocks 2521 * Erase one or more blocks.
2500 */ 2522 */
2501static int nand_erase(struct mtd_info *mtd, struct erase_info *instr) 2523static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
2502{ 2524{
@@ -2505,12 +2527,12 @@ static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
2505 2527
2506#define BBT_PAGE_MASK 0xffffff3f 2528#define BBT_PAGE_MASK 0xffffff3f
2507/** 2529/**
2508 * nand_erase_nand - [Internal] erase block(s) 2530 * nand_erase_nand - [INTERN] erase block(s)
2509 * @mtd: MTD device structure 2531 * @mtd: MTD device structure
2510 * @instr: erase instruction 2532 * @instr: erase instruction
2511 * @allowbbt: allow erasing the bbt area 2533 * @allowbbt: allow erasing the bbt area
2512 * 2534 *
2513 * Erase one ore more blocks 2535 * Erase one or more blocks.
2514 */ 2536 */
2515int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, 2537int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2516 int allowbbt) 2538 int allowbbt)
@@ -2521,9 +2543,9 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2521 unsigned int bbt_masked_page = 0xffffffff; 2543 unsigned int bbt_masked_page = 0xffffffff;
2522 loff_t len; 2544 loff_t len;
2523 2545
2524 DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n", 2546 pr_debug("%s: start = 0x%012llx, len = %llu\n",
2525 __func__, (unsigned long long)instr->addr, 2547 __func__, (unsigned long long)instr->addr,
2526 (unsigned long long)instr->len); 2548 (unsigned long long)instr->len);
2527 2549
2528 if (check_offs_len(mtd, instr->addr, instr->len)) 2550 if (check_offs_len(mtd, instr->addr, instr->len))
2529 return -EINVAL; 2551 return -EINVAL;
@@ -2545,8 +2567,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2545 2567
2546 /* Check, if it is write protected */ 2568 /* Check, if it is write protected */
2547 if (nand_check_wp(mtd)) { 2569 if (nand_check_wp(mtd)) {
2548 DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n", 2570 pr_debug("%s: device is write protected!\n",
2549 __func__); 2571 __func__);
2550 instr->state = MTD_ERASE_FAILED; 2572 instr->state = MTD_ERASE_FAILED;
2551 goto erase_exit; 2573 goto erase_exit;
2552 } 2574 }
@@ -2555,7 +2577,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2555 * If BBT requires refresh, set the BBT page mask to see if the BBT 2577 * If BBT requires refresh, set the BBT page mask to see if the BBT
2556 * should be rewritten. Otherwise the mask is set to 0xffffffff which 2578 * should be rewritten. Otherwise the mask is set to 0xffffffff which
2557 * can not be matched. This is also done when the bbt is actually 2579 * can not be matched. This is also done when the bbt is actually
2558 * erased to avoid recusrsive updates 2580 * erased to avoid recursive updates.
2559 */ 2581 */
2560 if (chip->options & BBT_AUTO_REFRESH && !allowbbt) 2582 if (chip->options & BBT_AUTO_REFRESH && !allowbbt)
2561 bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK; 2583 bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK;
@@ -2566,20 +2588,18 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2566 instr->state = MTD_ERASING; 2588 instr->state = MTD_ERASING;
2567 2589
2568 while (len) { 2590 while (len) {
2569 /* 2591 /* Check if we have a bad block, we do not erase bad blocks! */
2570 * heck if we have a bad block, we do not erase bad blocks !
2571 */
2572 if (nand_block_checkbad(mtd, ((loff_t) page) << 2592 if (nand_block_checkbad(mtd, ((loff_t) page) <<
2573 chip->page_shift, 0, allowbbt)) { 2593 chip->page_shift, 0, allowbbt)) {
2574 printk(KERN_WARNING "%s: attempt to erase a bad block " 2594 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
2575 "at page 0x%08x\n", __func__, page); 2595 __func__, page);
2576 instr->state = MTD_ERASE_FAILED; 2596 instr->state = MTD_ERASE_FAILED;
2577 goto erase_exit; 2597 goto erase_exit;
2578 } 2598 }
2579 2599
2580 /* 2600 /*
2581 * Invalidate the page cache, if we erase the block which 2601 * Invalidate the page cache, if we erase the block which
2582 * contains the current cached page 2602 * contains the current cached page.
2583 */ 2603 */
2584 if (page <= chip->pagebuf && chip->pagebuf < 2604 if (page <= chip->pagebuf && chip->pagebuf <
2585 (page + pages_per_block)) 2605 (page + pages_per_block))
@@ -2599,8 +2619,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2599 2619
2600 /* See if block erase succeeded */ 2620 /* See if block erase succeeded */
2601 if (status & NAND_STATUS_FAIL) { 2621 if (status & NAND_STATUS_FAIL) {
2602 DEBUG(MTD_DEBUG_LEVEL0, "%s: Failed erase, " 2622 pr_debug("%s: failed erase, page 0x%08x\n",
2603 "page 0x%08x\n", __func__, page); 2623 __func__, page);
2604 instr->state = MTD_ERASE_FAILED; 2624 instr->state = MTD_ERASE_FAILED;
2605 instr->fail_addr = 2625 instr->fail_addr =
2606 ((loff_t)page << chip->page_shift); 2626 ((loff_t)page << chip->page_shift);
@@ -2609,7 +2629,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2609 2629
2610 /* 2630 /*
2611 * If BBT requires refresh, set the BBT rewrite flag to the 2631 * If BBT requires refresh, set the BBT rewrite flag to the
2612 * page being erased 2632 * page being erased.
2613 */ 2633 */
2614 if (bbt_masked_page != 0xffffffff && 2634 if (bbt_masked_page != 0xffffffff &&
2615 (page & BBT_PAGE_MASK) == bbt_masked_page) 2635 (page & BBT_PAGE_MASK) == bbt_masked_page)
@@ -2628,7 +2648,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2628 2648
2629 /* 2649 /*
2630 * If BBT requires refresh and BBT-PERCHIP, set the BBT 2650 * If BBT requires refresh and BBT-PERCHIP, set the BBT
2631 * page mask to see if this BBT should be rewritten 2651 * page mask to see if this BBT should be rewritten.
2632 */ 2652 */
2633 if (bbt_masked_page != 0xffffffff && 2653 if (bbt_masked_page != 0xffffffff &&
2634 (chip->bbt_td->options & NAND_BBT_PERCHIP)) 2654 (chip->bbt_td->options & NAND_BBT_PERCHIP))
@@ -2651,7 +2671,7 @@ erase_exit:
2651 2671
2652 /* 2672 /*
2653 * If BBT requires refresh and erase was successful, rewrite any 2673 * If BBT requires refresh and erase was successful, rewrite any
2654 * selected bad block tables 2674 * selected bad block tables.
2655 */ 2675 */
2656 if (bbt_masked_page == 0xffffffff || ret) 2676 if (bbt_masked_page == 0xffffffff || ret)
2657 return ret; 2677 return ret;
@@ -2659,10 +2679,10 @@ erase_exit:
2659 for (chipnr = 0; chipnr < chip->numchips; chipnr++) { 2679 for (chipnr = 0; chipnr < chip->numchips; chipnr++) {
2660 if (!rewrite_bbt[chipnr]) 2680 if (!rewrite_bbt[chipnr])
2661 continue; 2681 continue;
2662 /* update the BBT for chip */ 2682 /* Update the BBT for chip */
2663 DEBUG(MTD_DEBUG_LEVEL0, "%s: nand_update_bbt " 2683 pr_debug("%s: nand_update_bbt (%d:0x%0llx 0x%0x)\n",
2664 "(%d:0x%0llx 0x%0x)\n", __func__, chipnr, 2684 __func__, chipnr, rewrite_bbt[chipnr],
2665 rewrite_bbt[chipnr], chip->bbt_td->pages[chipnr]); 2685 chip->bbt_td->pages[chipnr]);
2666 nand_update_bbt(mtd, rewrite_bbt[chipnr]); 2686 nand_update_bbt(mtd, rewrite_bbt[chipnr]);
2667 } 2687 }
2668 2688
@@ -2672,15 +2692,15 @@ erase_exit:
2672 2692
2673/** 2693/**
2674 * nand_sync - [MTD Interface] sync 2694 * nand_sync - [MTD Interface] sync
2675 * @mtd: MTD device structure 2695 * @mtd: MTD device structure
2676 * 2696 *
2677 * Sync is actually a wait for chip ready function 2697 * Sync is actually a wait for chip ready function.
2678 */ 2698 */
2679static void nand_sync(struct mtd_info *mtd) 2699static void nand_sync(struct mtd_info *mtd)
2680{ 2700{
2681 struct nand_chip *chip = mtd->priv; 2701 struct nand_chip *chip = mtd->priv;
2682 2702
2683 DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__); 2703 pr_debug("%s: called\n", __func__);
2684 2704
2685 /* Grab the lock and see if the device is available */ 2705 /* Grab the lock and see if the device is available */
2686 nand_get_device(chip, mtd, FL_SYNCING); 2706 nand_get_device(chip, mtd, FL_SYNCING);
@@ -2690,8 +2710,8 @@ static void nand_sync(struct mtd_info *mtd)
2690 2710
2691/** 2711/**
2692 * nand_block_isbad - [MTD Interface] Check if block at offset is bad 2712 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
2693 * @mtd: MTD device structure 2713 * @mtd: MTD device structure
2694 * @offs: offset relative to mtd start 2714 * @offs: offset relative to mtd start
2695 */ 2715 */
2696static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) 2716static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
2697{ 2717{
@@ -2704,8 +2724,8 @@ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
2704 2724
2705/** 2725/**
2706 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad 2726 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
2707 * @mtd: MTD device structure 2727 * @mtd: MTD device structure
2708 * @ofs: offset relative to mtd start 2728 * @ofs: offset relative to mtd start
2709 */ 2729 */
2710static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs) 2730static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
2711{ 2731{
@@ -2714,7 +2734,7 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
2714 2734
2715 ret = nand_block_isbad(mtd, ofs); 2735 ret = nand_block_isbad(mtd, ofs);
2716 if (ret) { 2736 if (ret) {
2717 /* If it was bad already, return success and do nothing. */ 2737 /* If it was bad already, return success and do nothing */
2718 if (ret > 0) 2738 if (ret > 0)
2719 return 0; 2739 return 0;
2720 return ret; 2740 return ret;
@@ -2725,7 +2745,7 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
2725 2745
2726/** 2746/**
2727 * nand_suspend - [MTD Interface] Suspend the NAND flash 2747 * nand_suspend - [MTD Interface] Suspend the NAND flash
2728 * @mtd: MTD device structure 2748 * @mtd: MTD device structure
2729 */ 2749 */
2730static int nand_suspend(struct mtd_info *mtd) 2750static int nand_suspend(struct mtd_info *mtd)
2731{ 2751{
@@ -2736,7 +2756,7 @@ static int nand_suspend(struct mtd_info *mtd)
2736 2756
2737/** 2757/**
2738 * nand_resume - [MTD Interface] Resume the NAND flash 2758 * nand_resume - [MTD Interface] Resume the NAND flash
2739 * @mtd: MTD device structure 2759 * @mtd: MTD device structure
2740 */ 2760 */
2741static void nand_resume(struct mtd_info *mtd) 2761static void nand_resume(struct mtd_info *mtd)
2742{ 2762{
@@ -2745,13 +2765,11 @@ static void nand_resume(struct mtd_info *mtd)
2745 if (chip->state == FL_PM_SUSPENDED) 2765 if (chip->state == FL_PM_SUSPENDED)
2746 nand_release_device(mtd); 2766 nand_release_device(mtd);
2747 else 2767 else
2748 printk(KERN_ERR "%s called for a chip which is not " 2768 pr_err("%s called for a chip which is not in suspended state\n",
2749 "in suspended state\n", __func__); 2769 __func__);
2750} 2770}
2751 2771
2752/* 2772/* Set default functions */
2753 * Set default functions
2754 */
2755static void nand_set_defaults(struct nand_chip *chip, int busw) 2773static void nand_set_defaults(struct nand_chip *chip, int busw)
2756{ 2774{
2757 /* check for proper chip_delay setup, set 20us if not */ 2775 /* check for proper chip_delay setup, set 20us if not */
@@ -2793,23 +2811,21 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
2793 2811
2794} 2812}
2795 2813
2796/* 2814/* Sanitize ONFI strings so we can safely print them */
2797 * sanitize ONFI strings so we can safely print them
2798 */
2799static void sanitize_string(uint8_t *s, size_t len) 2815static void sanitize_string(uint8_t *s, size_t len)
2800{ 2816{
2801 ssize_t i; 2817 ssize_t i;
2802 2818
2803 /* null terminate */ 2819 /* Null terminate */
2804 s[len - 1] = 0; 2820 s[len - 1] = 0;
2805 2821
2806 /* remove non printable chars */ 2822 /* Remove non printable chars */
2807 for (i = 0; i < len - 1; i++) { 2823 for (i = 0; i < len - 1; i++) {
2808 if (s[i] < ' ' || s[i] > 127) 2824 if (s[i] < ' ' || s[i] > 127)
2809 s[i] = '?'; 2825 s[i] = '?';
2810 } 2826 }
2811 2827
2812 /* remove trailing spaces */ 2828 /* Remove trailing spaces */
2813 strim(s); 2829 strim(s);
2814} 2830}
2815 2831
@@ -2826,28 +2842,28 @@ static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
2826} 2842}
2827 2843
2828/* 2844/*
2829 * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise 2845 * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
2830 */ 2846 */
2831static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, 2847static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2832 int busw) 2848 int *busw)
2833{ 2849{
2834 struct nand_onfi_params *p = &chip->onfi_params; 2850 struct nand_onfi_params *p = &chip->onfi_params;
2835 int i; 2851 int i;
2836 int val; 2852 int val;
2837 2853
2838 /* try ONFI for unknow chip or LP */ 2854 /* Try ONFI for unknown chip or LP */
2839 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1); 2855 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
2840 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' || 2856 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
2841 chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I') 2857 chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
2842 return 0; 2858 return 0;
2843 2859
2844 printk(KERN_INFO "ONFI flash detected\n"); 2860 pr_info("ONFI flash detected\n");
2845 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1); 2861 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
2846 for (i = 0; i < 3; i++) { 2862 for (i = 0; i < 3; i++) {
2847 chip->read_buf(mtd, (uint8_t *)p, sizeof(*p)); 2863 chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
2848 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) == 2864 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
2849 le16_to_cpu(p->crc)) { 2865 le16_to_cpu(p->crc)) {
2850 printk(KERN_INFO "ONFI param page %d valid\n", i); 2866 pr_info("ONFI param page %d valid\n", i);
2851 break; 2867 break;
2852 } 2868 }
2853 } 2869 }
@@ -2855,7 +2871,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2855 if (i == 3) 2871 if (i == 3)
2856 return 0; 2872 return 0;
2857 2873
2858 /* check version */ 2874 /* Check version */
2859 val = le16_to_cpu(p->revision); 2875 val = le16_to_cpu(p->revision);
2860 if (val & (1 << 5)) 2876 if (val & (1 << 5))
2861 chip->onfi_version = 23; 2877 chip->onfi_version = 23;
@@ -2871,8 +2887,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2871 chip->onfi_version = 0; 2887 chip->onfi_version = 0;
2872 2888
2873 if (!chip->onfi_version) { 2889 if (!chip->onfi_version) {
2874 printk(KERN_INFO "%s: unsupported ONFI version: %d\n", 2890 pr_info("%s: unsupported ONFI version: %d\n", __func__, val);
2875 __func__, val);
2876 return 0; 2891 return 0;
2877 } 2892 }
2878 2893
@@ -2884,9 +2899,9 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2884 mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize; 2899 mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
2885 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page); 2900 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
2886 chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize; 2901 chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
2887 busw = 0; 2902 *busw = 0;
2888 if (le16_to_cpu(p->features) & 1) 2903 if (le16_to_cpu(p->features) & 1)
2889 busw = NAND_BUSWIDTH_16; 2904 *busw = NAND_BUSWIDTH_16;
2890 2905
2891 chip->options &= ~NAND_CHIPOPTIONS_MSK; 2906 chip->options &= ~NAND_CHIPOPTIONS_MSK;
2892 chip->options |= (NAND_NO_READRDY | 2907 chip->options |= (NAND_NO_READRDY |
@@ -2896,7 +2911,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2896} 2911}
2897 2912
2898/* 2913/*
2899 * Get the flash and manufacturer id and lookup if the type is supported 2914 * Get the flash and manufacturer id and lookup if the type is supported.
2900 */ 2915 */
2901static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, 2916static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2902 struct nand_chip *chip, 2917 struct nand_chip *chip,
@@ -2913,7 +2928,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2913 2928
2914 /* 2929 /*
2915 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) 2930 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
2916 * after power-up 2931 * after power-up.
2917 */ 2932 */
2918 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); 2933 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
2919 2934
@@ -2924,7 +2939,8 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2924 *maf_id = chip->read_byte(mtd); 2939 *maf_id = chip->read_byte(mtd);
2925 *dev_id = chip->read_byte(mtd); 2940 *dev_id = chip->read_byte(mtd);
2926 2941
2927 /* Try again to make sure, as some systems the bus-hold or other 2942 /*
2943 * Try again to make sure, as some systems the bus-hold or other
2928 * interface concerns can cause random data which looks like a 2943 * Try again to make sure, as on some systems the bus-hold or other
2929 * possibly credible NAND flash to appear. If the two results do 2945 * possibly credible NAND flash to appear. If the two results do
2930 * not match, ignore the device completely. 2946 * not match, ignore the device completely.
@@ -2936,9 +2952,9 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2936 id_data[i] = chip->read_byte(mtd); 2952 id_data[i] = chip->read_byte(mtd);
2937 2953
2938 if (id_data[0] != *maf_id || id_data[1] != *dev_id) { 2954 if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
2939 printk(KERN_INFO "%s: second ID read did not match " 2955 pr_info("%s: second ID read did not match "
2940 "%02x,%02x against %02x,%02x\n", __func__, 2956 "%02x,%02x against %02x,%02x\n", __func__,
2941 *maf_id, *dev_id, id_data[0], id_data[1]); 2957 *maf_id, *dev_id, id_data[0], id_data[1]);
2942 return ERR_PTR(-ENODEV); 2958 return ERR_PTR(-ENODEV);
2943 } 2959 }
2944 2960
@@ -2952,7 +2968,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2952 chip->onfi_version = 0; 2968 chip->onfi_version = 0;
2953 if (!type->name || !type->pagesize) { 2969 if (!type->name || !type->pagesize) {
2954 /* Check is chip is ONFI compliant */ 2970 /* Check if chip is ONFI compliant */
2955 ret = nand_flash_detect_onfi(mtd, chip, busw); 2971 ret = nand_flash_detect_onfi(mtd, chip, &busw);
2956 if (ret) 2972 if (ret)
2957 goto ident_done; 2973 goto ident_done;
2958 } 2974 }
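The third argument of nand_flash_detect_onfi() turns into a pointer because the earlier code assigned the ONFI-reported bus width to a by-value parameter, so the result evaporated on return and the buswidth sanity check later in nand_get_flash_type() ran against a stale value. The shape of the fix, reduced to the signature (sketch only):

	/* before: the 'busw = NAND_BUSWIDTH_16' assignment is lost on return */
	static int nand_flash_detect_onfi(struct mtd_info *mtd,
					  struct nand_chip *chip, int busw);

	/* after: the ONFI features bit reaches the caller through *busw */
	static int nand_flash_detect_onfi(struct mtd_info *mtd,
					  struct nand_chip *chip, int *busw);
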
@@ -2973,7 +2989,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2973 chip->chipsize = (uint64_t)type->chipsize << 20; 2989 chip->chipsize = (uint64_t)type->chipsize << 20;
2974 2990
2975 if (!type->pagesize && chip->init_size) { 2991 if (!type->pagesize && chip->init_size) {
2976 /* set the pagesize, oobsize, erasesize by the driver*/ 2992 /* Set the pagesize, oobsize, erasesize by the driver */
2977 busw = chip->init_size(mtd, chip, id_data); 2993 busw = chip->init_size(mtd, chip, id_data);
2978 } else if (!type->pagesize) { 2994 } else if (!type->pagesize) {
2979 int extid; 2995 int extid;
@@ -3033,7 +3049,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
3033 } 3049 }
3034 } else { 3050 } else {
3035 /* 3051 /*
3036 * Old devices have chip data hardcoded in the device id table 3052 * Old devices have chip data hardcoded in the device id table.
3037 */ 3053 */
3038 mtd->erasesize = type->erasesize; 3054 mtd->erasesize = type->erasesize;
3039 mtd->writesize = type->pagesize; 3055 mtd->writesize = type->pagesize;
@@ -3043,7 +3059,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
3043 /* 3059 /*
3044 * Check for Spansion/AMD ID + repeating 5th, 6th byte since 3060 * Check for Spansion/AMD ID + repeating 5th, 6th byte since
3045 * some Spansion chips have erasesize that conflicts with size 3061 * some Spansion chips have erasesize that conflicts with size
3046 * listed in nand_ids table 3062 * listed in nand_ids table.
3047 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39) 3063 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
3048 */ 3064 */
3049 if (*maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && 3065 if (*maf_id == NAND_MFR_AMD && id_data[4] != 0x00 &&
@@ -3057,15 +3073,16 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
3057 chip->options &= ~NAND_CHIPOPTIONS_MSK; 3073 chip->options &= ~NAND_CHIPOPTIONS_MSK;
3058 chip->options |= type->options & NAND_CHIPOPTIONS_MSK; 3074 chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
3059 3075
3060 /* Check if chip is a not a samsung device. Do not clear the 3076 /*
3061 * options for chips which are not having an extended id. 3077 * Check if chip is not a Samsung device. Do not clear the
3078 * options for chips which do not have an extended id.
3062 */ 3079 */
3063 if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize) 3080 if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
3064 chip->options &= ~NAND_SAMSUNG_LP_OPTIONS; 3081 chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
3065ident_done: 3082ident_done:
3066 3083
3067 /* 3084 /*
3068 * Set chip as a default. Board drivers can override it, if necessary 3085 * Set chip as a default. Board drivers can override it, if necessary.
3069 */ 3086 */
3070 chip->options |= NAND_NO_AUTOINCR; 3087 chip->options |= NAND_NO_AUTOINCR;
3071 3088
@@ -3077,21 +3094,21 @@ ident_done:
3077 3094
3078 /* 3095 /*
3079 * Check, if buswidth is correct. Hardware drivers should set 3096 * Check, if buswidth is correct. Hardware drivers should set
3080 * chip correct ! 3097 * chip correct!
3081 */ 3098 */
3082 if (busw != (chip->options & NAND_BUSWIDTH_16)) { 3099 if (busw != (chip->options & NAND_BUSWIDTH_16)) {
3083 printk(KERN_INFO "NAND device: Manufacturer ID:" 3100 pr_info("NAND device: Manufacturer ID:"
3084 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, 3101 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
3085 *dev_id, nand_manuf_ids[maf_idx].name, mtd->name); 3102 *dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
3086 printk(KERN_WARNING "NAND bus width %d instead %d bit\n", 3103 pr_warn("NAND bus width %d instead %d bit\n",
3087 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8, 3104 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
3088 busw ? 16 : 8); 3105 busw ? 16 : 8);
3089 return ERR_PTR(-EINVAL); 3106 return ERR_PTR(-EINVAL);
3090 } 3107 }
3091 3108
3092 /* Calculate the address shift from the page size */ 3109 /* Calculate the address shift from the page size */
3093 chip->page_shift = ffs(mtd->writesize) - 1; 3110 chip->page_shift = ffs(mtd->writesize) - 1;
3094 /* Convert chipsize to number of pages per chip -1. */ 3111 /* Convert chipsize to number of pages per chip -1 */
3095 chip->pagemask = (chip->chipsize >> chip->page_shift) - 1; 3112 chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
3096 3113
3097 chip->bbt_erase_shift = chip->phys_erase_shift = 3114 chip->bbt_erase_shift = chip->phys_erase_shift =
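The context above shows the core deriving its addressing geometry once the chip is identified: page_shift is log2 of the page size (via ffs(), which works because NAND page sizes are powers of two), pagemask is the page index range within one chip, and the erase shifts follow from the block size. A minimal standalone sketch of the same arithmetic, using illustrative geometry (2048-byte pages, 128 KiB blocks, 256 MiB chip) rather than values taken from this patch:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
        unsigned writesize = 2048;              /* page size, example only */
        unsigned erasesize = 128 * 1024;        /* block size, example only */
        unsigned long long chipsize = 256ULL << 20;

        int page_shift  = ffs(writesize) - 1;   /* log2(page size)  = 11 */
        int erase_shift = ffs(erasesize) - 1;   /* log2(block size) = 17 */
        unsigned pagemask = (unsigned)((chipsize >> page_shift) - 1);

        printf("page_shift=%d erase_shift=%d pagemask=0x%x\n",
               page_shift, erase_shift, pagemask);
        printf("pages per block: %u\n", 1u << (erase_shift - page_shift));
        return 0;
}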
@@ -3121,7 +3138,7 @@ ident_done:
3121 if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) && 3138 if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
3122 (*maf_id == NAND_MFR_SAMSUNG || 3139 (*maf_id == NAND_MFR_SAMSUNG ||
3123 *maf_id == NAND_MFR_HYNIX)) 3140 *maf_id == NAND_MFR_HYNIX))
3124 chip->options |= NAND_BBT_SCANLASTPAGE; 3141 chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
3125 else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) && 3142 else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
3126 (*maf_id == NAND_MFR_SAMSUNG || 3143 (*maf_id == NAND_MFR_SAMSUNG ||
3127 *maf_id == NAND_MFR_HYNIX || 3144 *maf_id == NAND_MFR_HYNIX ||
@@ -3129,17 +3146,7 @@ ident_done:
3129 *maf_id == NAND_MFR_AMD)) || 3146 *maf_id == NAND_MFR_AMD)) ||
3130 (mtd->writesize == 2048 && 3147 (mtd->writesize == 2048 &&
3131 *maf_id == NAND_MFR_MICRON)) 3148 *maf_id == NAND_MFR_MICRON))
3132 chip->options |= NAND_BBT_SCAN2NDPAGE; 3149 chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
3133
3134 /*
3135 * Numonyx/ST 2K pages, x8 bus use BOTH byte 1 and 6
3136 */
3137 if (!(busw & NAND_BUSWIDTH_16) &&
3138 *maf_id == NAND_MFR_STMICRO &&
3139 mtd->writesize == 2048) {
3140 chip->options |= NAND_BBT_SCANBYTE1AND6;
3141 chip->badblockpos = 0;
3142 }
3143 3150
3144 /* Check for AND chips with 4 page planes */ 3151 /* Check for AND chips with 4 page planes */
3145 if (chip->options & NAND_4PAGE_ARRAY) 3152 if (chip->options & NAND_4PAGE_ARRAY)
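The bad-block-marker placement hunk above is part of the move from chip->options to the new chip->bbt_options field. A condensed, hypothetical helper restating the policy; the real code covers more manufacturer and bus-width combinations, and only the flag and field names visible in the diff are used here:

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

static void pick_bbm_scan_pos(struct mtd_info *mtd, struct nand_chip *chip,
                              int maf_id)
{
        int mlc = chip->cellinfo & NAND_CI_CELLTYPE_MSK;

        if (mlc && (maf_id == NAND_MFR_SAMSUNG || maf_id == NAND_MFR_HYNIX))
                /* MLC parts mark a bad block in its last page */
                chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
        else if (!mlc && mtd->writesize == 2048 && maf_id == NAND_MFR_MICRON)
                /* these SLC parts may mark either of the first two pages */
                chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
}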
@@ -3147,12 +3154,11 @@ ident_done:
3147 else 3154 else
3148 chip->erase_cmd = single_erase_cmd; 3155 chip->erase_cmd = single_erase_cmd;
3149 3156
3150 /* Do not replace user supplied command function ! */ 3157 /* Do not replace user supplied command function! */
3151 if (mtd->writesize > 512 && chip->cmdfunc == nand_command) 3158 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
3152 chip->cmdfunc = nand_command_lp; 3159 chip->cmdfunc = nand_command_lp;
3153 3160
3154 /* TODO onfi flash name */ 3161 pr_info("NAND device: Manufacturer ID:"
3155 printk(KERN_INFO "NAND device: Manufacturer ID:"
3156 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id, 3162 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id,
3157 nand_manuf_ids[maf_idx].name, 3163 nand_manuf_ids[maf_idx].name,
3158 chip->onfi_version ? chip->onfi_params.model : type->name); 3164 chip->onfi_version ? chip->onfi_params.model : type->name);
@@ -3162,12 +3168,12 @@ ident_done:
3162 3168
3163/** 3169/**
3164 * nand_scan_ident - [NAND Interface] Scan for the NAND device 3170 * nand_scan_ident - [NAND Interface] Scan for the NAND device
3165 * @mtd: MTD device structure 3171 * @mtd: MTD device structure
3166 * @maxchips: Number of chips to scan for 3172 * @maxchips: number of chips to scan for
3167 * @table: Alternative NAND ID table 3173 * @table: alternative NAND ID table
3168 * 3174 *
3169 * This is the first phase of the normal nand_scan() function. It 3175 * This is the first phase of the normal nand_scan() function. It reads the
3170 * reads the flash ID and sets up MTD fields accordingly. 3176 * flash ID and sets up MTD fields accordingly.
3171 * 3177 *
3172 * The mtd->owner field must be set to the module of the caller. 3178 * The mtd->owner field must be set to the module of the caller.
3173 */ 3179 */
@@ -3189,7 +3195,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
3189 3195
3190 if (IS_ERR(type)) { 3196 if (IS_ERR(type)) {
3191 if (!(chip->options & NAND_SCAN_SILENT_NODEV)) 3197 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
3192 printk(KERN_WARNING "No NAND device found.\n"); 3198 pr_warn("No NAND device found\n");
3193 chip->select_chip(mtd, -1); 3199 chip->select_chip(mtd, -1);
3194 return PTR_ERR(type); 3200 return PTR_ERR(type);
3195 } 3201 }
@@ -3207,7 +3213,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
3207 break; 3213 break;
3208 } 3214 }
3209 if (i > 1) 3215 if (i > 1)
3210 printk(KERN_INFO "%d NAND chips detected\n", i); 3216 pr_info("%d NAND chips detected\n", i);
3211 3217
3212 /* Store the number of chips and calc total size for mtd */ 3218 /* Store the number of chips and calc total size for mtd */
3213 chip->numchips = i; 3219 chip->numchips = i;
@@ -3220,11 +3226,11 @@ EXPORT_SYMBOL(nand_scan_ident);
3220 3226
3221/** 3227/**
3222 * nand_scan_tail - [NAND Interface] Scan for the NAND device 3228 * nand_scan_tail - [NAND Interface] Scan for the NAND device
3223 * @mtd: MTD device structure 3229 * @mtd: MTD device structure
3224 * 3230 *
3225 * This is the second phase of the normal nand_scan() function. It 3231 * This is the second phase of the normal nand_scan() function. It fills out
3226 * fills out all the uninitialized function pointers with the defaults 3232 * all the uninitialized function pointers with the defaults and scans for a
3227 * and scans for a bad block table if appropriate. 3233 * bad block table if appropriate.
3228 */ 3234 */
3229int nand_scan_tail(struct mtd_info *mtd) 3235int nand_scan_tail(struct mtd_info *mtd)
3230{ 3236{
@@ -3240,7 +3246,7 @@ int nand_scan_tail(struct mtd_info *mtd)
3240 chip->oob_poi = chip->buffers->databuf + mtd->writesize; 3246 chip->oob_poi = chip->buffers->databuf + mtd->writesize;
3241 3247
3242 /* 3248 /*
3243 * If no default placement scheme is given, select an appropriate one 3249 * If no default placement scheme is given, select an appropriate one.
3244 */ 3250 */
3245 if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) { 3251 if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) {
3246 switch (mtd->oobsize) { 3252 switch (mtd->oobsize) {
@@ -3257,8 +3263,8 @@ int nand_scan_tail(struct mtd_info *mtd)
3257 chip->ecc.layout = &nand_oob_128; 3263 chip->ecc.layout = &nand_oob_128;
3258 break; 3264 break;
3259 default: 3265 default:
3260 printk(KERN_WARNING "No oob scheme defined for " 3266 pr_warn("No oob scheme defined for oobsize %d\n",
3261 "oobsize %d\n", mtd->oobsize); 3267 mtd->oobsize);
3262 BUG(); 3268 BUG();
3263 } 3269 }
3264 } 3270 }
@@ -3267,7 +3273,7 @@ int nand_scan_tail(struct mtd_info *mtd)
3267 chip->write_page = nand_write_page; 3273 chip->write_page = nand_write_page;
3268 3274
3269 /* 3275 /*
3270 * check ECC mode, default to software if 3byte/512byte hardware ECC is 3276 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
3271 * selected and we have 256 byte pagesize fallback to software ECC 3277 * selected and we have 256 byte pagesize fallback to software ECC
3272 */ 3278 */
3273 3279
@@ -3276,15 +3282,15 @@ int nand_scan_tail(struct mtd_info *mtd)
3276 /* Similar to NAND_ECC_HW, but a separate read_page handle */ 3282 /* Similar to NAND_ECC_HW, but a separate read_page handle */
3277 if (!chip->ecc.calculate || !chip->ecc.correct || 3283 if (!chip->ecc.calculate || !chip->ecc.correct ||
3278 !chip->ecc.hwctl) { 3284 !chip->ecc.hwctl) {
3279 printk(KERN_WARNING "No ECC functions supplied; " 3285 pr_warn("No ECC functions supplied; "
3280 "Hardware ECC not possible\n"); 3286 "hardware ECC not possible\n");
3281 BUG(); 3287 BUG();
3282 } 3288 }
3283 if (!chip->ecc.read_page) 3289 if (!chip->ecc.read_page)
3284 chip->ecc.read_page = nand_read_page_hwecc_oob_first; 3290 chip->ecc.read_page = nand_read_page_hwecc_oob_first;
3285 3291
3286 case NAND_ECC_HW: 3292 case NAND_ECC_HW:
3287 /* Use standard hwecc read page function ? */ 3293 /* Use standard hwecc read page function? */
3288 if (!chip->ecc.read_page) 3294 if (!chip->ecc.read_page)
3289 chip->ecc.read_page = nand_read_page_hwecc; 3295 chip->ecc.read_page = nand_read_page_hwecc;
3290 if (!chip->ecc.write_page) 3296 if (!chip->ecc.write_page)
@@ -3305,11 +3311,11 @@ int nand_scan_tail(struct mtd_info *mtd)
3305 chip->ecc.read_page == nand_read_page_hwecc || 3311 chip->ecc.read_page == nand_read_page_hwecc ||
3306 !chip->ecc.write_page || 3312 !chip->ecc.write_page ||
3307 chip->ecc.write_page == nand_write_page_hwecc)) { 3313 chip->ecc.write_page == nand_write_page_hwecc)) {
3308 printk(KERN_WARNING "No ECC functions supplied; " 3314 pr_warn("No ECC functions supplied; "
3309 "Hardware ECC not possible\n"); 3315 "hardware ECC not possible\n");
3310 BUG(); 3316 BUG();
3311 } 3317 }
3312 /* Use standard syndrome read/write page function ? */ 3318 /* Use standard syndrome read/write page function? */
3313 if (!chip->ecc.read_page) 3319 if (!chip->ecc.read_page)
3314 chip->ecc.read_page = nand_read_page_syndrome; 3320 chip->ecc.read_page = nand_read_page_syndrome;
3315 if (!chip->ecc.write_page) 3321 if (!chip->ecc.write_page)
@@ -3325,9 +3331,9 @@ int nand_scan_tail(struct mtd_info *mtd)
3325 3331
3326 if (mtd->writesize >= chip->ecc.size) 3332 if (mtd->writesize >= chip->ecc.size)
3327 break; 3333 break;
3328 printk(KERN_WARNING "%d byte HW ECC not possible on " 3334 pr_warn("%d byte HW ECC not possible on "
3329 "%d byte page size, fallback to SW ECC\n", 3335 "%d byte page size, fallback to SW ECC\n",
3330 chip->ecc.size, mtd->writesize); 3336 chip->ecc.size, mtd->writesize);
3331 chip->ecc.mode = NAND_ECC_SOFT; 3337 chip->ecc.mode = NAND_ECC_SOFT;
3332 3338
3333 case NAND_ECC_SOFT: 3339 case NAND_ECC_SOFT:
@@ -3347,7 +3353,7 @@ int nand_scan_tail(struct mtd_info *mtd)
3347 3353
3348 case NAND_ECC_SOFT_BCH: 3354 case NAND_ECC_SOFT_BCH:
3349 if (!mtd_nand_has_bch()) { 3355 if (!mtd_nand_has_bch()) {
3350 printk(KERN_WARNING "CONFIG_MTD_ECC_BCH not enabled\n"); 3356 pr_warn("CONFIG_MTD_ECC_BCH not enabled\n");
3351 BUG(); 3357 BUG();
3352 } 3358 }
3353 chip->ecc.calculate = nand_bch_calculate_ecc; 3359 chip->ecc.calculate = nand_bch_calculate_ecc;
@@ -3362,8 +3368,8 @@ int nand_scan_tail(struct mtd_info *mtd)
3362 /* 3368 /*
3363 * Board driver should supply ecc.size and ecc.bytes values to 3369 * Board driver should supply ecc.size and ecc.bytes values to
3364 * select how many bits are correctable; see nand_bch_init() 3370 * select how many bits are correctable; see nand_bch_init()
3365 * for details. 3371 * for details. Otherwise, default to 4 bits for large page
3366 * Otherwise, default to 4 bits for large page devices 3372 * devices.
3367 */ 3373 */
3368 if (!chip->ecc.size && (mtd->oobsize >= 64)) { 3374 if (!chip->ecc.size && (mtd->oobsize >= 64)) {
3369 chip->ecc.size = 512; 3375 chip->ecc.size = 512;
@@ -3374,14 +3380,14 @@ int nand_scan_tail(struct mtd_info *mtd)
3374 chip->ecc.bytes, 3380 chip->ecc.bytes,
3375 &chip->ecc.layout); 3381 &chip->ecc.layout);
3376 if (!chip->ecc.priv) { 3382 if (!chip->ecc.priv) {
3377 printk(KERN_WARNING "BCH ECC initialization failed!\n"); 3383 pr_warn("BCH ECC initialization failed!\n");
3378 BUG(); 3384 BUG();
3379 } 3385 }
3380 break; 3386 break;
3381 3387
3382 case NAND_ECC_NONE: 3388 case NAND_ECC_NONE:
3383 printk(KERN_WARNING "NAND_ECC_NONE selected by board driver. " 3389 pr_warn("NAND_ECC_NONE selected by board driver. "
3384 "This is not recommended !!\n"); 3390 "This is not recommended!\n");
3385 chip->ecc.read_page = nand_read_page_raw; 3391 chip->ecc.read_page = nand_read_page_raw;
3386 chip->ecc.write_page = nand_write_page_raw; 3392 chip->ecc.write_page = nand_write_page_raw;
3387 chip->ecc.read_oob = nand_read_oob_std; 3393 chip->ecc.read_oob = nand_read_oob_std;
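For the NAND_ECC_SOFT_BCH case shown a little further up, the board driver normally supplies ecc.size and ecc.bytes; otherwise the core defaults to 512-byte steps with 4-bit correction on large-page parts and hands both values to nand_bch_init(). As a rough worked example (the 7-byte and GF(2^13) figures are my own back-of-the-envelope numbers, not text from the hunk): a 512-byte step is 4096 data bits, so the code works in GF(2^13), each correctable bit costs 13 parity bits, and 7 parity bytes give t = floor(56/13) = 4.

#include <stdio.h>

/* Back-of-the-envelope BCH sizing; values are assumptions for illustration. */
int main(void)
{
        int eccsize  = 512;             /* data bytes per ECC step */
        int eccbytes = 7;               /* parity bytes per step */
        int m = 13;                     /* GF(2^m), since 2^13 > 512 * 8 */
        int t = (eccbytes * 8) / m;     /* correctable bits per step */

        printf("BCH over %d-byte steps: %d parity bytes -> t = %d\n",
               eccsize, eccbytes, t);
        return 0;
}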
@@ -3393,14 +3399,19 @@ int nand_scan_tail(struct mtd_info *mtd)
3393 break; 3399 break;
3394 3400
3395 default: 3401 default:
3396 printk(KERN_WARNING "Invalid NAND_ECC_MODE %d\n", 3402 pr_warn("Invalid NAND_ECC_MODE %d\n", chip->ecc.mode);
3397 chip->ecc.mode);
3398 BUG(); 3403 BUG();
3399 } 3404 }
3400 3405
3406 /* For many systems, the standard OOB write also works for raw */
3407 if (!chip->ecc.read_oob_raw)
3408 chip->ecc.read_oob_raw = chip->ecc.read_oob;
3409 if (!chip->ecc.write_oob_raw)
3410 chip->ecc.write_oob_raw = chip->ecc.write_oob;
3411
3401 /* 3412 /*
3402 * The number of bytes available for a client to place data into 3413 * The number of bytes available for a client to place data into
3403 * the out of band area 3414 * the out of band area.
3404 */ 3415 */
3405 chip->ecc.layout->oobavail = 0; 3416 chip->ecc.layout->oobavail = 0;
3406 for (i = 0; chip->ecc.layout->oobfree[i].length 3417 for (i = 0; chip->ecc.layout->oobfree[i].length
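Two small additions sit in this hunk: the new read_oob_raw/write_oob_raw hooks simply fall back to the regular OOB handlers when a driver provides no raw variants, and oobavail is computed by summing the oobfree slots of the chosen layout. A standalone sketch of that summation with an example layout (the offset/length values are illustrative, not a specific chip's layout):

#include <stdio.h>

struct oobfree { int offset, length; };

int main(void)
{
        /* Example free-OOB map: 38 client bytes in a 64-byte OOB area */
        struct oobfree oobfree[] = { { 2, 38 }, { 0, 0 } };
        int i, oobavail = 0;

        for (i = 0; oobfree[i].length; i++)
                oobavail += oobfree[i].length;

        printf("OOB bytes available to clients: %d\n", oobavail);
        return 0;
}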
@@ -3411,19 +3422,16 @@ int nand_scan_tail(struct mtd_info *mtd)
3411 3422
3412 /* 3423 /*
3413 * Set the number of read / write steps for one page depending on ECC 3424 * Set the number of read / write steps for one page depending on ECC
3414 * mode 3425 * mode.
3415 */ 3426 */
3416 chip->ecc.steps = mtd->writesize / chip->ecc.size; 3427 chip->ecc.steps = mtd->writesize / chip->ecc.size;
3417 if (chip->ecc.steps * chip->ecc.size != mtd->writesize) { 3428 if (chip->ecc.steps * chip->ecc.size != mtd->writesize) {
3418 printk(KERN_WARNING "Invalid ecc parameters\n"); 3429 pr_warn("Invalid ECC parameters\n");
3419 BUG(); 3430 BUG();
3420 } 3431 }
3421 chip->ecc.total = chip->ecc.steps * chip->ecc.bytes; 3432 chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
3422 3433
3423 /* 3434 /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
3424 * Allow subpage writes up to ecc.steps. Not possible for MLC
3425 * FLASH.
3426 */
3427 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && 3435 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
3428 !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) { 3436 !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
3429 switch (chip->ecc.steps) { 3437 switch (chip->ecc.steps) {
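The steps/total check above enforces that the page divides evenly into ECC steps before the per-page parity budget is computed. A worked example with typical large-page numbers (illustrative values, not taken from the patch):

#include <stdio.h>

int main(void)
{
        int writesize = 2048;   /* page size */
        int ecc_size  = 512;    /* data bytes per ECC step */
        int ecc_bytes = 7;      /* parity bytes per step */
        int steps = writesize / ecc_size;

        if (steps * ecc_size != writesize) {
                fprintf(stderr, "invalid ECC parameters\n");
                return 1;
        }
        printf("ecc.steps=%d, ecc.total=%d parity bytes per page\n",
               steps, steps * ecc_bytes);
        return 0;
}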
@@ -3481,9 +3489,11 @@ int nand_scan_tail(struct mtd_info *mtd)
3481} 3489}
3482EXPORT_SYMBOL(nand_scan_tail); 3490EXPORT_SYMBOL(nand_scan_tail);
3483 3491
3484/* is_module_text_address() isn't exported, and it's mostly a pointless 3492/*
3493 * is_module_text_address() isn't exported, and it's mostly a pointless
3485 * test if this is a module _anyway_ -- they'd have to try _really_ hard 3494 * test if this is a module _anyway_ -- they'd have to try _really_ hard
3486 * to call us from in-kernel code if the core NAND support is modular. */ 3495 * to call us from in-kernel code if the core NAND support is modular.
3496 */
3487#ifdef MODULE 3497#ifdef MODULE
3488#define caller_is_module() (1) 3498#define caller_is_module() (1)
3489#else 3499#else
@@ -3493,15 +3503,13 @@ EXPORT_SYMBOL(nand_scan_tail);
3493 3503
3494/** 3504/**
3495 * nand_scan - [NAND Interface] Scan for the NAND device 3505 * nand_scan - [NAND Interface] Scan for the NAND device
3496 * @mtd: MTD device structure 3506 * @mtd: MTD device structure
3497 * @maxchips: Number of chips to scan for 3507 * @maxchips: number of chips to scan for
3498 *
3499 * This fills out all the uninitialized function pointers
3500 * with the defaults.
3501 * The flash ID is read and the mtd/chip structures are
3502 * filled with the appropriate values.
3503 * The mtd->owner field must be set to the module of the caller
3504 * 3508 *
3509 * This fills out all the uninitialized function pointers with the defaults.
3510 * The flash ID is read and the mtd/chip structures are filled with the
3511 * appropriate values. The mtd->owner field must be set to the module of the
3512 * caller.
3505 */ 3513 */
3506int nand_scan(struct mtd_info *mtd, int maxchips) 3514int nand_scan(struct mtd_info *mtd, int maxchips)
3507{ 3515{
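nand_scan() is simply the two phases documented above chained together, with the mtd->owner sanity check in between; drivers that need to adjust parameters after identification call the phases themselves. A hedged board-driver sketch using only interfaces visible in this file plus mtd_device_register(); the mydrv_ name and the surrounding wiring are invented for illustration:

#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>

/* Hedged two-phase probe sketch; everything prefixed mydrv_ is invented. */
static int mydrv_probe_nand(struct mtd_info *mtd, struct nand_chip *chip)
{
        int ret;

        mtd->priv = chip;
        mtd->owner = THIS_MODULE;       /* required by nand_scan*() */

        /* Phase 1: read the ID, fill in mtd/chip geometry */
        ret = nand_scan_ident(mtd, 1, NULL);
        if (ret)
                return ret;

        /* The driver may now tweak ecc.mode, ecc.size, bbt_options, ... */

        /* Phase 2: install default hooks, build/read the BBT */
        ret = nand_scan_tail(mtd);
        if (ret)
                return ret;

        return mtd_device_register(mtd, NULL, 0);
}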
@@ -3509,8 +3517,7 @@ int nand_scan(struct mtd_info *mtd, int maxchips)
3509 3517
3510 /* Many callers got this wrong, so check for it for a while... */ 3518 /* Many callers got this wrong, so check for it for a while... */
3511 if (!mtd->owner && caller_is_module()) { 3519 if (!mtd->owner && caller_is_module()) {
3512 printk(KERN_CRIT "%s called with NULL mtd->owner!\n", 3520 pr_crit("%s called with NULL mtd->owner!\n", __func__);
3513 __func__);
3514 BUG(); 3521 BUG();
3515 } 3522 }
3516 3523
@@ -3523,8 +3530,8 @@ EXPORT_SYMBOL(nand_scan);
3523 3530
3524/** 3531/**
3525 * nand_release - [NAND Interface] Free resources held by the NAND device 3532 * nand_release - [NAND Interface] Free resources held by the NAND device
3526 * @mtd: MTD device structure 3533 * @mtd: MTD device structure
3527*/ 3534 */
3528void nand_release(struct mtd_info *mtd) 3535void nand_release(struct mtd_info *mtd)
3529{ 3536{
3530 struct nand_chip *chip = mtd->priv; 3537 struct nand_chip *chip = mtd->priv;
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 4165857752ca..69148ae3bf58 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -14,7 +14,7 @@
14 * 14 *
15 * When nand_scan_bbt is called, then it tries to find the bad block table 15 * When nand_scan_bbt is called, then it tries to find the bad block table
16 * depending on the options in the BBT descriptor(s). If no flash based BBT 16 * depending on the options in the BBT descriptor(s). If no flash based BBT
17 * (NAND_USE_FLASH_BBT) is specified then the device is scanned for factory 17 * (NAND_BBT_USE_FLASH) is specified then the device is scanned for factory
18 * marked good / bad blocks. This information is used to create a memory BBT. 18 * marked good / bad blocks. This information is used to create a memory BBT.
19 * Once a new bad block is discovered then the "factory" information is updated 19 * Once a new bad block is discovered then the "factory" information is updated
20 * on the device. 20 * on the device.
@@ -36,9 +36,9 @@
36 * The table is marked in the OOB area with an ident pattern and a version 36 * The table is marked in the OOB area with an ident pattern and a version
37 * number which indicates which of both tables is more up to date. If the NAND 37 * number which indicates which of both tables is more up to date. If the NAND
38 * controller needs the complete OOB area for the ECC information then the 38 * controller needs the complete OOB area for the ECC information then the
39 * option NAND_USE_FLASH_BBT_NO_OOB should be used: it moves the ident pattern 39 * option NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of
40 * and the version byte into the data area and the OOB area will remain 40 * course): it moves the ident pattern and the version byte into the data area
41 * untouched. 41 * and the OOB area will remain untouched.
42 * 42 *
43 * The table uses 2 bits per block 43 * The table uses 2 bits per block
44 * 11b: block is good 44 * 11b: block is good
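The reworded header above separates two knobs: NAND_BBT_USE_FLASH selects an on-flash table at all, while NAND_BBT_NO_OOB additionally moves the ident pattern and version byte into the data area for controllers that need the whole OOB for ECC. The table itself packs four blocks per byte, two bits each (11b good, 00b factory-marked bad, the remaining codes for worn-out or reserved blocks). A standalone sketch of that packing; the helper and macro names are mine:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define BBT_GOOD                0x3
#define BBT_BAD_FACTORY         0x0

static void bbt_set(uint8_t *bbt, int block, uint8_t code)
{
        int shift = (block & 0x3) * 2;          /* four blocks per byte */

        bbt[block >> 2] &= ~(0x3 << shift);
        bbt[block >> 2] |= (code & 0x3) << shift;
}

static uint8_t bbt_get(const uint8_t *bbt, int block)
{
        return (bbt[block >> 2] >> ((block & 0x3) * 2)) & 0x3;
}

int main(void)
{
        uint8_t bbt[256];

        memset(bbt, 0xff, sizeof(bbt));         /* preset: all good (11b) */
        bbt_set(bbt, 42, BBT_BAD_FACTORY);

        printf("block 42 is %s\n",
               bbt_get(bbt, 42) == BBT_GOOD ? "good" : "bad/reserved");
        return 0;
}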
@@ -81,17 +81,15 @@ static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
81 81
82/** 82/**
83 * check_pattern - [GENERIC] check if a pattern is in the buffer 83 * check_pattern - [GENERIC] check if a pattern is in the buffer
84 * @buf: the buffer to search 84 * @buf: the buffer to search
85 * @len: the length of buffer to search 85 * @len: the length of buffer to search
86 * @paglen: the pagelength 86 * @paglen: the pagelength
87 * @td: search pattern descriptor 87 * @td: search pattern descriptor
88 * 88 *
89 * Check for a pattern at the given place. Used to search bad block 89 * Check for a pattern at the given place. Used to search bad block tables and
90 * tables and good / bad block identifiers. 90 * good / bad block identifiers. If the SCAN_EMPTY option is set then check, if
91 * If the SCAN_EMPTY option is set then check, if all bytes except the 91 * all bytes except the pattern area contain 0xff.
92 * pattern area contain 0xff 92 */
93 *
94*/
95static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td) 93static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
96{ 94{
97 int i, end = 0; 95 int i, end = 0;
@@ -110,32 +108,8 @@ static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_desc
110 p += end; 108 p += end;
111 109
112 /* Compare the pattern */ 110 /* Compare the pattern */
113 for (i = 0; i < td->len; i++) { 111 if (memcmp(p, td->pattern, td->len))
114 if (p[i] != td->pattern[i]) 112 return -1;
115 return -1;
116 }
117
118 /* Check both positions 1 and 6 for pattern? */
119 if (td->options & NAND_BBT_SCANBYTE1AND6) {
120 if (td->options & NAND_BBT_SCANEMPTY) {
121 p += td->len;
122 end += NAND_SMALL_BADBLOCK_POS - td->offs;
123 /* Check region between positions 1 and 6 */
124 for (i = 0; i < NAND_SMALL_BADBLOCK_POS - td->offs - td->len;
125 i++) {
126 if (*p++ != 0xff)
127 return -1;
128 }
129 }
130 else {
131 p += NAND_SMALL_BADBLOCK_POS - td->offs;
132 }
133 /* Compare the pattern */
134 for (i = 0; i < td->len; i++) {
135 if (p[i] != td->pattern[i])
136 return -1;
137 }
138 }
139 113
140 if (td->options & NAND_BBT_SCANEMPTY) { 114 if (td->options & NAND_BBT_SCANEMPTY) {
141 p += td->len; 115 p += td->len;
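check_pattern() now leans on memcmp() for the ident comparison and drops the special byte-1-and-6 handling, which leaves the generic pattern code in this series. A minimal userspace illustration of the simplified check; the "Bbt0" string is used only as a plausible example ident pattern and the helper name is mine:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int pattern_matches(const uint8_t *oob, int offs,
                           const uint8_t *pattern, int len)
{
        return memcmp(oob + offs, pattern, len) == 0;
}

int main(void)
{
        static const uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
        uint8_t oob[64];

        memset(oob, 0xff, sizeof(oob));
        memcpy(oob + 8, bbt_pattern, sizeof(bbt_pattern));

        printf("found: %d\n",
               pattern_matches(oob, 8, bbt_pattern, sizeof(bbt_pattern)));
        return 0;
}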
@@ -150,14 +124,13 @@ static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_desc
150 124
151/** 125/**
152 * check_short_pattern - [GENERIC] check if a pattern is in the buffer 126 * check_short_pattern - [GENERIC] check if a pattern is in the buffer
153 * @buf: the buffer to search 127 * @buf: the buffer to search
154 * @td: search pattern descriptor 128 * @td: search pattern descriptor
155 *
156 * Check for a pattern at the given place. Used to search bad block
157 * tables and good / bad block identifiers. Same as check_pattern, but
158 * no optional empty check
159 * 129 *
160*/ 130 * Check for a pattern at the given place. Used to search bad block tables and
131 * good / bad block identifiers. Same as check_pattern, but no optional empty
132 * check.
133 */
161static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td) 134static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
162{ 135{
163 int i; 136 int i;
@@ -168,21 +141,14 @@ static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
168 if (p[td->offs + i] != td->pattern[i]) 141 if (p[td->offs + i] != td->pattern[i])
169 return -1; 142 return -1;
170 } 143 }
171 /* Need to check location 1 AND 6? */
172 if (td->options & NAND_BBT_SCANBYTE1AND6) {
173 for (i = 0; i < td->len; i++) {
174 if (p[NAND_SMALL_BADBLOCK_POS + i] != td->pattern[i])
175 return -1;
176 }
177 }
178 return 0; 144 return 0;
179} 145}
180 146
181/** 147/**
182 * add_marker_len - compute the length of the marker in data area 148 * add_marker_len - compute the length of the marker in data area
183 * @td: BBT descriptor used for computation 149 * @td: BBT descriptor used for computation
184 * 150 *
185 * The length will be 0 if the markeris located in OOB area. 151 * The length will be 0 if the marker is located in OOB area.
186 */ 152 */
187static u32 add_marker_len(struct nand_bbt_descr *td) 153static u32 add_marker_len(struct nand_bbt_descr *td)
188{ 154{
@@ -199,34 +165,33 @@ static u32 add_marker_len(struct nand_bbt_descr *td)
199 165
200/** 166/**
201 * read_bbt - [GENERIC] Read the bad block table starting from page 167 * read_bbt - [GENERIC] Read the bad block table starting from page
202 * @mtd: MTD device structure 168 * @mtd: MTD device structure
203 * @buf: temporary buffer 169 * @buf: temporary buffer
204 * @page: the starting page 170 * @page: the starting page
205 * @num: the number of bbt descriptors to read 171 * @num: the number of bbt descriptors to read
206 * @td: the bbt description table 172 * @td: the bbt description table
207 * @offs: offset in the memory table 173 * @offs: offset in the memory table
208 * 174 *
209 * Read the bad block table starting from page. 175 * Read the bad block table starting from page.
210 *
211 */ 176 */
212static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num, 177static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
213 struct nand_bbt_descr *td, int offs) 178 struct nand_bbt_descr *td, int offs)
214{ 179{
215 int res, i, j, act = 0; 180 int res, ret = 0, i, j, act = 0;
216 struct nand_chip *this = mtd->priv; 181 struct nand_chip *this = mtd->priv;
217 size_t retlen, len, totlen; 182 size_t retlen, len, totlen;
218 loff_t from; 183 loff_t from;
219 int bits = td->options & NAND_BBT_NRBITS_MSK; 184 int bits = td->options & NAND_BBT_NRBITS_MSK;
220 uint8_t msk = (uint8_t) ((1 << bits) - 1); 185 uint8_t msk = (uint8_t)((1 << bits) - 1);
221 u32 marker_len; 186 u32 marker_len;
222 int reserved_block_code = td->reserved_block_code; 187 int reserved_block_code = td->reserved_block_code;
223 188
224 totlen = (num * bits) >> 3; 189 totlen = (num * bits) >> 3;
225 marker_len = add_marker_len(td); 190 marker_len = add_marker_len(td);
226 from = ((loff_t) page) << this->page_shift; 191 from = ((loff_t)page) << this->page_shift;
227 192
228 while (totlen) { 193 while (totlen) {
229 len = min(totlen, (size_t) (1 << this->bbt_erase_shift)); 194 len = min(totlen, (size_t)(1 << this->bbt_erase_shift));
230 if (marker_len) { 195 if (marker_len) {
231 /* 196 /*
232 * In case the BBT marker is not in the OOB area it 197 * In case the BBT marker is not in the OOB area it
@@ -238,11 +203,18 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
238 } 203 }
239 res = mtd->read(mtd, from, len, &retlen, buf); 204 res = mtd->read(mtd, from, len, &retlen, buf);
240 if (res < 0) { 205 if (res < 0) {
241 if (retlen != len) { 206 if (mtd_is_eccerr(res)) {
242 printk(KERN_INFO "nand_bbt: Error reading bad block table\n"); 207 pr_info("nand_bbt: ECC error in BBT at "
208 "0x%012llx\n", from & ~mtd->writesize);
209 return res;
210 } else if (mtd_is_bitflip(res)) {
211 pr_info("nand_bbt: corrected error in BBT at "
212 "0x%012llx\n", from & ~mtd->writesize);
213 ret = res;
214 } else {
215 pr_info("nand_bbt: error reading BBT\n");
243 return res; 216 return res;
244 } 217 }
245 printk(KERN_WARNING "nand_bbt: ECC error while reading bad block table\n");
246 } 218 }
247 219
248 /* Analyse data */ 220 /* Analyse data */
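The rewritten error path above triages mtd->read() results three ways: an uncorrectable ECC error (mtd_is_eccerr()) is returned immediately so the caller can invalidate the table, a corrected bitflip (mtd_is_bitflip()) is remembered in ret but the scan continues so the table can later be scrubbed, and any other failure aborts as before. In the kernel those helpers test for -EBADMSG and -EUCLEAN respectively; a small standalone sketch of the same triage with that mapping written out explicitly:

#include <errno.h>
#include <stdio.h>

/* returns: <0 invalid/fatal, 0 clean, >0 readable but should be scrubbed */
static int triage_read_result(int res)
{
        if (res == 0)
                return 0;               /* clean read */
        if (res == -EUCLEAN)
                return 1;               /* bitflips corrected: usable, scrub */
        if (res == -EBADMSG)
                return -EBADMSG;        /* uncorrectable: table is invalid */
        return res;                     /* other I/O error */
}

int main(void)
{
        printf("%d %d %d\n", triage_read_result(0),
               triage_read_result(-EUCLEAN), triage_read_result(-EBADMSG));
        return 0;
}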
@@ -253,17 +225,19 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
253 if (tmp == msk) 225 if (tmp == msk)
254 continue; 226 continue;
255 if (reserved_block_code && (tmp == reserved_block_code)) { 227 if (reserved_block_code && (tmp == reserved_block_code)) {
256 printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%012llx\n", 228 pr_info("nand_read_bbt: reserved block at 0x%012llx\n",
257 (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift); 229 (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
258 this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06); 230 this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
259 mtd->ecc_stats.bbtblocks++; 231 mtd->ecc_stats.bbtblocks++;
260 continue; 232 continue;
261 } 233 }
262 /* Leave it for now, if its matured we can move this 234 /*
263 * message to MTD_DEBUG_LEVEL0 */ 235 * Leave it for now, if it's matured we can
264 printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%012llx\n", 236 * move this message to pr_debug.
265 (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift); 237 */
266 /* Factory marked bad or worn out ? */ 238 pr_info("nand_read_bbt: bad block at 0x%012llx\n",
239 (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
240 /* Factory marked bad or worn out? */
267 if (tmp == 0) 241 if (tmp == 0)
268 this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06); 242 this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
269 else 243 else
@@ -274,20 +248,20 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
274 totlen -= len; 248 totlen -= len;
275 from += len; 249 from += len;
276 } 250 }
277 return 0; 251 return ret;
278} 252}
279 253
280/** 254/**
281 * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page 255 * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
282 * @mtd: MTD device structure 256 * @mtd: MTD device structure
283 * @buf: temporary buffer 257 * @buf: temporary buffer
284 * @td: descriptor for the bad block table 258 * @td: descriptor for the bad block table
285 * @chip: read the table for a specific chip, -1 read all chips. 259 * @chip: read the table for a specific chip, -1 read all chips; applies only if
286 * Applies only if NAND_BBT_PERCHIP option is set 260 * NAND_BBT_PERCHIP option is set
287 * 261 *
288 * Read the bad block table for all chips starting at a given page 262 * Read the bad block table for all chips starting at a given page. We assume
289 * We assume that the bbt bits are in consecutive order. 263 * that the bbt bits are in consecutive order.
290*/ 264 */
291static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip) 265static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
292{ 266{
293 struct nand_chip *this = mtd->priv; 267 struct nand_chip *this = mtd->priv;
@@ -313,9 +287,7 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
313 return 0; 287 return 0;
314} 288}
315 289
316/* 290/* BBT marker is in the first page, no OOB */
317 * BBT marker is in the first page, no OOB.
318 */
319static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs, 291static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
320 struct nand_bbt_descr *td) 292 struct nand_bbt_descr *td)
321{ 293{
@@ -329,35 +301,26 @@ static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
329 return mtd->read(mtd, offs, len, &retlen, buf); 301 return mtd->read(mtd, offs, len, &retlen, buf);
330} 302}
331 303
332/* 304/* Scan read raw data from flash */
333 * Scan read raw data from flash
334 */
335static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs, 305static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
336 size_t len) 306 size_t len)
337{ 307{
338 struct mtd_oob_ops ops; 308 struct mtd_oob_ops ops;
339 int res; 309 int res;
340 310
341 ops.mode = MTD_OOB_RAW; 311 ops.mode = MTD_OPS_RAW;
342 ops.ooboffs = 0; 312 ops.ooboffs = 0;
343 ops.ooblen = mtd->oobsize; 313 ops.ooblen = mtd->oobsize;
344 314
345
346 while (len > 0) { 315 while (len > 0) {
347 if (len <= mtd->writesize) { 316 ops.datbuf = buf;
348 ops.oobbuf = buf + len; 317 ops.len = min(len, (size_t)mtd->writesize);
349 ops.datbuf = buf; 318 ops.oobbuf = buf + ops.len;
350 ops.len = len;
351 return mtd->read_oob(mtd, offs, &ops);
352 } else {
353 ops.oobbuf = buf + mtd->writesize;
354 ops.datbuf = buf;
355 ops.len = mtd->writesize;
356 res = mtd->read_oob(mtd, offs, &ops);
357 319
358 if (res) 320 res = mtd->read_oob(mtd, offs, &ops);
359 return res; 321
360 } 322 if (res)
323 return res;
361 324
362 buf += mtd->oobsize + mtd->writesize; 325 buf += mtd->oobsize + mtd->writesize;
363 len -= mtd->writesize; 326 len -= mtd->writesize;
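The restructured loop above replaces the old two-branch read with a single pass that clamps the final chunk via min() and lays each page's OOB directly after its data in the buffer. The chunking pattern on its own, with example sizes that are not from the patch:

#include <stdio.h>
#include <stddef.h>

static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
        size_t writesize = 2048, oobsize = 64;
        size_t len = 5000;              /* not a multiple of the page size */
        size_t offs = 0;

        while (len > 0) {
                size_t chunk = min_size(len, writesize);

                printf("read %zu data bytes + %zu OOB bytes at offset %zu\n",
                       chunk, oobsize, offs);

                offs += writesize;      /* advance by a full page on flash */
                len  -= chunk;
        }
        return 0;
}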
@@ -374,15 +337,13 @@ static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
374 return scan_read_raw_oob(mtd, buf, offs, len); 337 return scan_read_raw_oob(mtd, buf, offs, len);
375} 338}
376 339
377/* 340/* Scan write data with oob to flash */
378 * Scan write data with oob to flash
379 */
380static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len, 341static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
381 uint8_t *buf, uint8_t *oob) 342 uint8_t *buf, uint8_t *oob)
382{ 343{
383 struct mtd_oob_ops ops; 344 struct mtd_oob_ops ops;
384 345
385 ops.mode = MTD_OOB_PLACE; 346 ops.mode = MTD_OPS_PLACE_OOB;
386 ops.ooboffs = 0; 347 ops.ooboffs = 0;
387 ops.ooblen = mtd->oobsize; 348 ops.ooblen = mtd->oobsize;
388 ops.datbuf = buf; 349 ops.datbuf = buf;
@@ -403,15 +364,14 @@ static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
403 364
404/** 365/**
405 * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page 366 * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
406 * @mtd: MTD device structure 367 * @mtd: MTD device structure
407 * @buf: temporary buffer 368 * @buf: temporary buffer
408 * @td: descriptor for the bad block table 369 * @td: descriptor for the bad block table
409 * @md: descriptor for the bad block table mirror 370 * @md: descriptor for the bad block table mirror
410 * 371 *
411 * Read the bad block table(s) for all chips starting at a given page 372 * Read the bad block table(s) for all chips starting at a given page. We
412 * We assume that the bbt bits are in consecutive order. 373 * assume that the bbt bits are in consecutive order.
413 * 374 */
414*/
415static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf, 375static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
416 struct nand_bbt_descr *td, struct nand_bbt_descr *md) 376 struct nand_bbt_descr *td, struct nand_bbt_descr *md)
417{ 377{
@@ -422,8 +382,8 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
422 scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift, 382 scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
423 mtd->writesize, td); 383 mtd->writesize, td);
424 td->version[0] = buf[bbt_get_ver_offs(mtd, td)]; 384 td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
425 printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", 385 pr_info("Bad block table at page %d, version 0x%02X\n",
426 td->pages[0], td->version[0]); 386 td->pages[0], td->version[0]);
427 } 387 }
428 388
429 /* Read the mirror version, if available */ 389 /* Read the mirror version, if available */
@@ -431,15 +391,13 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
431 scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift, 391 scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
432 mtd->writesize, td); 392 mtd->writesize, td);
433 md->version[0] = buf[bbt_get_ver_offs(mtd, md)]; 393 md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
434 printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", 394 pr_info("Bad block table at page %d, version 0x%02X\n",
435 md->pages[0], md->version[0]); 395 md->pages[0], md->version[0]);
436 } 396 }
437 return 1; 397 return 1;
438} 398}
439 399
440/* 400/* Scan a given block full */
441 * Scan a given block full
442 */
443static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd, 401static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
444 loff_t offs, uint8_t *buf, size_t readlen, 402 loff_t offs, uint8_t *buf, size_t readlen,
445 int scanlen, int len) 403 int scanlen, int len)
@@ -447,7 +405,8 @@ static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
447 int ret, j; 405 int ret, j;
448 406
449 ret = scan_read_raw_oob(mtd, buf, offs, readlen); 407 ret = scan_read_raw_oob(mtd, buf, offs, readlen);
450 if (ret) 408 /* Ignore ECC errors when checking for BBM */
409 if (ret && !mtd_is_bitflip_or_eccerr(ret))
451 return ret; 410 return ret;
452 411
453 for (j = 0; j < len; j++, buf += scanlen) { 412 for (j = 0; j < len; j++, buf += scanlen) {
@@ -457,9 +416,7 @@ static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
457 return 0; 416 return 0;
458} 417}
459 418
460/* 419/* Scan a given block partially */
461 * Scan a given block partially
462 */
463static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd, 420static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
464 loff_t offs, uint8_t *buf, int len) 421 loff_t offs, uint8_t *buf, int len)
465{ 422{
@@ -470,16 +427,16 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
470 ops.oobbuf = buf; 427 ops.oobbuf = buf;
471 ops.ooboffs = 0; 428 ops.ooboffs = 0;
472 ops.datbuf = NULL; 429 ops.datbuf = NULL;
473 ops.mode = MTD_OOB_PLACE; 430 ops.mode = MTD_OPS_PLACE_OOB;
474 431
475 for (j = 0; j < len; j++) { 432 for (j = 0; j < len; j++) {
476 /* 433 /*
477 * Read the full oob until read_oob is fixed to 434 * Read the full oob until read_oob is fixed to handle single
478 * handle single byte reads for 16 bit 435 * byte reads for 16 bit buswidth.
479 * buswidth
480 */ 436 */
481 ret = mtd->read_oob(mtd, offs, &ops); 437 ret = mtd->read_oob(mtd, offs, &ops);
482 if (ret) 438 /* Ignore ECC errors when checking for BBM */
439 if (ret && !mtd_is_bitflip_or_eccerr(ret))
483 return ret; 440 return ret;
484 441
485 if (check_short_pattern(buf, bd)) 442 if (check_short_pattern(buf, bd))
@@ -492,14 +449,14 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
492 449
493/** 450/**
494 * create_bbt - [GENERIC] Create a bad block table by scanning the device 451 * create_bbt - [GENERIC] Create a bad block table by scanning the device
495 * @mtd: MTD device structure 452 * @mtd: MTD device structure
496 * @buf: temporary buffer 453 * @buf: temporary buffer
497 * @bd: descriptor for the good/bad block search pattern 454 * @bd: descriptor for the good/bad block search pattern
498 * @chip: create the table for a specific chip, -1 read all chips. 455 * @chip: create the table for a specific chip, -1 read all chips; applies only
499 * Applies only if NAND_BBT_PERCHIP option is set 456 * if NAND_BBT_PERCHIP option is set
500 * 457 *
501 * Create a bad block table by scanning the device 458 * Create a bad block table by scanning the device for the given good/bad block
502 * for the given good/bad block identify pattern 459 * identify pattern.
503 */ 460 */
504static int create_bbt(struct mtd_info *mtd, uint8_t *buf, 461static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
505 struct nand_bbt_descr *bd, int chip) 462 struct nand_bbt_descr *bd, int chip)
@@ -510,7 +467,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
510 loff_t from; 467 loff_t from;
511 size_t readlen; 468 size_t readlen;
512 469
513 printk(KERN_INFO "Scanning device for bad blocks\n"); 470 pr_info("Scanning device for bad blocks\n");
514 471
515 if (bd->options & NAND_BBT_SCANALLPAGES) 472 if (bd->options & NAND_BBT_SCANALLPAGES)
516 len = 1 << (this->bbt_erase_shift - this->page_shift); 473 len = 1 << (this->bbt_erase_shift - this->page_shift);
@@ -530,14 +487,16 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
530 } 487 }
531 488
532 if (chip == -1) { 489 if (chip == -1) {
533 /* Note that numblocks is 2 * (real numblocks) here, see i+=2 490 /*
534 * below as it makes shifting and masking less painful */ 491 * Note that numblocks is 2 * (real numblocks) here, see i+=2
492 * below as it makes shifting and masking less painful
493 */
535 numblocks = mtd->size >> (this->bbt_erase_shift - 1); 494 numblocks = mtd->size >> (this->bbt_erase_shift - 1);
536 startblock = 0; 495 startblock = 0;
537 from = 0; 496 from = 0;
538 } else { 497 } else {
539 if (chip >= this->numchips) { 498 if (chip >= this->numchips) {
540 printk(KERN_WARNING "create_bbt(): chipnr (%d) > available chips (%d)\n", 499 pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n",
541 chip + 1, this->numchips); 500 chip + 1, this->numchips);
542 return -EINVAL; 501 return -EINVAL;
543 } 502 }
@@ -547,7 +506,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
547 from = (loff_t)startblock << (this->bbt_erase_shift - 1); 506 from = (loff_t)startblock << (this->bbt_erase_shift - 1);
548 } 507 }
549 508
550 if (this->options & NAND_BBT_SCANLASTPAGE) 509 if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
551 from += mtd->erasesize - (mtd->writesize * len); 510 from += mtd->erasesize - (mtd->writesize * len);
552 511
553 for (i = startblock; i < numblocks;) { 512 for (i = startblock; i < numblocks;) {
@@ -566,8 +525,8 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
566 525
567 if (ret) { 526 if (ret) {
568 this->bbt[i >> 3] |= 0x03 << (i & 0x6); 527 this->bbt[i >> 3] |= 0x03 << (i & 0x6);
569 printk(KERN_WARNING "Bad eraseblock %d at 0x%012llx\n", 528 pr_warn("Bad eraseblock %d at 0x%012llx\n",
570 i >> 1, (unsigned long long)from); 529 i >> 1, (unsigned long long)from);
571 mtd->ecc_stats.badblocks++; 530 mtd->ecc_stats.badblocks++;
572 } 531 }
573 532
@@ -579,20 +538,18 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
579 538
580/** 539/**
581 * search_bbt - [GENERIC] scan the device for a specific bad block table 540 * search_bbt - [GENERIC] scan the device for a specific bad block table
582 * @mtd: MTD device structure 541 * @mtd: MTD device structure
583 * @buf: temporary buffer 542 * @buf: temporary buffer
584 * @td: descriptor for the bad block table 543 * @td: descriptor for the bad block table
585 * 544 *
586 * Read the bad block table by searching for a given ident pattern. 545 * Read the bad block table by searching for a given ident pattern. Search is
587 * Search is preformed either from the beginning up or from the end of 546 * preformed either from the beginning up or from the end of the device
588 * the device downwards. The search starts always at the start of a 547 * downwards. The search starts always at the start of a block. If the option
589 * block. 548 * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains
590 * If the option NAND_BBT_PERCHIP is given, each chip is searched 549 * the bad block information of this chip. This is necessary to provide support
591 * for a bbt, which contains the bad block information of this chip. 550 * for certain DOC devices.
592 * This is necessary to provide support for certain DOC devices.
593 * 551 *
594 * The bbt ident pattern resides in the oob area of the first page 552 * The bbt ident pattern resides in the oob area of the first page in a block.
595 * in a block.
596 */ 553 */
597static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td) 554static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
598{ 555{
@@ -603,7 +560,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
603 int bbtblocks; 560 int bbtblocks;
604 int blocktopage = this->bbt_erase_shift - this->page_shift; 561 int blocktopage = this->bbt_erase_shift - this->page_shift;
605 562
606 /* Search direction top -> down ? */ 563 /* Search direction top -> down? */
607 if (td->options & NAND_BBT_LASTBLOCK) { 564 if (td->options & NAND_BBT_LASTBLOCK) {
608 startblock = (mtd->size >> this->bbt_erase_shift) - 1; 565 startblock = (mtd->size >> this->bbt_erase_shift) - 1;
609 dir = -1; 566 dir = -1;
@@ -612,7 +569,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
612 dir = 1; 569 dir = 1;
613 } 570 }
614 571
615 /* Do we have a bbt per chip ? */ 572 /* Do we have a bbt per chip? */
616 if (td->options & NAND_BBT_PERCHIP) { 573 if (td->options & NAND_BBT_PERCHIP) {
617 chips = this->numchips; 574 chips = this->numchips;
618 bbtblocks = this->chipsize >> this->bbt_erase_shift; 575 bbtblocks = this->chipsize >> this->bbt_erase_shift;
@@ -651,23 +608,23 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
651 /* Check, if we found a bbt for each requested chip */ 608 /* Check, if we found a bbt for each requested chip */
652 for (i = 0; i < chips; i++) { 609 for (i = 0; i < chips; i++) {
653 if (td->pages[i] == -1) 610 if (td->pages[i] == -1)
654 printk(KERN_WARNING "Bad block table not found for chip %d\n", i); 611 pr_warn("Bad block table not found for chip %d\n", i);
655 else 612 else
656 printk(KERN_DEBUG "Bad block table found at page %d, version 0x%02X\n", td->pages[i], 613 pr_info("Bad block table found at page %d, version "
657 td->version[i]); 614 "0x%02X\n", td->pages[i], td->version[i]);
658 } 615 }
659 return 0; 616 return 0;
660} 617}
661 618
662/** 619/**
663 * search_read_bbts - [GENERIC] scan the device for bad block table(s) 620 * search_read_bbts - [GENERIC] scan the device for bad block table(s)
664 * @mtd: MTD device structure 621 * @mtd: MTD device structure
665 * @buf: temporary buffer 622 * @buf: temporary buffer
666 * @td: descriptor for the bad block table 623 * @td: descriptor for the bad block table
667 * @md: descriptor for the bad block table mirror 624 * @md: descriptor for the bad block table mirror
668 * 625 *
669 * Search and read the bad block table(s) 626 * Search and read the bad block table(s).
670*/ 627 */
671static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md) 628static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md)
672{ 629{
673 /* Search the primary table */ 630 /* Search the primary table */
@@ -683,16 +640,14 @@ static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt
683 640
684/** 641/**
685 * write_bbt - [GENERIC] (Re)write the bad block table 642 * write_bbt - [GENERIC] (Re)write the bad block table
643 * @mtd: MTD device structure
644 * @buf: temporary buffer
645 * @td: descriptor for the bad block table
646 * @md: descriptor for the bad block table mirror
647 * @chipsel: selector for a specific chip, -1 for all
686 * 648 *
687 * @mtd: MTD device structure 649 * (Re)write the bad block table.
688 * @buf: temporary buffer 650 */
689 * @td: descriptor for the bad block table
690 * @md: descriptor for the bad block table mirror
691 * @chipsel: selector for a specific chip, -1 for all
692 *
693 * (Re)write the bad block table
694 *
695*/
696static int write_bbt(struct mtd_info *mtd, uint8_t *buf, 651static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
697 struct nand_bbt_descr *td, struct nand_bbt_descr *md, 652 struct nand_bbt_descr *td, struct nand_bbt_descr *md,
698 int chipsel) 653 int chipsel)
@@ -711,14 +666,14 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
711 ops.ooblen = mtd->oobsize; 666 ops.ooblen = mtd->oobsize;
712 ops.ooboffs = 0; 667 ops.ooboffs = 0;
713 ops.datbuf = NULL; 668 ops.datbuf = NULL;
714 ops.mode = MTD_OOB_PLACE; 669 ops.mode = MTD_OPS_PLACE_OOB;
715 670
716 if (!rcode) 671 if (!rcode)
717 rcode = 0xff; 672 rcode = 0xff;
718 /* Write bad block table per chip rather than per device ? */ 673 /* Write bad block table per chip rather than per device? */
719 if (td->options & NAND_BBT_PERCHIP) { 674 if (td->options & NAND_BBT_PERCHIP) {
720 numblocks = (int)(this->chipsize >> this->bbt_erase_shift); 675 numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
721 /* Full device write or specific chip ? */ 676 /* Full device write or specific chip? */
722 if (chipsel == -1) { 677 if (chipsel == -1) {
723 nrchips = this->numchips; 678 nrchips = this->numchips;
724 } else { 679 } else {
@@ -732,8 +687,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
732 687
733 /* Loop through the chips */ 688 /* Loop through the chips */
734 for (; chip < nrchips; chip++) { 689 for (; chip < nrchips; chip++) {
735 690 /*
736 /* There was already a version of the table, reuse the page 691 * There was already a version of the table, reuse the page
737 * This applies for absolute placement too, as we have the 692 * This applies for absolute placement too, as we have the
738 * page nr. in td->pages. 693 * page nr. in td->pages.
739 */ 694 */
@@ -742,8 +697,10 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
742 goto write; 697 goto write;
743 } 698 }
744 699
745 /* Automatic placement of the bad block table */ 700 /*
746 /* Search direction top -> down ? */ 701 * Automatic placement of the bad block table. Search direction
702 * top -> down?
703 */
747 if (td->options & NAND_BBT_LASTBLOCK) { 704 if (td->options & NAND_BBT_LASTBLOCK) {
748 startblock = numblocks * (chip + 1) - 1; 705 startblock = numblocks * (chip + 1) - 1;
749 dir = -1; 706 dir = -1;
@@ -767,7 +724,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
767 if (!md || md->pages[chip] != page) 724 if (!md || md->pages[chip] != page)
768 goto write; 725 goto write;
769 } 726 }
770 printk(KERN_ERR "No space left to write bad block table\n"); 727 pr_err("No space left to write bad block table\n");
771 return -ENOSPC; 728 return -ENOSPC;
772 write: 729 write:
773 730
@@ -792,24 +749,22 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
792 749
793 bbtoffs = chip * (numblocks >> 2); 750 bbtoffs = chip * (numblocks >> 2);
794 751
795 to = ((loff_t) page) << this->page_shift; 752 to = ((loff_t)page) << this->page_shift;
796 753
797 /* Must we save the block contents ? */ 754 /* Must we save the block contents? */
798 if (td->options & NAND_BBT_SAVECONTENT) { 755 if (td->options & NAND_BBT_SAVECONTENT) {
799 /* Make it block aligned */ 756 /* Make it block aligned */
800 to &= ~((loff_t) ((1 << this->bbt_erase_shift) - 1)); 757 to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
801 len = 1 << this->bbt_erase_shift; 758 len = 1 << this->bbt_erase_shift;
802 res = mtd->read(mtd, to, len, &retlen, buf); 759 res = mtd->read(mtd, to, len, &retlen, buf);
803 if (res < 0) { 760 if (res < 0) {
804 if (retlen != len) { 761 if (retlen != len) {
805 printk(KERN_INFO "nand_bbt: Error " 762 pr_info("nand_bbt: error reading block "
806 "reading block for writing " 763 "for writing the bad block table\n");
807 "the bad block table\n");
808 return res; 764 return res;
809 } 765 }
810 printk(KERN_WARNING "nand_bbt: ECC error " 766 pr_warn("nand_bbt: ECC error while reading "
811 "while reading block for writing " 767 "block for writing bad block table\n");
812 "bad block table\n");
813 } 768 }
814 /* Read oob data */ 769 /* Read oob data */
815 ops.ooblen = (len >> this->page_shift) * mtd->oobsize; 770 ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
@@ -822,19 +777,19 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
822 pageoffs = page - (int)(to >> this->page_shift); 777 pageoffs = page - (int)(to >> this->page_shift);
823 offs = pageoffs << this->page_shift; 778 offs = pageoffs << this->page_shift;
824 /* Preset the bbt area with 0xff */ 779 /* Preset the bbt area with 0xff */
825 memset(&buf[offs], 0xff, (size_t) (numblocks >> sft)); 780 memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
826 ooboffs = len + (pageoffs * mtd->oobsize); 781 ooboffs = len + (pageoffs * mtd->oobsize);
827 782
828 } else if (td->options & NAND_BBT_NO_OOB) { 783 } else if (td->options & NAND_BBT_NO_OOB) {
829 ooboffs = 0; 784 ooboffs = 0;
830 offs = td->len; 785 offs = td->len;
831 /* the version byte */ 786 /* The version byte */
832 if (td->options & NAND_BBT_VERSION) 787 if (td->options & NAND_BBT_VERSION)
833 offs++; 788 offs++;
834 /* Calc length */ 789 /* Calc length */
835 len = (size_t) (numblocks >> sft); 790 len = (size_t)(numblocks >> sft);
836 len += offs; 791 len += offs;
837 /* Make it page aligned ! */ 792 /* Make it page aligned! */
838 len = ALIGN(len, mtd->writesize); 793 len = ALIGN(len, mtd->writesize);
839 /* Preset the buffer with 0xff */ 794 /* Preset the buffer with 0xff */
840 memset(buf, 0xff, len); 795 memset(buf, 0xff, len);
@@ -842,8 +797,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
842 memcpy(buf, td->pattern, td->len); 797 memcpy(buf, td->pattern, td->len);
843 } else { 798 } else {
844 /* Calc length */ 799 /* Calc length */
845 len = (size_t) (numblocks >> sft); 800 len = (size_t)(numblocks >> sft);
846 /* Make it page aligned ! */ 801 /* Make it page aligned! */
847 len = ALIGN(len, mtd->writesize); 802 len = ALIGN(len, mtd->writesize);
848 /* Preset the buffer with 0xff */ 803 /* Preset the buffer with 0xff */
849 memset(buf, 0xff, len + 804 memset(buf, 0xff, len +
@@ -857,13 +812,13 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
857 if (td->options & NAND_BBT_VERSION) 812 if (td->options & NAND_BBT_VERSION)
858 buf[ooboffs + td->veroffs] = td->version[chip]; 813 buf[ooboffs + td->veroffs] = td->version[chip];
859 814
860 /* walk through the memory table */ 815 /* Walk through the memory table */
861 for (i = 0; i < numblocks;) { 816 for (i = 0; i < numblocks;) {
862 uint8_t dat; 817 uint8_t dat;
863 dat = this->bbt[bbtoffs + (i >> 2)]; 818 dat = this->bbt[bbtoffs + (i >> 2)];
864 for (j = 0; j < 4; j++, i++) { 819 for (j = 0; j < 4; j++, i++) {
865 int sftcnt = (i << (3 - sft)) & sftmsk; 820 int sftcnt = (i << (3 - sft)) & sftmsk;
866 /* Do not store the reserved bbt blocks ! */ 821 /* Do not store the reserved bbt blocks! */
867 buf[offs + (i >> sft)] &= 822 buf[offs + (i >> sft)] &=
868 ~(msk[dat & 0x03] << sftcnt); 823 ~(msk[dat & 0x03] << sftcnt);
869 dat >>= 2; 824 dat >>= 2;
@@ -884,8 +839,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
884 if (res < 0) 839 if (res < 0)
885 goto outerr; 840 goto outerr;
886 841
887 printk(KERN_DEBUG "Bad block table written to 0x%012llx, version " 842 pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
888 "0x%02X\n", (unsigned long long)to, td->version[chip]); 843 (unsigned long long)to, td->version[chip]);
889 844
890 /* Mark it as used */ 845 /* Mark it as used */
891 td->pages[chip] = page; 846 td->pages[chip] = page;
@@ -893,19 +848,18 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
893 return 0; 848 return 0;
894 849
895 outerr: 850 outerr:
896 printk(KERN_WARNING 851 pr_warn("nand_bbt: error while writing bad block table %d\n", res);
897 "nand_bbt: Error while writing bad block table %d\n", res);
898 return res; 852 return res;
899} 853}
900 854
901/** 855/**
902 * nand_memory_bbt - [GENERIC] create a memory based bad block table 856 * nand_memory_bbt - [GENERIC] create a memory based bad block table
903 * @mtd: MTD device structure 857 * @mtd: MTD device structure
904 * @bd: descriptor for the good/bad block search pattern 858 * @bd: descriptor for the good/bad block search pattern
905 * 859 *
906 * The function creates a memory based bbt by scanning the device 860 * The function creates a memory based bbt by scanning the device for
907 * for manufacturer / software marked good / bad blocks 861 * manufacturer / software marked good / bad blocks.
908*/ 862 */
909static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) 863static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
910{ 864{
911 struct nand_chip *this = mtd->priv; 865 struct nand_chip *this = mtd->priv;
@@ -916,25 +870,24 @@ static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *b
916 870
917/** 871/**
918 * check_create - [GENERIC] create and write bbt(s) if necessary 872 * check_create - [GENERIC] create and write bbt(s) if necessary
919 * @mtd: MTD device structure 873 * @mtd: MTD device structure
920 * @buf: temporary buffer 874 * @buf: temporary buffer
921 * @bd: descriptor for the good/bad block search pattern 875 * @bd: descriptor for the good/bad block search pattern
922 * 876 *
923 * The function checks the results of the previous call to read_bbt 877 * The function checks the results of the previous call to read_bbt and creates
924 * and creates / updates the bbt(s) if necessary 878 * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found
925 * Creation is necessary if no bbt was found for the chip/device 879 * for the chip/device. Update is necessary if one of the tables is missing or
926 * Update is necessary if one of the tables is missing or the 880 * the version nr. of one table is less than the other.
927 * version nr. of one table is less than the other 881 */
928*/
929static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd) 882static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
930{ 883{
931 int i, chips, writeops, chipsel, res; 884 int i, chips, writeops, create, chipsel, res, res2;
932 struct nand_chip *this = mtd->priv; 885 struct nand_chip *this = mtd->priv;
933 struct nand_bbt_descr *td = this->bbt_td; 886 struct nand_bbt_descr *td = this->bbt_td;
934 struct nand_bbt_descr *md = this->bbt_md; 887 struct nand_bbt_descr *md = this->bbt_md;
935 struct nand_bbt_descr *rd, *rd2; 888 struct nand_bbt_descr *rd, *rd2;
936 889
937 /* Do we have a bbt per chip ? */ 890 /* Do we have a bbt per chip? */
938 if (td->options & NAND_BBT_PERCHIP) 891 if (td->options & NAND_BBT_PERCHIP)
939 chips = this->numchips; 892 chips = this->numchips;
940 else 893 else
@@ -942,86 +895,98 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
942 895
943 for (i = 0; i < chips; i++) { 896 for (i = 0; i < chips; i++) {
944 writeops = 0; 897 writeops = 0;
898 create = 0;
945 rd = NULL; 899 rd = NULL;
946 rd2 = NULL; 900 rd2 = NULL;
947 /* Per chip or per device ? */ 901 res = res2 = 0;
902 /* Per chip or per device? */
948 chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1; 903 chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
949 /* Mirrored table available ? */ 904 /* Mirrored table available? */
950 if (md) { 905 if (md) {
951 if (td->pages[i] == -1 && md->pages[i] == -1) { 906 if (td->pages[i] == -1 && md->pages[i] == -1) {
907 create = 1;
952 writeops = 0x03; 908 writeops = 0x03;
953 goto create; 909 } else if (td->pages[i] == -1) {
954 }
955
956 if (td->pages[i] == -1) {
957 rd = md; 910 rd = md;
958 td->version[i] = md->version[i]; 911 writeops = 0x01;
959 writeops = 1; 912 } else if (md->pages[i] == -1) {
960 goto writecheck;
961 }
962
963 if (md->pages[i] == -1) {
964 rd = td; 913 rd = td;
965 md->version[i] = td->version[i]; 914 writeops = 0x02;
966 writeops = 2; 915 } else if (td->version[i] == md->version[i]) {
967 goto writecheck;
968 }
969
970 if (td->version[i] == md->version[i]) {
971 rd = td; 916 rd = td;
972 if (!(td->options & NAND_BBT_VERSION)) 917 if (!(td->options & NAND_BBT_VERSION))
973 rd2 = md; 918 rd2 = md;
974 goto writecheck; 919 } else if (((int8_t)(td->version[i] - md->version[i])) > 0) {
975 }
976
977 if (((int8_t) (td->version[i] - md->version[i])) > 0) {
978 rd = td; 920 rd = td;
979 md->version[i] = td->version[i]; 921 writeops = 0x02;
980 writeops = 2;
981 } else { 922 } else {
982 rd = md; 923 rd = md;
983 td->version[i] = md->version[i]; 924 writeops = 0x01;
984 writeops = 1;
985 } 925 }
986
987 goto writecheck;
988
989 } else { 926 } else {
990 if (td->pages[i] == -1) { 927 if (td->pages[i] == -1) {
928 create = 1;
991 writeops = 0x01; 929 writeops = 0x01;
992 goto create; 930 } else {
931 rd = td;
993 } 932 }
994 rd = td;
995 goto writecheck;
996 } 933 }
997 create:
998 /* Create the bad block table by scanning the device ? */
999 if (!(td->options & NAND_BBT_CREATE))
1000 continue;
1001 934
1002 /* Create the table in memory by scanning the chip(s) */ 935 if (create) {
1003 if (!(this->options & NAND_CREATE_EMPTY_BBT)) 936 /* Create the bad block table by scanning the device? */
1004 create_bbt(mtd, buf, bd, chipsel); 937 if (!(td->options & NAND_BBT_CREATE))
1005 938 continue;
1006 td->version[i] = 1; 939
1007 if (md) 940 /* Create the table in memory by scanning the chip(s) */
1008 md->version[i] = 1; 941 if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))
1009 writecheck: 942 create_bbt(mtd, buf, bd, chipsel);
1010 /* read back first ? */ 943
1011 if (rd) 944 td->version[i] = 1;
1012 read_abs_bbt(mtd, buf, rd, chipsel); 945 if (md)
1013 /* If they weren't versioned, read both. */ 946 md->version[i] = 1;
1014 if (rd2) 947 }
1015 read_abs_bbt(mtd, buf, rd2, chipsel); 948
1016 949 /* Read back first? */
1017 /* Write the bad block table to the device ? */ 950 if (rd) {
951 res = read_abs_bbt(mtd, buf, rd, chipsel);
952 if (mtd_is_eccerr(res)) {
953 /* Mark table as invalid */
954 rd->pages[i] = -1;
955 rd->version[i] = 0;
956 i--;
957 continue;
958 }
959 }
960 /* If they weren't versioned, read both */
961 if (rd2) {
962 res2 = read_abs_bbt(mtd, buf, rd2, chipsel);
963 if (mtd_is_eccerr(res2)) {
964 /* Mark table as invalid */
965 rd2->pages[i] = -1;
966 rd2->version[i] = 0;
967 i--;
968 continue;
969 }
970 }
971
972 /* Scrub the flash table(s)? */
973 if (mtd_is_bitflip(res) || mtd_is_bitflip(res2))
974 writeops = 0x03;
975
976 /* Update version numbers before writing */
977 if (md) {
978 td->version[i] = max(td->version[i], md->version[i]);
979 md->version[i] = td->version[i];
980 }
981
982 /* Write the bad block table to the device? */
1018 if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) { 983 if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
1019 res = write_bbt(mtd, buf, td, md, chipsel); 984 res = write_bbt(mtd, buf, td, md, chipsel);
1020 if (res < 0) 985 if (res < 0)
1021 return res; 986 return res;
1022 } 987 }
1023 988
1024 /* Write the mirror bad block table to the device ? */ 989 /* Write the mirror bad block table to the device? */
1025 if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) { 990 if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
1026 res = write_bbt(mtd, buf, md, td, chipsel); 991 res = write_bbt(mtd, buf, md, td, chipsel);
1027 if (res < 0) 992 if (res < 0)
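
A couple of details in the rewritten check_create() above are easy to misread: writeops is a bit mask (0x01 writes the main table, 0x02 the mirror, 0x03 both, and a bitflip reported by either read forces 0x03 so the on-flash copies get scrubbed), and the version comparison casts the unsigned difference to int8_t, presumably so a version counter that has wrapped past 0xff still compares as newer. Below is a standalone sketch of that comparison only; td_is_newer() is an illustrative helper, not a kernel function.

/* Standalone illustration (not kernel code) of the wraparound-safe
 * "is the main table newer?" test, mirroring
 * ((int8_t)(td->version[i] - md->version[i])) > 0 above. */
#include <stdint.h>
#include <stdio.h>

static int td_is_newer(uint8_t td_ver, uint8_t md_ver)
{
	return ((int8_t)(td_ver - md_ver)) > 0;
}

int main(void)
{
	printf("td=0x02 md=0x01 -> %d\n", td_is_newer(0x02, 0x01)); /* 1 */
	printf("td=0x01 md=0xff -> %d\n", td_is_newer(0x01, 0xff)); /* 1: td wrapped */
	printf("td=0xff md=0x01 -> %d\n", td_is_newer(0xff, 0x01)); /* 0: md wrapped */
	return 0;
}
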
@@ -1033,20 +998,19 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
1033 998
1034/** 999/**
1035 * mark_bbt_regions - [GENERIC] mark the bad block table regions 1000 * mark_bbt_regions - [GENERIC] mark the bad block table regions
1036 * @mtd: MTD device structure 1001 * @mtd: MTD device structure
1037 * @td: bad block table descriptor 1002 * @td: bad block table descriptor
1038 * 1003 *
1039 * The bad block table regions are marked as "bad" to prevent 1004 * The bad block table regions are marked as "bad" to prevent accidental
1040 * accidental erasures / writes. The regions are identified by 1005 * erasures / writes. The regions are identified by the mark 0x02.
1041 * the mark 0x02. 1006 */
1042*/
1043static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td) 1007static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
1044{ 1008{
1045 struct nand_chip *this = mtd->priv; 1009 struct nand_chip *this = mtd->priv;
1046 int i, j, chips, block, nrblocks, update; 1010 int i, j, chips, block, nrblocks, update;
1047 uint8_t oldval, newval; 1011 uint8_t oldval, newval;
1048 1012
1049 /* Do we have a bbt per chip ? */ 1013 /* Do we have a bbt per chip? */
1050 if (td->options & NAND_BBT_PERCHIP) { 1014 if (td->options & NAND_BBT_PERCHIP) {
1051 chips = this->numchips; 1015 chips = this->numchips;
1052 nrblocks = (int)(this->chipsize >> this->bbt_erase_shift); 1016 nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
@@ -1083,9 +1047,11 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
1083 update = 1; 1047 update = 1;
1084 block += 2; 1048 block += 2;
1085 } 1049 }
1086 /* If we want reserved blocks to be recorded to flash, and some 1050 /*
1087 new ones have been marked, then we need to update the stored 1051 * If we want reserved blocks to be recorded to flash, and some
1088 bbts. This should only happen once. */ 1052 * new ones have been marked, then we need to update the stored
1053 * bbts. This should only happen once.
1054 */
1089 if (update && td->reserved_block_code) 1055 if (update && td->reserved_block_code)
1090 nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1)); 1056 nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1));
1091 } 1057 }
@@ -1093,8 +1059,8 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
1093 1059
1094/** 1060/**
1095 * verify_bbt_descr - verify the bad block description 1061 * verify_bbt_descr - verify the bad block description
1096 * @mtd: MTD device structure 1062 * @mtd: MTD device structure
1097 * @bd: the table to verify 1063 * @bd: the table to verify
1098 * 1064 *
1099 * This function performs a few sanity checks on the bad block description 1065 * This function performs a few sanity checks
1100 * table. 1066 * table.
@@ -1112,16 +1078,16 @@ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1112 pattern_len = bd->len; 1078 pattern_len = bd->len;
1113 bits = bd->options & NAND_BBT_NRBITS_MSK; 1079 bits = bd->options & NAND_BBT_NRBITS_MSK;
1114 1080
1115 BUG_ON((this->options & NAND_USE_FLASH_BBT_NO_OOB) && 1081 BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) &&
1116 !(this->options & NAND_USE_FLASH_BBT)); 1082 !(this->bbt_options & NAND_BBT_USE_FLASH));
1117 BUG_ON(!bits); 1083 BUG_ON(!bits);
1118 1084
1119 if (bd->options & NAND_BBT_VERSION) 1085 if (bd->options & NAND_BBT_VERSION)
1120 pattern_len++; 1086 pattern_len++;
1121 1087
1122 if (bd->options & NAND_BBT_NO_OOB) { 1088 if (bd->options & NAND_BBT_NO_OOB) {
1123 BUG_ON(!(this->options & NAND_USE_FLASH_BBT)); 1089 BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH));
1124 BUG_ON(!(this->options & NAND_USE_FLASH_BBT_NO_OOB)); 1090 BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB));
1125 BUG_ON(bd->offs); 1091 BUG_ON(bd->offs);
1126 if (bd->options & NAND_BBT_VERSION) 1092 if (bd->options & NAND_BBT_VERSION)
1127 BUG_ON(bd->veroffs != bd->len); 1093 BUG_ON(bd->veroffs != bd->len);
@@ -1141,18 +1107,16 @@ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1141 1107
1142/** 1108/**
1143 * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s) 1109 * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
1144 * @mtd: MTD device structure 1110 * @mtd: MTD device structure
1145 * @bd: descriptor for the good/bad block search pattern 1111 * @bd: descriptor for the good/bad block search pattern
1146 *
1147 * The function checks if a bad block table(s) is/are already
1148 * available. If not it scans the device for manufacturer
1149 * marked good / bad blocks and writes the bad block table(s) to
1150 * the selected place.
1151 * 1112 *
1152 * The bad block table memory is allocated here. It must be freed 1113 * The function checks if a bad block table(s) is/are already available. If
1153 * by calling the nand_free_bbt function. 1114 * not it scans the device for manufacturer marked good / bad blocks and writes
1115 * the bad block table(s) to the selected place.
1154 * 1116 *
1155*/ 1117 * The bad block table memory is allocated here. It must be freed by calling
1118 * the nand_free_bbt function.
1119 */
1156int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) 1120int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1157{ 1121{
1158 struct nand_chip *this = mtd->priv; 1122 struct nand_chip *this = mtd->priv;
@@ -1162,19 +1126,21 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1162 struct nand_bbt_descr *md = this->bbt_md; 1126 struct nand_bbt_descr *md = this->bbt_md;
1163 1127
1164 len = mtd->size >> (this->bbt_erase_shift + 2); 1128 len = mtd->size >> (this->bbt_erase_shift + 2);
1165 /* Allocate memory (2bit per block) and clear the memory bad block table */ 1129 /*
1130 * Allocate memory (2bit per block) and clear the memory bad block
1131 * table.
1132 */
1166 this->bbt = kzalloc(len, GFP_KERNEL); 1133 this->bbt = kzalloc(len, GFP_KERNEL);
1167 if (!this->bbt) { 1134 if (!this->bbt)
1168 printk(KERN_ERR "nand_scan_bbt: Out of memory\n");
1169 return -ENOMEM; 1135 return -ENOMEM;
1170 }
1171 1136
1172 /* If no primary table descriptor is given, scan the device 1137 /*
1173 * to build a memory based bad block table 1138 * If no primary table descriptor is given, scan the device to build a
1139 * memory based bad block table.
1174 */ 1140 */
1175 if (!td) { 1141 if (!td) {
1176 if ((res = nand_memory_bbt(mtd, bd))) { 1142 if ((res = nand_memory_bbt(mtd, bd))) {
1177 printk(KERN_ERR "nand_bbt: Can't scan flash and build the RAM-based BBT\n"); 1143 pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n");
1178 kfree(this->bbt); 1144 kfree(this->bbt);
1179 this->bbt = NULL; 1145 this->bbt = NULL;
1180 } 1146 }
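
For scale, the nand_scan_bbt() allocation above sizes the in-RAM table at mtd->size >> (bbt_erase_shift + 2): two bits per erase block, i.e. one byte for every four blocks. A standalone check of that arithmetic with a made-up geometry (256 MiB device, 128 KiB erase blocks):

/* Standalone illustration; the device geometry is an example only. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t size = 256ULL << 20;		/* example device size */
	unsigned bbt_erase_shift = 17;		/* 128 KiB erase block */
	uint64_t blocks = size >> bbt_erase_shift;
	uint64_t len = size >> (bbt_erase_shift + 2);	/* same shift as above */

	printf("%llu blocks -> %llu bytes of RAM bbt\n",
	       (unsigned long long)blocks, (unsigned long long)len);
	return 0;
}
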
@@ -1188,13 +1154,12 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1188 len += (len >> this->page_shift) * mtd->oobsize; 1154 len += (len >> this->page_shift) * mtd->oobsize;
1189 buf = vmalloc(len); 1155 buf = vmalloc(len);
1190 if (!buf) { 1156 if (!buf) {
1191 printk(KERN_ERR "nand_bbt: Out of memory\n");
1192 kfree(this->bbt); 1157 kfree(this->bbt);
1193 this->bbt = NULL; 1158 this->bbt = NULL;
1194 return -ENOMEM; 1159 return -ENOMEM;
1195 } 1160 }
1196 1161
1197 /* Is the bbt at a given page ? */ 1162 /* Is the bbt at a given page? */
1198 if (td->options & NAND_BBT_ABSPAGE) { 1163 if (td->options & NAND_BBT_ABSPAGE) {
1199 res = read_abs_bbts(mtd, buf, td, md); 1164 res = read_abs_bbts(mtd, buf, td, md);
1200 } else { 1165 } else {
@@ -1216,15 +1181,15 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1216 1181
1217/** 1182/**
1218 * nand_update_bbt - [NAND Interface] update bad block table(s) 1183 * nand_update_bbt - [NAND Interface] update bad block table(s)
1219 * @mtd: MTD device structure 1184 * @mtd: MTD device structure
1220 * @offs: the offset of the newly marked block 1185 * @offs: the offset of the newly marked block
1221 * 1186 *
1222 * The function updates the bad block table(s) 1187 * The function updates the bad block table(s).
1223*/ 1188 */
1224int nand_update_bbt(struct mtd_info *mtd, loff_t offs) 1189int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
1225{ 1190{
1226 struct nand_chip *this = mtd->priv; 1191 struct nand_chip *this = mtd->priv;
1227 int len, res = 0, writeops = 0; 1192 int len, res = 0;
1228 int chip, chipsel; 1193 int chip, chipsel;
1229 uint8_t *buf; 1194 uint8_t *buf;
1230 struct nand_bbt_descr *td = this->bbt_td; 1195 struct nand_bbt_descr *td = this->bbt_td;
@@ -1237,14 +1202,10 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
1237 len = (1 << this->bbt_erase_shift); 1202 len = (1 << this->bbt_erase_shift);
1238 len += (len >> this->page_shift) * mtd->oobsize; 1203 len += (len >> this->page_shift) * mtd->oobsize;
1239 buf = kmalloc(len, GFP_KERNEL); 1204 buf = kmalloc(len, GFP_KERNEL);
1240 if (!buf) { 1205 if (!buf)
1241 printk(KERN_ERR "nand_update_bbt: Out of memory\n");
1242 return -ENOMEM; 1206 return -ENOMEM;
1243 }
1244
1245 writeops = md != NULL ? 0x03 : 0x01;
1246 1207
1247 /* Do we have a bbt per chip ? */ 1208 /* Do we have a bbt per chip? */
1248 if (td->options & NAND_BBT_PERCHIP) { 1209 if (td->options & NAND_BBT_PERCHIP) {
1249 chip = (int)(offs >> this->chip_shift); 1210 chip = (int)(offs >> this->chip_shift);
1250 chipsel = chip; 1211 chipsel = chip;
@@ -1257,14 +1218,14 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
1257 if (md) 1218 if (md)
1258 md->version[chip]++; 1219 md->version[chip]++;
1259 1220
1260 /* Write the bad block table to the device ? */ 1221 /* Write the bad block table to the device? */
1261 if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) { 1222 if (td->options & NAND_BBT_WRITE) {
1262 res = write_bbt(mtd, buf, td, md, chipsel); 1223 res = write_bbt(mtd, buf, td, md, chipsel);
1263 if (res < 0) 1224 if (res < 0)
1264 goto out; 1225 goto out;
1265 } 1226 }
1266 /* Write the mirror bad block table to the device ? */ 1227 /* Write the mirror bad block table to the device? */
1267 if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) { 1228 if (md && (md->options & NAND_BBT_WRITE)) {
1268 res = write_bbt(mtd, buf, md, td, chipsel); 1229 res = write_bbt(mtd, buf, md, td, chipsel);
1269 } 1230 }
1270 1231
@@ -1273,8 +1234,10 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
1273 return res; 1234 return res;
1274} 1235}
1275 1236
1276/* Define some generic bad / good block scan pattern which are used 1237/*
1277 * while scanning a device for factory marked good / bad blocks. */ 1238 * Define some generic bad / good block scan pattern which are used
1239 * while scanning a device for factory marked good / bad blocks.
1240 */
1278static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; 1241static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
1279 1242
1280static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 }; 1243static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
@@ -1286,8 +1249,7 @@ static struct nand_bbt_descr agand_flashbased = {
1286 .pattern = scan_agand_pattern 1249 .pattern = scan_agand_pattern
1287}; 1250};
1288 1251
1289/* Generic flash bbt decriptors 1252/* Generic flash bbt descriptors */
1290*/
1291static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' }; 1253static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
1292static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' }; 1254static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
1293 1255
@@ -1331,31 +1293,27 @@ static struct nand_bbt_descr bbt_mirror_no_bbt_descr = {
1331 .pattern = mirror_pattern 1293 .pattern = mirror_pattern
1332}; 1294};
1333 1295
1334#define BBT_SCAN_OPTIONS (NAND_BBT_SCANLASTPAGE | NAND_BBT_SCAN2NDPAGE | \ 1296#define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB)
1335 NAND_BBT_SCANBYTE1AND6)
1336/** 1297/**
1337 * nand_create_default_bbt_descr - [Internal] Creates a BBT descriptor structure 1298 * nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure
1338 * @this: NAND chip to create descriptor for 1299 * @this: NAND chip to create descriptor for
1339 * 1300 *
1340 * This function allocates and initializes a nand_bbt_descr for BBM detection 1301 * This function allocates and initializes a nand_bbt_descr for BBM detection
1341 * based on the properties of "this". The new descriptor is stored in 1302 * based on the properties of @this. The new descriptor is stored in
1342 * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when 1303 * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
1343 * passed to this function. 1304 * passed to this function.
1344 *
1345 */ 1305 */
1346static int nand_create_default_bbt_descr(struct nand_chip *this) 1306static int nand_create_badblock_pattern(struct nand_chip *this)
1347{ 1307{
1348 struct nand_bbt_descr *bd; 1308 struct nand_bbt_descr *bd;
1349 if (this->badblock_pattern) { 1309 if (this->badblock_pattern) {
1350 printk(KERN_WARNING "BBT descr already allocated; not replacing.\n"); 1310 pr_warn("Bad block pattern already allocated; not replacing\n");
1351 return -EINVAL; 1311 return -EINVAL;
1352 } 1312 }
1353 bd = kzalloc(sizeof(*bd), GFP_KERNEL); 1313 bd = kzalloc(sizeof(*bd), GFP_KERNEL);
1354 if (!bd) { 1314 if (!bd)
1355 printk(KERN_ERR "nand_create_default_bbt_descr: Out of memory\n");
1356 return -ENOMEM; 1315 return -ENOMEM;
1357 } 1316 bd->options = this->bbt_options & BADBLOCK_SCAN_MASK;
1358 bd->options = this->options & BBT_SCAN_OPTIONS;
1359 bd->offs = this->badblockpos; 1317 bd->offs = this->badblockpos;
1360 bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1; 1318 bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1;
1361 bd->pattern = scan_ff_pattern; 1319 bd->pattern = scan_ff_pattern;
@@ -1366,22 +1324,20 @@ static int nand_create_default_bbt_descr(struct nand_chip *this)
1366 1324
1367/** 1325/**
1368 * nand_default_bbt - [NAND Interface] Select a default bad block table for the device 1326 * nand_default_bbt - [NAND Interface] Select a default bad block table for the device
1369 * @mtd: MTD device structure 1327 * @mtd: MTD device structure
1370 *
1371 * This function selects the default bad block table
1372 * support for the device and calls the nand_scan_bbt function
1373 * 1328 *
1374*/ 1329 * This function selects the default bad block table support for the device and
1330 * calls the nand_scan_bbt function.
1331 */
1375int nand_default_bbt(struct mtd_info *mtd) 1332int nand_default_bbt(struct mtd_info *mtd)
1376{ 1333{
1377 struct nand_chip *this = mtd->priv; 1334 struct nand_chip *this = mtd->priv;
1378 1335
1379 /* Default for AG-AND. We must use a flash based 1336 /*
1380 * bad block table as the devices have factory marked 1337 * Default for AG-AND. We must use a flash based bad block table as the
1381 * _good_ blocks. Erasing those blocks leads to loss 1338 * devices have factory marked _good_ blocks. Erasing those blocks
1382 * of the good / bad information, so we _must_ store 1339 * leads to loss of the good / bad information, so we _must_ store this
1383 * this information in a good / bad table during 1340 * information in a good / bad table during startup.
1384 * startup
1385 */ 1341 */
1386 if (this->options & NAND_IS_AND) { 1342 if (this->options & NAND_IS_AND) {
1387 /* Use the default pattern descriptors */ 1343 /* Use the default pattern descriptors */
@@ -1389,15 +1345,15 @@ int nand_default_bbt(struct mtd_info *mtd)
1389 this->bbt_td = &bbt_main_descr; 1345 this->bbt_td = &bbt_main_descr;
1390 this->bbt_md = &bbt_mirror_descr; 1346 this->bbt_md = &bbt_mirror_descr;
1391 } 1347 }
1392 this->options |= NAND_USE_FLASH_BBT; 1348 this->bbt_options |= NAND_BBT_USE_FLASH;
1393 return nand_scan_bbt(mtd, &agand_flashbased); 1349 return nand_scan_bbt(mtd, &agand_flashbased);
1394 } 1350 }
1395 1351
1396 /* Is a flash based bad block table requested ? */ 1352 /* Is a flash based bad block table requested? */
1397 if (this->options & NAND_USE_FLASH_BBT) { 1353 if (this->bbt_options & NAND_BBT_USE_FLASH) {
1398 /* Use the default pattern descriptors */ 1354 /* Use the default pattern descriptors */
1399 if (!this->bbt_td) { 1355 if (!this->bbt_td) {
1400 if (this->options & NAND_USE_FLASH_BBT_NO_OOB) { 1356 if (this->bbt_options & NAND_BBT_NO_OOB) {
1401 this->bbt_td = &bbt_main_no_bbt_descr; 1357 this->bbt_td = &bbt_main_no_bbt_descr;
1402 this->bbt_md = &bbt_mirror_no_bbt_descr; 1358 this->bbt_md = &bbt_mirror_no_bbt_descr;
1403 } else { 1359 } else {
@@ -1411,18 +1367,17 @@ int nand_default_bbt(struct mtd_info *mtd)
1411 } 1367 }
1412 1368
1413 if (!this->badblock_pattern) 1369 if (!this->badblock_pattern)
1414 nand_create_default_bbt_descr(this); 1370 nand_create_badblock_pattern(this);
1415 1371
1416 return nand_scan_bbt(mtd, this->badblock_pattern); 1372 return nand_scan_bbt(mtd, this->badblock_pattern);
1417} 1373}
1418 1374
1419/** 1375/**
1420 * nand_isbad_bbt - [NAND Interface] Check if a block is bad 1376 * nand_isbad_bbt - [NAND Interface] Check if a block is bad
1421 * @mtd: MTD device structure 1377 * @mtd: MTD device structure
1422 * @offs: offset in the device 1378 * @offs: offset in the device
1423 * @allowbbt: allow access to bad block table region 1379 * @allowbbt: allow access to bad block table region
1424 * 1380 */
1425*/
1426int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt) 1381int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
1427{ 1382{
1428 struct nand_chip *this = mtd->priv; 1383 struct nand_chip *this = mtd->priv;
@@ -1433,8 +1388,9 @@ int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
1433 block = (int)(offs >> (this->bbt_erase_shift - 1)); 1388 block = (int)(offs >> (this->bbt_erase_shift - 1));
1434 res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03; 1389 res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
1435 1390
1436 DEBUG(MTD_DEBUG_LEVEL2, "nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n", 1391 pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: "
1437 (unsigned int)offs, block >> 1, res); 1392 "(block %d) 0x%02x\n",
1393 (unsigned int)offs, block >> 1, res);
1438 1394
1439 switch ((int)res) { 1395 switch ((int)res) {
1440 case 0x00: 1396 case 0x00:
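
The nand_isbad_bbt() lookup above doubles the block number (offs >> (bbt_erase_shift - 1)) because the RAM table packs two bits per block; 0x00 means a good block and the non-zero codes are interpreted by the switch that follows. The same packing, written per block as a standalone sketch with a made-up table:

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch: 2-bit code for block 'blk' lives in byte blk/4 at
 * bit offset 2*(blk%4). Equivalent to the bbt[block >> 3] >> (block & 0x06)
 * form above, where block = 2 * blk. */
static unsigned bbt_code(const uint8_t *bbt, unsigned blk)
{
	return (bbt[blk >> 2] >> ((blk & 0x3) << 1)) & 0x3;
}

int main(void)
{
	uint8_t bbt[1] = { 0xe4 };	/* example: blocks 0..3 -> 00, 01, 10, 11 */
	unsigned blk;

	for (blk = 0; blk < 4; blk++)
		printf("block %u -> code 0x%x\n", blk, bbt_code(bbt, blk));
	return 0;
}
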
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c
index 0f931e757116..3803e0bba23b 100644
--- a/drivers/mtd/nand/nand_bch.c
+++ b/drivers/mtd/nand/nand_bch.c
@@ -93,8 +93,8 @@ int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
93 buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7)); 93 buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7));
94 /* else error in ecc, no action needed */ 94 /* else error in ecc, no action needed */
95 95
96 DEBUG(MTD_DEBUG_LEVEL0, "%s: corrected bitflip %u\n", 96 pr_debug("%s: corrected bitflip %u\n", __func__,
97 __func__, errloc[i]); 97 errloc[i]);
98 } 98 }
99 } else if (count < 0) { 99 } else if (count < 0) {
100 printk(KERN_ERR "ecc unrecoverable error\n"); 100 printk(KERN_ERR "ecc unrecoverable error\n");
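
The BCH correction loop above treats each errloc[] entry as an absolute bit position in the page buffer: the byte index is the position shifted right by three, and the XOR toggles the corresponding bit. A minimal standalone version of just that step (buffer contents and bit position are arbitrary examples):

#include <stdint.h>
#include <stdio.h>

/* Standalone illustration of the correction step above. */
static void flip_bit(uint8_t *buf, unsigned bitpos)
{
	buf[bitpos >> 3] ^= (uint8_t)(1 << (bitpos & 7));
}

int main(void)
{
	uint8_t buf[2] = { 0x00, 0x00 };

	flip_bit(buf, 10);				/* bit 2 of byte 1 */
	printf("buf = %02x %02x\n", buf[0], buf[1]);	/* prints: 00 04 */
	return 0;
}
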
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 271b8e735e8f..b7cfe0d37121 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -110,7 +110,7 @@ static const char bitsperbyte[256] = {
110 110
111/* 111/*
112 * addressbits is a lookup table to filter out the bits from the xor-ed 112 * addressbits is a lookup table to filter out the bits from the xor-ed
113 * ecc data that identify the faulty location. 113 * ECC data that identify the faulty location.
114 * this is only used for repairing parity 114 * this is only used for repairing parity
115 * see the comments in nand_correct_data for more details 115 * see the comments in nand_correct_data for more details
116 */ 116 */
@@ -153,7 +153,7 @@ static const char addressbits[256] = {
153 * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte 153 * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
154 * block 154 * block
155 * @buf: input buffer with raw data 155 * @buf: input buffer with raw data
156 * @eccsize: data bytes per ecc step (256 or 512) 156 * @eccsize: data bytes per ECC step (256 or 512)
157 * @code: output buffer with ECC 157 * @code: output buffer with ECC
158 */ 158 */
159void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize, 159void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
@@ -348,7 +348,7 @@ void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
348 rp17 = (par ^ rp16) & 0xff; 348 rp17 = (par ^ rp16) & 0xff;
349 349
350 /* 350 /*
351 * Finally calculate the ecc bits. 351 * Finally calculate the ECC bits.
352 * Again here it might seem that there are performance optimisations 352 * Again here it might seem that there are performance optimisations
353 * possible, but benchmarks showed that on the system this is developed 353 * possible, but benchmarks showed that on the system this is developed
354 * the code below is the fastest 354 * the code below is the fastest
@@ -436,7 +436,7 @@ EXPORT_SYMBOL(nand_calculate_ecc);
436 * @buf: raw data read from the chip 436 * @buf: raw data read from the chip
437 * @read_ecc: ECC from the chip 437 * @read_ecc: ECC from the chip
438 * @calc_ecc: the ECC calculated from raw data 438 * @calc_ecc: the ECC calculated from raw data
439 * @eccsize: data bytes per ecc step (256 or 512) 439 * @eccsize: data bytes per ECC step (256 or 512)
440 * 440 *
441 * Detect and correct a 1 bit error for eccsize byte block 441 * Detect and correct a 1 bit error for eccsize byte block
442 */ 442 */
@@ -505,7 +505,7 @@ int __nand_correct_data(unsigned char *buf,
505 } 505 }
506 /* count nr of bits; use table lookup, faster than calculating it */ 506 /* count nr of bits; use table lookup, faster than calculating it */
507 if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1) 507 if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
508 return 1; /* error in ecc data; no action needed */ 508 return 1; /* error in ECC data; no action needed */
509 509
510 printk(KERN_ERR "uncorrectable error : "); 510 printk(KERN_ERR "uncorrectable error : ");
511 return -1; 511 return -1;
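
For context on the hunk above: __nand_correct_data() XORs the ECC bytes read from flash with the ECC recomputed from the data, and when exactly one bit of that XOR is set the flipped bit was inside the stored ECC, so the data itself needs no repair. A standalone sketch of only that test, using the compiler's popcount builtin where the driver uses its bitsperbyte[] table; the ECC byte values are invented:

#include <stdint.h>
#include <stdio.h>

/* Standalone illustration: 1 when the single-bit error sits in the ECC
 * bytes themselves rather than in the data. */
static int error_in_ecc(const uint8_t *read_ecc, const uint8_t *calc_ecc)
{
	int i, bits = 0;

	for (i = 0; i < 3; i++)
		bits += __builtin_popcount(read_ecc[i] ^ calc_ecc[i]);
	return bits == 1;
}

int main(void)
{
	uint8_t stored[3] = { 0xa5, 0x5a, 0xff };
	uint8_t calc[3]   = { 0xa5, 0x5a, 0xfe };	/* one bit differs */

	printf("error in ECC itself: %d\n", error_in_ecc(stored, calc));
	return 0;
}
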
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 357e8c5252a8..34c03be77301 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -2273,9 +2273,9 @@ static int __init ns_init_module(void)
2273 2273
2274 switch (bbt) { 2274 switch (bbt) {
2275 case 2: 2275 case 2:
2276 chip->options |= NAND_USE_FLASH_BBT_NO_OOB; 2276 chip->bbt_options |= NAND_BBT_NO_OOB;
2277 case 1: 2277 case 1:
2278 chip->options |= NAND_USE_FLASH_BBT; 2278 chip->bbt_options |= NAND_BBT_USE_FLASH;
2279 case 0: 2279 case 0:
2280 break; 2280 break;
2281 default: 2281 default:
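
The nandsim hunk above also shows where BBT flags live after this series: NAND_BBT_USE_FLASH and NAND_BBT_NO_OOB move into the new chip->bbt_options word instead of chip->options, and case 2 deliberately falls through to case 1 so that "no OOB" always implies a flash-based table. A standalone sketch of that mapping; the flag values below are placeholders, not the real definitions from the kernel headers:

#include <stdio.h>

#define MY_BBT_USE_FLASH	0x1	/* placeholder value, illustration only */
#define MY_BBT_NO_OOB		0x2	/* placeholder value, illustration only */

static unsigned bbt_mode_to_options(int bbt)
{
	unsigned bbt_options = 0;

	switch (bbt) {
	case 2:
		bbt_options |= MY_BBT_NO_OOB;
		/* fall through: no-OOB storage still needs the flash BBT */
	case 1:
		bbt_options |= MY_BBT_USE_FLASH;
	case 0:
		break;
	}
	return bbt_options;
}

int main(void)
{
	int bbt;

	for (bbt = 0; bbt <= 2; bbt++)
		printf("bbt=%d -> bbt_options=0x%x\n", bbt, bbt_mode_to_options(bbt));
	return 0;
}
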
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index ea2dea8a9c88..ee1713907b92 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -42,7 +42,6 @@ struct ndfc_controller {
42 struct nand_chip chip; 42 struct nand_chip chip;
43 int chip_select; 43 int chip_select;
44 struct nand_hw_control ndfc_control; 44 struct nand_hw_control ndfc_control;
45 struct mtd_partition *parts;
46}; 45};
47 46
48static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS]; 47static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
@@ -159,13 +158,9 @@ static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
159static int ndfc_chip_init(struct ndfc_controller *ndfc, 158static int ndfc_chip_init(struct ndfc_controller *ndfc,
160 struct device_node *node) 159 struct device_node *node)
161{ 160{
162#ifdef CONFIG_MTD_CMDLINE_PARTS
163 static const char *part_types[] = { "cmdlinepart", NULL };
164#else
165 static const char *part_types[] = { NULL };
166#endif
167 struct device_node *flash_np; 161 struct device_node *flash_np;
168 struct nand_chip *chip = &ndfc->chip; 162 struct nand_chip *chip = &ndfc->chip;
163 struct mtd_part_parser_data ppdata;
169 int ret; 164 int ret;
170 165
171 chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA; 166 chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA;
@@ -193,6 +188,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
193 if (!flash_np) 188 if (!flash_np)
194 return -ENODEV; 189 return -ENODEV;
195 190
191 ppdata.of_node = flash_np;
196 ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s", 192 ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s",
197 dev_name(&ndfc->ofdev->dev), flash_np->name); 193 dev_name(&ndfc->ofdev->dev), flash_np->name);
198 if (!ndfc->mtd.name) { 194 if (!ndfc->mtd.name) {
@@ -204,18 +200,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
204 if (ret) 200 if (ret)
205 goto err; 201 goto err;
206 202
207 ret = parse_mtd_partitions(&ndfc->mtd, part_types, &ndfc->parts, 0); 203 ret = mtd_device_parse_register(&ndfc->mtd, NULL, &ppdata, NULL, 0);
208 if (ret < 0)
209 goto err;
210
211 if (ret == 0) {
212 ret = of_mtd_parse_partitions(&ndfc->ofdev->dev, flash_np,
213 &ndfc->parts);
214 if (ret < 0)
215 goto err;
216 }
217
218 ret = mtd_device_register(&ndfc->mtd, ndfc->parts, ret);
219 204
220err: 205err:
221 of_node_put(flash_np); 206 of_node_put(flash_np);
@@ -288,6 +273,7 @@ static int __devexit ndfc_remove(struct platform_device *ofdev)
288 struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev); 273 struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
289 274
290 nand_release(&ndfc->mtd); 275 nand_release(&ndfc->mtd);
276 kfree(ndfc->mtd.name);
291 277
292 return 0; 278 return 0;
293} 279}
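
The ndfc conversion above is the pattern repeated across the drivers in this series: the parse_mtd_partitions() + mtd_device_register() sequence collapses into a single mtd_device_parse_register() call, with the device-tree node handed over through struct mtd_part_parser_data. A hedged sketch of that shape for some hypothetical driver; my_register_mtd, my_mtd, flash_np and fallback_parts are made-up names, and this is kernel-context code, not a standalone program:

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>

/* Illustrative helper only; names do not exist in the tree. */
static int my_register_mtd(struct mtd_info *my_mtd,
			   struct device_node *flash_np,
			   const struct mtd_partition *fallback_parts,
			   int nr_parts)
{
	/* Let the partition parsers see the OF node, as ndfc does above. */
	struct mtd_part_parser_data ppdata = { .of_node = flash_np };

	/*
	 * NULL probe types selects the core's default parser list; the static
	 * fallback_parts table is only used if no parser returns partitions.
	 */
	return mtd_device_parse_register(my_mtd, NULL, &ppdata,
					 fallback_parts, nr_parts);
}
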
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index b6a5c86ab31e..b463ecfb4c1a 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -187,6 +187,7 @@ static int nomadik_nand_remove(struct platform_device *pdev)
187 pdata->exit(); 187 pdata->exit();
188 188
189 if (host) { 189 if (host) {
190 nand_release(&host->mtd);
190 iounmap(host->cmd_va); 191 iounmap(host->cmd_va);
191 iounmap(host->data_va); 192 iounmap(host->data_va);
192 iounmap(host->addr_va); 193 iounmap(host->addr_va);
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 9c30a0b03171..fa8faedfad6e 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -339,6 +339,7 @@ static int __devexit nuc900_nand_remove(struct platform_device *pdev)
339 struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev); 339 struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
340 struct resource *res; 340 struct resource *res;
341 341
342 nand_release(&nuc900_nand->mtd);
342 iounmap(nuc900_nand->reg); 343 iounmap(nuc900_nand->reg);
343 344
344 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 345 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index ec22a5aab038..f745f00f3167 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -95,8 +95,6 @@
95#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0) 95#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
96#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1) 96#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
97 97
98static const char *part_probes[] = { "cmdlinepart", NULL };
99
100/* oob info generated runtime depending on ecc algorithm and layout selected */ 98/* oob info generated runtime depending on ecc algorithm and layout selected */
101static struct nand_ecclayout omap_oobinfo; 99static struct nand_ecclayout omap_oobinfo;
102/* Define some generic bad / good block scan pattern which are used 100/* Define some generic bad / good block scan pattern which are used
@@ -115,7 +113,6 @@ struct omap_nand_info {
115 struct nand_hw_control controller; 113 struct nand_hw_control controller;
116 struct omap_nand_platform_data *pdata; 114 struct omap_nand_platform_data *pdata;
117 struct mtd_info mtd; 115 struct mtd_info mtd;
118 struct mtd_partition *parts;
119 struct nand_chip nand; 116 struct nand_chip nand;
120 struct platform_device *pdev; 117 struct platform_device *pdev;
121 118
@@ -745,12 +742,12 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
745 742
746 case 1: 743 case 1:
747 /* Uncorrectable error */ 744 /* Uncorrectable error */
748 DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n"); 745 pr_debug("ECC UNCORRECTED_ERROR 1\n");
749 return -1; 746 return -1;
750 747
751 case 11: 748 case 11:
752 /* UN-Correctable error */ 749 /* UN-Correctable error */
753 DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n"); 750 pr_debug("ECC UNCORRECTED_ERROR B\n");
754 return -1; 751 return -1;
755 752
756 case 12: 753 case 12:
@@ -767,8 +764,8 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
767 764
768 find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1]; 765 find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
769 766
770 DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at " 767 pr_debug("Correcting single bit ECC error at offset: "
771 "offset: %d, bit: %d\n", find_byte, find_bit); 768 "%d, bit: %d\n", find_byte, find_bit);
772 769
773 page_data[find_byte] ^= (1 << find_bit); 770 page_data[find_byte] ^= (1 << find_bit);
774 771
@@ -780,7 +777,7 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
780 ecc_data2[2] == 0) 777 ecc_data2[2] == 0)
781 return 0; 778 return 0;
782 } 779 }
783 DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n"); 780 pr_debug("UNCORRECTED_ERROR default\n");
784 return -1; 781 return -1;
785 } 782 }
786} 783}
@@ -1104,13 +1101,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1104 goto out_release_mem_region; 1101 goto out_release_mem_region;
1105 } 1102 }
1106 1103
1107 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); 1104 mtd_device_parse_register(&info->mtd, NULL, 0,
1108 if (err > 0) 1105 pdata->parts, pdata->nr_parts);
1109 mtd_device_register(&info->mtd, info->parts, err);
1110 else if (pdata->parts)
1111 mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
1112 else
1113 mtd_device_register(&info->mtd, NULL, 0);
1114 1106
1115 platform_set_drvdata(pdev, &info->mtd); 1107 platform_set_drvdata(pdev, &info->mtd);
1116 1108
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 7794d0680f91..29f505adaf84 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -21,8 +21,6 @@
21#include <mach/hardware.h> 21#include <mach/hardware.h>
22#include <plat/orion_nand.h> 22#include <plat/orion_nand.h>
23 23
24static const char *part_probes[] = { "cmdlinepart", NULL };
25
26static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) 24static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
27{ 25{
28 struct nand_chip *nc = mtd->priv; 26 struct nand_chip *nc = mtd->priv;
@@ -81,8 +79,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)
81 struct resource *res; 79 struct resource *res;
82 void __iomem *io_base; 80 void __iomem *io_base;
83 int ret = 0; 81 int ret = 0;
84 struct mtd_partition *partitions = NULL;
85 int num_part = 0;
86 82
87 nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL); 83 nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
88 if (!nc) { 84 if (!nc) {
@@ -132,17 +128,9 @@ static int __init orion_nand_probe(struct platform_device *pdev)
132 goto no_dev; 128 goto no_dev;
133 } 129 }
134 130
135#ifdef CONFIG_MTD_CMDLINE_PARTS
136 mtd->name = "orion_nand"; 131 mtd->name = "orion_nand";
137 num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0); 132 ret = mtd_device_parse_register(mtd, NULL, 0,
138#endif 133 board->parts, board->nr_parts);
139 /* If cmdline partitions have been passed, let them be used */
140 if (num_part <= 0) {
141 num_part = board->nr_parts;
142 partitions = board->parts;
143 }
144
145 ret = mtd_device_register(mtd, partitions, num_part);
146 if (ret) { 134 if (ret) {
147 nand_release(mtd); 135 nand_release(mtd);
148 goto no_dev; 136 goto no_dev;
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index b1aa41b8a4eb..a97264ececdb 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -155,7 +155,8 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
155 chip->ecc.mode = NAND_ECC_SOFT; 155 chip->ecc.mode = NAND_ECC_SOFT;
156 156
157 /* Enable the following for a flash based bad block table */ 157 /* Enable the following for a flash based bad block table */
158 chip->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR; 158 chip->options = NAND_NO_AUTOINCR;
159 chip->bbt_options = NAND_BBT_USE_FLASH;
159 160
160 /* Scan to find existence of the device */ 161 /* Scan to find existence of the device */
161 if (nand_scan(pasemi_nand_mtd, 1)) { 162 if (nand_scan(pasemi_nand_mtd, 1)) {
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 633c04bf76f6..ea8e1234e0e2 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -21,8 +21,6 @@ struct plat_nand_data {
21 struct nand_chip chip; 21 struct nand_chip chip;
22 struct mtd_info mtd; 22 struct mtd_info mtd;
23 void __iomem *io_base; 23 void __iomem *io_base;
24 int nr_parts;
25 struct mtd_partition *parts;
26}; 24};
27 25
28/* 26/*
@@ -79,6 +77,7 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
79 data->chip.read_buf = pdata->ctrl.read_buf; 77 data->chip.read_buf = pdata->ctrl.read_buf;
80 data->chip.chip_delay = pdata->chip.chip_delay; 78 data->chip.chip_delay = pdata->chip.chip_delay;
81 data->chip.options |= pdata->chip.options; 79 data->chip.options |= pdata->chip.options;
80 data->chip.bbt_options |= pdata->chip.bbt_options;
82 81
83 data->chip.ecc.hwctl = pdata->ctrl.hwcontrol; 82 data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
84 data->chip.ecc.layout = pdata->chip.ecclayout; 83 data->chip.ecc.layout = pdata->chip.ecclayout;
@@ -99,23 +98,9 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
99 goto out; 98 goto out;
100 } 99 }
101 100
102 if (pdata->chip.part_probe_types) { 101 err = mtd_device_parse_register(&data->mtd,
103 err = parse_mtd_partitions(&data->mtd, 102 pdata->chip.part_probe_types, 0,
104 pdata->chip.part_probe_types, 103 pdata->chip.partitions, pdata->chip.nr_partitions);
105 &data->parts, 0);
106 if (err > 0) {
107 mtd_device_register(&data->mtd, data->parts, err);
108 return 0;
109 }
110 }
111 if (pdata->chip.set_parts)
112 pdata->chip.set_parts(data->mtd.size, &pdata->chip);
113 if (pdata->chip.partitions) {
114 data->parts = pdata->chip.partitions;
115 err = mtd_device_register(&data->mtd, data->parts,
116 pdata->chip.nr_partitions);
117 } else
118 err = mtd_device_register(&data->mtd, NULL, 0);
119 104
120 if (!err) 105 if (!err)
121 return err; 106 return err;
@@ -145,8 +130,6 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
145 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 130 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
146 131
147 nand_release(&data->mtd); 132 nand_release(&data->mtd);
148 if (data->parts && data->parts != pdata->chip.partitions)
149 kfree(data->parts);
150 if (pdata->ctrl.remove) 133 if (pdata->ctrl.remove)
151 pdata->ctrl.remove(pdev); 134 pdata->ctrl.remove(pdev);
152 iounmap(data->io_base); 135 iounmap(data->io_base);
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
index 3bbb796b451c..7e52af51a198 100644
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -99,8 +99,6 @@ static struct mtd_partition partition_info_evb[] = {
99 99
100#define NUM_PARTITIONS 1 100#define NUM_PARTITIONS 1
101 101
102extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, const char *mtd_id);
103
104/* 102/*
105 * hardware specific access to control-lines 103 * hardware specific access to control-lines
106 */ 104 */
@@ -187,18 +185,12 @@ static int ppchameleonevb_device_ready(struct mtd_info *minfo)
187} 185}
188#endif 186#endif
189 187
190const char *part_probes[] = { "cmdlinepart", NULL };
191const char *part_probes_evb[] = { "cmdlinepart", NULL };
192
193/* 188/*
194 * Main initialization routine 189 * Main initialization routine
195 */ 190 */
196static int __init ppchameleonevb_init(void) 191static int __init ppchameleonevb_init(void)
197{ 192{
198 struct nand_chip *this; 193 struct nand_chip *this;
199 const char *part_type = 0;
200 int mtd_parts_nb = 0;
201 struct mtd_partition *mtd_parts = 0;
202 void __iomem *ppchameleon_fio_base; 194 void __iomem *ppchameleon_fio_base;
203 void __iomem *ppchameleonevb_fio_base; 195 void __iomem *ppchameleonevb_fio_base;
204 196
@@ -281,24 +273,13 @@ static int __init ppchameleonevb_init(void)
281#endif 273#endif
282 274
283 ppchameleon_mtd->name = "ppchameleon-nand"; 275 ppchameleon_mtd->name = "ppchameleon-nand";
284 mtd_parts_nb = parse_mtd_partitions(ppchameleon_mtd, part_probes, &mtd_parts, 0);
285 if (mtd_parts_nb > 0)
286 part_type = "command line";
287 else
288 mtd_parts_nb = 0;
289
290 if (mtd_parts_nb == 0) {
291 if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
292 mtd_parts = partition_info_me;
293 else
294 mtd_parts = partition_info_hi;
295 mtd_parts_nb = NUM_PARTITIONS;
296 part_type = "static";
297 }
298 276
299 /* Register the partitions */ 277 /* Register the partitions */
300 printk(KERN_NOTICE "Using %s partition definition\n", part_type); 278 mtd_device_parse_register(ppchameleon_mtd, NULL, 0,
301 mtd_device_register(ppchameleon_mtd, mtd_parts, mtd_parts_nb); 279 ppchameleon_mtd->size == NAND_SMALL_SIZE ?
280 partition_info_me :
281 partition_info_hi,
282 NUM_PARTITIONS);
302 283
303 nand_evb_init: 284 nand_evb_init:
304 /**************************** 285 /****************************
@@ -382,21 +363,13 @@ static int __init ppchameleonevb_init(void)
382 } 363 }
383 364
384 ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME; 365 ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
385 mtd_parts_nb = parse_mtd_partitions(ppchameleonevb_mtd, part_probes_evb, &mtd_parts, 0);
386 if (mtd_parts_nb > 0)
387 part_type = "command line";
388 else
389 mtd_parts_nb = 0;
390
391 if (mtd_parts_nb == 0) {
392 mtd_parts = partition_info_evb;
393 mtd_parts_nb = NUM_PARTITIONS;
394 part_type = "static";
395 }
396 366
397 /* Register the partitions */ 367 /* Register the partitions */
398 printk(KERN_NOTICE "Using %s partition definition\n", part_type); 368 mtd_device_parse_register(ppchameleonevb_mtd, NULL, 0,
399 mtd_device_register(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb); 369 ppchameleon_mtd->size == NAND_SMALL_SIZE ?
370 partition_info_me :
371 partition_info_hi,
372 NUM_PARTITIONS);
400 373
401 /* Return happy */ 374 /* Return happy */
402 return 0; 375 return 0;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 1fb3b3a80581..9eb7f879969e 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -110,6 +110,7 @@ enum {
110 110
111enum { 111enum {
112 STATE_IDLE = 0, 112 STATE_IDLE = 0,
113 STATE_PREPARED,
113 STATE_CMD_HANDLE, 114 STATE_CMD_HANDLE,
114 STATE_DMA_READING, 115 STATE_DMA_READING,
115 STATE_DMA_WRITING, 116 STATE_DMA_WRITING,
@@ -120,21 +121,40 @@ enum {
120 STATE_READY, 121 STATE_READY,
121}; 122};
122 123
123struct pxa3xx_nand_info { 124struct pxa3xx_nand_host {
124 struct nand_chip nand_chip; 125 struct nand_chip chip;
126 struct pxa3xx_nand_cmdset *cmdset;
127 struct mtd_info *mtd;
128 void *info_data;
129
130 /* page size of attached chip */
131 unsigned int page_size;
132 int use_ecc;
133 int cs;
125 134
135 /* calculated from pxa3xx_nand_flash data */
136 unsigned int col_addr_cycles;
137 unsigned int row_addr_cycles;
138 size_t read_id_bytes;
139
140 /* cached register value */
141 uint32_t reg_ndcr;
142 uint32_t ndtr0cs0;
143 uint32_t ndtr1cs0;
144};
145
146struct pxa3xx_nand_info {
126 struct nand_hw_control controller; 147 struct nand_hw_control controller;
127 struct platform_device *pdev; 148 struct platform_device *pdev;
128 struct pxa3xx_nand_cmdset *cmdset;
129 149
130 struct clk *clk; 150 struct clk *clk;
131 void __iomem *mmio_base; 151 void __iomem *mmio_base;
132 unsigned long mmio_phys; 152 unsigned long mmio_phys;
153 struct completion cmd_complete;
133 154
134 unsigned int buf_start; 155 unsigned int buf_start;
135 unsigned int buf_count; 156 unsigned int buf_count;
136 157
137 struct mtd_info *mtd;
138 /* DMA information */ 158 /* DMA information */
139 int drcmr_dat; 159 int drcmr_dat;
140 int drcmr_cmd; 160 int drcmr_cmd;
@@ -142,44 +162,27 @@ struct pxa3xx_nand_info {
142 unsigned char *data_buff; 162 unsigned char *data_buff;
143 unsigned char *oob_buff; 163 unsigned char *oob_buff;
144 dma_addr_t data_buff_phys; 164 dma_addr_t data_buff_phys;
145 size_t data_buff_size;
146 int data_dma_ch; 165 int data_dma_ch;
147 struct pxa_dma_desc *data_desc; 166 struct pxa_dma_desc *data_desc;
148 dma_addr_t data_desc_addr; 167 dma_addr_t data_desc_addr;
149 168
150 uint32_t reg_ndcr; 169 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
151
152 /* saved column/page_addr during CMD_SEQIN */
153 int seqin_column;
154 int seqin_page_addr;
155
156 /* relate to the command */
157 unsigned int state; 170 unsigned int state;
158 171
172 int cs;
159 int use_ecc; /* use HW ECC ? */ 173 int use_ecc; /* use HW ECC ? */
160 int use_dma; /* use DMA ? */ 174 int use_dma; /* use DMA ? */
161 int is_ready; 175 int is_ready;
162 176
163 unsigned int page_size; /* page size of attached chip */ 177 unsigned int page_size; /* page size of attached chip */
164 unsigned int data_size; /* data size in FIFO */ 178 unsigned int data_size; /* data size in FIFO */
179 unsigned int oob_size;
165 int retcode; 180 int retcode;
166 struct completion cmd_complete;
167 181
168 /* generated NDCBx register values */ 182 /* generated NDCBx register values */
169 uint32_t ndcb0; 183 uint32_t ndcb0;
170 uint32_t ndcb1; 184 uint32_t ndcb1;
171 uint32_t ndcb2; 185 uint32_t ndcb2;
172
173 /* timing calculated from setting */
174 uint32_t ndtr0cs0;
175 uint32_t ndtr1cs0;
176
177 /* calculated from pxa3xx_nand_flash data */
178 size_t oob_size;
179 size_t read_id_bytes;
180
181 unsigned int col_addr_cycles;
182 unsigned int row_addr_cycles;
183}; 186};
184 187
185static int use_dma = 1; 188static int use_dma = 1;
@@ -225,7 +228,7 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = {
225/* Define a default flash type setting that serves for flash detection only */ 228/* Define a default flash type setting that serves for flash detection only */
226#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0]) 229#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
227 230
228const char *mtd_names[] = {"pxa3xx_nand-0", NULL}; 231const char *mtd_names[] = {"pxa3xx_nand-0", "pxa3xx_nand-1", NULL};
229 232
230#define NDTR0_tCH(c) (min((c), 7) << 19) 233#define NDTR0_tCH(c) (min((c), 7) << 19)
231#define NDTR0_tCS(c) (min((c), 7) << 16) 234#define NDTR0_tCS(c) (min((c), 7) << 16)
@@ -241,9 +244,10 @@ const char *mtd_names[] = {"pxa3xx_nand-0", NULL};
241/* convert nano-seconds to nand flash controller clock cycles */ 244/* convert nano-seconds to nand flash controller clock cycles */
242#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000) 245#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
243 246
244static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info, 247static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
245 const struct pxa3xx_nand_timing *t) 248 const struct pxa3xx_nand_timing *t)
246{ 249{
250 struct pxa3xx_nand_info *info = host->info_data;
247 unsigned long nand_clk = clk_get_rate(info->clk); 251 unsigned long nand_clk = clk_get_rate(info->clk);
248 uint32_t ndtr0, ndtr1; 252 uint32_t ndtr0, ndtr1;
249 253
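
The ns2cycle() macro in the context above converts a nanosecond timing value into NAND controller clock cycles with plain integer arithmetic, so the result truncates rather than rounds. A standalone check with an arbitrary example clock rate:

#include <stdio.h>

/* Same macro as above; the clock rate below is an example value only. */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)

int main(void)
{
	unsigned long clk = 156000000UL;	/* example controller clock */

	printf("10 ns -> %d cycles\n", ns2cycle(10, clk));	/* 1.56 -> 1 */
	printf("70 ns -> %d cycles\n", ns2cycle(70, clk));	/* 10.92 -> 10 */
	return 0;
}
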
@@ -258,23 +262,24 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
258 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) | 262 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
259 NDTR1_tAR(ns2cycle(t->tAR, nand_clk)); 263 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
260 264
261 info->ndtr0cs0 = ndtr0; 265 host->ndtr0cs0 = ndtr0;
262 info->ndtr1cs0 = ndtr1; 266 host->ndtr1cs0 = ndtr1;
263 nand_writel(info, NDTR0CS0, ndtr0); 267 nand_writel(info, NDTR0CS0, ndtr0);
264 nand_writel(info, NDTR1CS0, ndtr1); 268 nand_writel(info, NDTR1CS0, ndtr1);
265} 269}
266 270
267static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info) 271static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
268{ 272{
269 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN; 273 struct pxa3xx_nand_host *host = info->host[info->cs];
274 int oob_enable = host->reg_ndcr & NDCR_SPARE_EN;
270 275
271 info->data_size = info->page_size; 276 info->data_size = host->page_size;
272 if (!oob_enable) { 277 if (!oob_enable) {
273 info->oob_size = 0; 278 info->oob_size = 0;
274 return; 279 return;
275 } 280 }
276 281
277 switch (info->page_size) { 282 switch (host->page_size) {
278 case 2048: 283 case 2048:
279 info->oob_size = (info->use_ecc) ? 40 : 64; 284 info->oob_size = (info->use_ecc) ? 40 : 64;
280 break; 285 break;
@@ -292,9 +297,10 @@ static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
292 */ 297 */
293static void pxa3xx_nand_start(struct pxa3xx_nand_info *info) 298static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
294{ 299{
300 struct pxa3xx_nand_host *host = info->host[info->cs];
295 uint32_t ndcr; 301 uint32_t ndcr;
296 302
297 ndcr = info->reg_ndcr; 303 ndcr = host->reg_ndcr;
298 ndcr |= info->use_ecc ? NDCR_ECC_EN : 0; 304 ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
299 ndcr |= info->use_dma ? NDCR_DMA_EN : 0; 305 ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
300 ndcr |= NDCR_ND_RUN; 306 ndcr |= NDCR_ND_RUN;
@@ -359,7 +365,7 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
359 DIV_ROUND_UP(info->oob_size, 4)); 365 DIV_ROUND_UP(info->oob_size, 4));
360 break; 366 break;
361 default: 367 default:
362 printk(KERN_ERR "%s: invalid state %d\n", __func__, 368 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
363 info->state); 369 info->state);
364 BUG(); 370 BUG();
365 } 371 }
@@ -385,7 +391,7 @@ static void start_data_dma(struct pxa3xx_nand_info *info)
385 desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC; 391 desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
386 break; 392 break;
387 default: 393 default:
388 printk(KERN_ERR "%s: invalid state %d\n", __func__, 394 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
389 info->state); 395 info->state);
390 BUG(); 396 BUG();
391 } 397 }
@@ -416,6 +422,15 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
416{ 422{
417 struct pxa3xx_nand_info *info = devid; 423 struct pxa3xx_nand_info *info = devid;
418 unsigned int status, is_completed = 0; 424 unsigned int status, is_completed = 0;
425 unsigned int ready, cmd_done;
426
427 if (info->cs == 0) {
428 ready = NDSR_FLASH_RDY;
429 cmd_done = NDSR_CS0_CMDD;
430 } else {
431 ready = NDSR_RDY;
432 cmd_done = NDSR_CS1_CMDD;
433 }
419 434
420 status = nand_readl(info, NDSR); 435 status = nand_readl(info, NDSR);
421 436
@@ -437,11 +452,11 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
437 handle_data_pio(info); 452 handle_data_pio(info);
438 } 453 }
439 } 454 }
440 if (status & NDSR_CS0_CMDD) { 455 if (status & cmd_done) {
441 info->state = STATE_CMD_DONE; 456 info->state = STATE_CMD_DONE;
442 is_completed = 1; 457 is_completed = 1;
443 } 458 }
444 if (status & NDSR_FLASH_RDY) { 459 if (status & ready) {
445 info->is_ready = 1; 460 info->is_ready = 1;
446 info->state = STATE_READY; 461 info->state = STATE_READY;
447 } 462 }
@@ -463,12 +478,6 @@ NORMAL_IRQ_EXIT:
463 return IRQ_HANDLED; 478 return IRQ_HANDLED;
464} 479}
465 480
466static int pxa3xx_nand_dev_ready(struct mtd_info *mtd)
467{
468 struct pxa3xx_nand_info *info = mtd->priv;
469 return (nand_readl(info, NDSR) & NDSR_RDY) ? 1 : 0;
470}
471
472static inline int is_buf_blank(uint8_t *buf, size_t len) 481static inline int is_buf_blank(uint8_t *buf, size_t len)
473{ 482{
474 for (; len > 0; len--) 483 for (; len > 0; len--)
@@ -481,10 +490,12 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
481 uint16_t column, int page_addr) 490 uint16_t column, int page_addr)
482{ 491{
483 uint16_t cmd; 492 uint16_t cmd;
484 int addr_cycle, exec_cmd, ndcb0; 493 int addr_cycle, exec_cmd;
485 struct mtd_info *mtd = info->mtd; 494 struct pxa3xx_nand_host *host;
495 struct mtd_info *mtd;
486 496
487 ndcb0 = 0; 497 host = info->host[info->cs];
498 mtd = host->mtd;
488 addr_cycle = 0; 499 addr_cycle = 0;
489 exec_cmd = 1; 500 exec_cmd = 1;
490 501
@@ -495,6 +506,10 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
495 info->use_ecc = 0; 506 info->use_ecc = 0;
496 info->is_ready = 0; 507 info->is_ready = 0;
497 info->retcode = ERR_NONE; 508 info->retcode = ERR_NONE;
509 if (info->cs != 0)
510 info->ndcb0 = NDCB0_CSEL;
511 else
512 info->ndcb0 = 0;
498 513
499 switch (command) { 514 switch (command) {
500 case NAND_CMD_READ0: 515 case NAND_CMD_READ0:
@@ -512,20 +527,19 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
512 break; 527 break;
513 } 528 }
514 529
515 info->ndcb0 = ndcb0; 530 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
516 addr_cycle = NDCB0_ADDR_CYC(info->row_addr_cycles 531 + host->col_addr_cycles);
517 + info->col_addr_cycles);
518 532
519 switch (command) { 533 switch (command) {
520 case NAND_CMD_READOOB: 534 case NAND_CMD_READOOB:
521 case NAND_CMD_READ0: 535 case NAND_CMD_READ0:
522 cmd = info->cmdset->read1; 536 cmd = host->cmdset->read1;
523 if (command == NAND_CMD_READOOB) 537 if (command == NAND_CMD_READOOB)
524 info->buf_start = mtd->writesize + column; 538 info->buf_start = mtd->writesize + column;
525 else 539 else
526 info->buf_start = column; 540 info->buf_start = column;
527 541
528 if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) 542 if (unlikely(host->page_size < PAGE_CHUNK_SIZE))
529 info->ndcb0 |= NDCB0_CMD_TYPE(0) 543 info->ndcb0 |= NDCB0_CMD_TYPE(0)
530 | addr_cycle 544 | addr_cycle
531 | (cmd & NDCB0_CMD1_MASK); 545 | (cmd & NDCB0_CMD1_MASK);
@@ -537,7 +551,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
537 551
538 case NAND_CMD_SEQIN: 552 case NAND_CMD_SEQIN:
539 /* small page addr setting */ 553 /* small page addr setting */
540 if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) { 554 if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) {
541 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8) 555 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
542 | (column & 0xFF); 556 | (column & 0xFF);
543 557
@@ -564,7 +578,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
564 break; 578 break;
565 } 579 }
566 580
567 cmd = info->cmdset->program; 581 cmd = host->cmdset->program;
568 info->ndcb0 |= NDCB0_CMD_TYPE(0x1) 582 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
569 | NDCB0_AUTO_RS 583 | NDCB0_AUTO_RS
570 | NDCB0_ST_ROW_EN 584 | NDCB0_ST_ROW_EN
@@ -574,8 +588,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
574 break; 588 break;
575 589
576 case NAND_CMD_READID: 590 case NAND_CMD_READID:
577 cmd = info->cmdset->read_id; 591 cmd = host->cmdset->read_id;
578 info->buf_count = info->read_id_bytes; 592 info->buf_count = host->read_id_bytes;
579 info->ndcb0 |= NDCB0_CMD_TYPE(3) 593 info->ndcb0 |= NDCB0_CMD_TYPE(3)
580 | NDCB0_ADDR_CYC(1) 594 | NDCB0_ADDR_CYC(1)
581 | cmd; 595 | cmd;
@@ -583,7 +597,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
583 info->data_size = 8; 597 info->data_size = 8;
584 break; 598 break;
585 case NAND_CMD_STATUS: 599 case NAND_CMD_STATUS:
586 cmd = info->cmdset->read_status; 600 cmd = host->cmdset->read_status;
587 info->buf_count = 1; 601 info->buf_count = 1;
588 info->ndcb0 |= NDCB0_CMD_TYPE(4) 602 info->ndcb0 |= NDCB0_CMD_TYPE(4)
589 | NDCB0_ADDR_CYC(1) 603 | NDCB0_ADDR_CYC(1)
@@ -593,7 +607,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
593 break; 607 break;
594 608
595 case NAND_CMD_ERASE1: 609 case NAND_CMD_ERASE1:
596 cmd = info->cmdset->erase; 610 cmd = host->cmdset->erase;
597 info->ndcb0 |= NDCB0_CMD_TYPE(2) 611 info->ndcb0 |= NDCB0_CMD_TYPE(2)
598 | NDCB0_AUTO_RS 612 | NDCB0_AUTO_RS
599 | NDCB0_ADDR_CYC(3) 613 | NDCB0_ADDR_CYC(3)
@@ -604,7 +618,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
604 618
605 break; 619 break;
606 case NAND_CMD_RESET: 620 case NAND_CMD_RESET:
607 cmd = info->cmdset->reset; 621 cmd = host->cmdset->reset;
608 info->ndcb0 |= NDCB0_CMD_TYPE(5) 622 info->ndcb0 |= NDCB0_CMD_TYPE(5)
609 | cmd; 623 | cmd;
610 624
@@ -616,8 +630,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
616 630
617 default: 631 default:
618 exec_cmd = 0; 632 exec_cmd = 0;
619 printk(KERN_ERR "pxa3xx-nand: non-supported" 633 dev_err(&info->pdev->dev, "non-supported command %x\n",
620 " command %x\n", command); 634 command);
621 break; 635 break;
622 } 636 }
623 637
@@ -627,7 +641,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
627static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, 641static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
628 int column, int page_addr) 642 int column, int page_addr)
629{ 643{
630 struct pxa3xx_nand_info *info = mtd->priv; 644 struct pxa3xx_nand_host *host = mtd->priv;
645 struct pxa3xx_nand_info *info = host->info_data;
631 int ret, exec_cmd; 646 int ret, exec_cmd;
632 647
633 /* 648 /*
@@ -635,9 +650,21 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
635 * "byte" address into a "word" address appropriate 650 * "byte" address into a "word" address appropriate
636 * for indexing a word-oriented device 651 * for indexing a word-oriented device
637 */ 652 */
638 if (info->reg_ndcr & NDCR_DWIDTH_M) 653 if (host->reg_ndcr & NDCR_DWIDTH_M)
639 column /= 2; 654 column /= 2;
640 655
656 /*
657 * There may be different NAND chip hooked to
658 * different chip select, so check whether
659 * chip select has been changed, if yes, reset the timing
660 */
661 if (info->cs != host->cs) {
662 info->cs = host->cs;
663 nand_writel(info, NDTR0CS0, host->ndtr0cs0);
664 nand_writel(info, NDTR1CS0, host->ndtr1cs0);
665 }
666
667 info->state = STATE_PREPARED;
641 exec_cmd = prepare_command_pool(info, command, column, page_addr); 668 exec_cmd = prepare_command_pool(info, command, column, page_addr);
642 if (exec_cmd) { 669 if (exec_cmd) {
643 init_completion(&info->cmd_complete); 670 init_completion(&info->cmd_complete);
@@ -646,12 +673,12 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
646 ret = wait_for_completion_timeout(&info->cmd_complete, 673 ret = wait_for_completion_timeout(&info->cmd_complete,
647 CHIP_DELAY_TIMEOUT); 674 CHIP_DELAY_TIMEOUT);
648 if (!ret) { 675 if (!ret) {
649 printk(KERN_ERR "Wait time out!!!\n"); 676 dev_err(&info->pdev->dev, "Wait time out!!!\n");
650 /* Stop State Machine for next command cycle */ 677 /* Stop State Machine for next command cycle */
651 pxa3xx_nand_stop(info); 678 pxa3xx_nand_stop(info);
652 } 679 }
653 info->state = STATE_IDLE;
654 } 680 }
681 info->state = STATE_IDLE;
655} 682}
656 683
657static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd, 684static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
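The cmdfunc() hunks above split the old monolithic pxa3xx_nand_info into a controller-wide info plus a per-chip-select host, and reload the NDTR0CS0/NDTR1CS0 timing registers only when a command targets a different chip select than the previous one (info->cs remembers which chip the controller is currently timed for). Below is a minimal standalone sketch of that lazy-reload pattern; the register offsets and structure layouts are invented stand-ins, and only the field names and the shape of the nand_writel() call mirror the patch.

/* Illustrative sketch: reload per-chip-select NAND timings lazily.
 * Register offsets and struct contents are placeholders; the stubbed
 * nand_writel() prints instead of touching hardware. */
#include <stdio.h>
#include <stdint.h>

#define NDTR0CS0 0x04	/* hypothetical offsets */
#define NDTR1CS0 0x0c

struct host { int cs; uint32_t ndtr0cs0, ndtr1cs0; };
struct info { int cs; /* chip select the controller is currently timed for */ };

static void nand_writel(struct info *info, int reg, uint32_t val)
{
	(void)info;
	printf("write reg 0x%02x = 0x%08x\n", reg, (unsigned)val);
}

/* Called at the start of every command: reprogram timings only on a cs switch. */
static void select_timing(struct info *info, const struct host *host)
{
	if (info->cs == host->cs)
		return;			/* same chip as last time, nothing to do */
	info->cs = host->cs;
	nand_writel(info, NDTR0CS0, host->ndtr0cs0);
	nand_writel(info, NDTR1CS0, host->ndtr1cs0);
}

int main(void)
{
	struct info info = { .cs = 0xff };	/* 0xff = "no chip timed yet", as in resume */
	struct host h0 = { 0, 0x11111111, 0x22222222 };
	struct host h1 = { 1, 0x33333333, 0x44444444 };

	select_timing(&info, &h0);	/* programs cs0 timings */
	select_timing(&info, &h0);	/* no-op */
	select_timing(&info, &h1);	/* programs cs1 timings */
	return 0;
}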
@@ -664,7 +691,8 @@ static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
664static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, 691static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
665 struct nand_chip *chip, uint8_t *buf, int page) 692 struct nand_chip *chip, uint8_t *buf, int page)
666{ 693{
667 struct pxa3xx_nand_info *info = mtd->priv; 694 struct pxa3xx_nand_host *host = mtd->priv;
695 struct pxa3xx_nand_info *info = host->info_data;
668 696
669 chip->read_buf(mtd, buf, mtd->writesize); 697 chip->read_buf(mtd, buf, mtd->writesize);
670 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 698 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -685,6 +713,8 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
685 * OOB, ignore such double bit errors 713 * OOB, ignore such double bit errors
686 */ 714 */
687 if (is_buf_blank(buf, mtd->writesize)) 715 if (is_buf_blank(buf, mtd->writesize))
716 info->retcode = ERR_NONE;
717 else
688 mtd->ecc_stats.failed++; 718 mtd->ecc_stats.failed++;
689 } 719 }
690 720
@@ -693,7 +723,8 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
693 723
694static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd) 724static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
695{ 725{
696 struct pxa3xx_nand_info *info = mtd->priv; 726 struct pxa3xx_nand_host *host = mtd->priv;
727 struct pxa3xx_nand_info *info = host->info_data;
697 char retval = 0xFF; 728 char retval = 0xFF;
698 729
699 if (info->buf_start < info->buf_count) 730 if (info->buf_start < info->buf_count)
@@ -705,7 +736,8 @@ static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
705 736
706static u16 pxa3xx_nand_read_word(struct mtd_info *mtd) 737static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
707{ 738{
708 struct pxa3xx_nand_info *info = mtd->priv; 739 struct pxa3xx_nand_host *host = mtd->priv;
740 struct pxa3xx_nand_info *info = host->info_data;
709 u16 retval = 0xFFFF; 741 u16 retval = 0xFFFF;
710 742
711 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) { 743 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
@@ -717,7 +749,8 @@ static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
717 749
718static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 750static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
719{ 751{
720 struct pxa3xx_nand_info *info = mtd->priv; 752 struct pxa3xx_nand_host *host = mtd->priv;
753 struct pxa3xx_nand_info *info = host->info_data;
721 int real_len = min_t(size_t, len, info->buf_count - info->buf_start); 754 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
722 755
723 memcpy(buf, info->data_buff + info->buf_start, real_len); 756 memcpy(buf, info->data_buff + info->buf_start, real_len);
@@ -727,7 +760,8 @@ static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
727static void pxa3xx_nand_write_buf(struct mtd_info *mtd, 760static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
728 const uint8_t *buf, int len) 761 const uint8_t *buf, int len)
729{ 762{
730 struct pxa3xx_nand_info *info = mtd->priv; 763 struct pxa3xx_nand_host *host = mtd->priv;
764 struct pxa3xx_nand_info *info = host->info_data;
731 int real_len = min_t(size_t, len, info->buf_count - info->buf_start); 765 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
732 766
733 memcpy(info->data_buff + info->buf_start, buf, real_len); 767 memcpy(info->data_buff + info->buf_start, buf, real_len);
@@ -747,7 +781,8 @@ static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
747 781
748static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this) 782static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
749{ 783{
750 struct pxa3xx_nand_info *info = mtd->priv; 784 struct pxa3xx_nand_host *host = mtd->priv;
785 struct pxa3xx_nand_info *info = host->info_data;
751 786
752 /* pxa3xx_nand_send_command has waited for command complete */ 787 /* pxa3xx_nand_send_command has waited for command complete */
753 if (this->state == FL_WRITING || this->state == FL_ERASING) { 788 if (this->state == FL_WRITING || this->state == FL_ERASING) {
@@ -770,54 +805,70 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
770{ 805{
771 struct platform_device *pdev = info->pdev; 806 struct platform_device *pdev = info->pdev;
772 struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; 807 struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
808 struct pxa3xx_nand_host *host = info->host[info->cs];
773 uint32_t ndcr = 0x0; /* enable all interrupts */ 809 uint32_t ndcr = 0x0; /* enable all interrupts */
774 810
775 if (f->page_size != 2048 && f->page_size != 512) 811 if (f->page_size != 2048 && f->page_size != 512) {
812 dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
776 return -EINVAL; 813 return -EINVAL;
814 }
777 815
778 if (f->flash_width != 16 && f->flash_width != 8) 816 if (f->flash_width != 16 && f->flash_width != 8) {
817 dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
779 return -EINVAL; 818 return -EINVAL;
819 }
780 820
781 /* calculate flash information */ 821 /* calculate flash information */
782 info->cmdset = &default_cmdset; 822 host->cmdset = &default_cmdset;
783 info->page_size = f->page_size; 823 host->page_size = f->page_size;
784 info->read_id_bytes = (f->page_size == 2048) ? 4 : 2; 824 host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
785 825
786 /* calculate addressing information */ 826 /* calculate addressing information */
787 info->col_addr_cycles = (f->page_size == 2048) ? 2 : 1; 827 host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
788 828
789 if (f->num_blocks * f->page_per_block > 65536) 829 if (f->num_blocks * f->page_per_block > 65536)
790 info->row_addr_cycles = 3; 830 host->row_addr_cycles = 3;
791 else 831 else
792 info->row_addr_cycles = 2; 832 host->row_addr_cycles = 2;
793 833
794 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0; 834 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
795 ndcr |= (info->col_addr_cycles == 2) ? NDCR_RA_START : 0; 835 ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
796 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0; 836 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
797 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0; 837 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
798 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0; 838 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
799 ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0; 839 ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
800 840
801 ndcr |= NDCR_RD_ID_CNT(info->read_id_bytes); 841 ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
802 ndcr |= NDCR_SPARE_EN; /* enable spare by default */ 842 ndcr |= NDCR_SPARE_EN; /* enable spare by default */
803 843
804 info->reg_ndcr = ndcr; 844 host->reg_ndcr = ndcr;
805 845
806 pxa3xx_nand_set_timing(info, f->timing); 846 pxa3xx_nand_set_timing(host, f->timing);
807 return 0; 847 return 0;
808} 848}
809 849
810static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info) 850static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
811{ 851{
852 /*
853 * We set 0 by hard coding here, for we don't support keep_config
854 * when there is more than one chip attached to the controller
855 */
856 struct pxa3xx_nand_host *host = info->host[0];
812 uint32_t ndcr = nand_readl(info, NDCR); 857 uint32_t ndcr = nand_readl(info, NDCR);
813 info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
814 /* set info fields needed to read id */
815 info->read_id_bytes = (info->page_size == 2048) ? 4 : 2;
816 info->reg_ndcr = ndcr;
817 info->cmdset = &default_cmdset;
818 858
819 info->ndtr0cs0 = nand_readl(info, NDTR0CS0); 859 if (ndcr & NDCR_PAGE_SZ) {
820 info->ndtr1cs0 = nand_readl(info, NDTR1CS0); 860 host->page_size = 2048;
861 host->read_id_bytes = 4;
862 } else {
863 host->page_size = 512;
864 host->read_id_bytes = 2;
865 }
866
867 host->reg_ndcr = ndcr & ~NDCR_INT_MASK;
868 host->cmdset = &default_cmdset;
869
870 host->ndtr0cs0 = nand_readl(info, NDTR0CS0);
871 host->ndtr1cs0 = nand_readl(info, NDTR1CS0);
821 872
822 return 0; 873 return 0;
823} 874}
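With keep_config set, pxa3xx_nand_detect_config() now reads back whatever the bootloader programmed into NDCR and derives host[0]'s geometry from it (keep_config is not supported with more than one attached chip, per the new comment): NDCR_PAGE_SZ selects 2048 versus 512 byte pages and four versus two READID bytes, the interrupt-enable bits are masked out before the register value is cached in reg_ndcr, and the CS0 timing registers are saved for the lazy reload shown earlier. A small standalone sketch of the same decode; the bit definitions below are placeholders, not the controller's real register layout.

/* Sketch of the NDCR decode done by pxa3xx_nand_detect_config().
 * Bit positions are placeholders, not the real NDCR layout. */
#include <stdint.h>
#include <stdio.h>

#define NDCR_PAGE_SZ	(1u << 24)	/* placeholder bit */
#define NDCR_INT_MASK	(0xfffu << 0)	/* placeholder mask */

struct host { unsigned int page_size, read_id_bytes; uint32_t reg_ndcr; };

static void decode_ndcr(struct host *host, uint32_t ndcr)
{
	if (ndcr & NDCR_PAGE_SZ) {
		host->page_size = 2048;
		host->read_id_bytes = 4;
	} else {
		host->page_size = 512;
		host->read_id_bytes = 2;
	}
	/* keep the bootloader's setup, but never its interrupt enables */
	host->reg_ndcr = ndcr & ~NDCR_INT_MASK;
}

int main(void)
{
	struct host h;

	decode_ndcr(&h, NDCR_PAGE_SZ | 0x3);
	printf("page %u, id bytes %u, ndcr 0x%08x\n",
	       h.page_size, h.read_id_bytes, (unsigned)h.reg_ndcr);
	return 0;
}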
@@ -847,7 +898,6 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
847 return -ENOMEM; 898 return -ENOMEM;
848 } 899 }
849 900
850 info->data_buff_size = MAX_BUFF_SIZE;
851 info->data_desc = (void *)info->data_buff + data_desc_offset; 901 info->data_desc = (void *)info->data_buff + data_desc_offset;
852 info->data_desc_addr = info->data_buff_phys + data_desc_offset; 902 info->data_desc_addr = info->data_buff_phys + data_desc_offset;
853 903
@@ -855,7 +905,7 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
855 pxa3xx_nand_data_dma_irq, info); 905 pxa3xx_nand_data_dma_irq, info);
856 if (info->data_dma_ch < 0) { 906 if (info->data_dma_ch < 0) {
857 dev_err(&pdev->dev, "failed to request data dma\n"); 907 dev_err(&pdev->dev, "failed to request data dma\n");
858 dma_free_coherent(&pdev->dev, info->data_buff_size, 908 dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
859 info->data_buff, info->data_buff_phys); 909 info->data_buff, info->data_buff_phys);
860 return info->data_dma_ch; 910 return info->data_dma_ch;
861 } 911 }
@@ -865,24 +915,28 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
865 915
866static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info) 916static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
867{ 917{
868 struct mtd_info *mtd = info->mtd; 918 struct mtd_info *mtd;
869 struct nand_chip *chip = mtd->priv; 919 int ret;
870 920 mtd = info->host[info->cs]->mtd;
871 /* use the common timing to make a try */ 921 /* use the common timing to make a try */
872 pxa3xx_nand_config_flash(info, &builtin_flash_types[0]); 922 ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
873 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0); 923 if (ret)
924 return ret;
925
926 pxa3xx_nand_cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
874 if (info->is_ready) 927 if (info->is_ready)
875 return 1;
876 else
877 return 0; 928 return 0;
929
930 return -ENODEV;
878} 931}
879 932
880static int pxa3xx_nand_scan(struct mtd_info *mtd) 933static int pxa3xx_nand_scan(struct mtd_info *mtd)
881{ 934{
882 struct pxa3xx_nand_info *info = mtd->priv; 935 struct pxa3xx_nand_host *host = mtd->priv;
936 struct pxa3xx_nand_info *info = host->info_data;
883 struct platform_device *pdev = info->pdev; 937 struct platform_device *pdev = info->pdev;
884 struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; 938 struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
885 struct nand_flash_dev pxa3xx_flash_ids[2] = { {NULL,}, {NULL,} }; 939 struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
886 const struct pxa3xx_nand_flash *f = NULL; 940 const struct pxa3xx_nand_flash *f = NULL;
887 struct nand_chip *chip = mtd->priv; 941 struct nand_chip *chip = mtd->priv;
888 uint32_t id = -1; 942 uint32_t id = -1;
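pxa3xx_nand_sensing() now reports its result as an errno: program the conservative default flash description, fire a RESET through the normal command path, and return 0 only if the ready flag was raised for the chip select under test, otherwise -ENODEV so the probe loop can skip that socket. A standalone sketch of the handshake with all hardware interaction stubbed out; everything below is a stand-in for the driver's own helpers.

/* Sketch of the probe-time "sensing" handshake: default timings, RESET,
 * then check whether anything signalled ready. */
#include <errno.h>
#include <stdio.h>

struct info { int is_ready; };

static int config_default_timings(struct info *info)
{
	(void)info;
	return 0;		/* would fail for unsupported geometries */
}

static void send_reset(struct info *info)
{
	info->is_ready = 1;	/* the real flag is set from the IRQ handler */
}

static int nand_sensing(struct info *info)
{
	int ret = config_default_timings(info);

	if (ret)
		return ret;
	send_reset(info);
	return info->is_ready ? 0 : -ENODEV;	/* 0 = a chip answered on this cs */
}

int main(void)
{
	struct info info = { 0 };

	printf("sensing: %d\n", nand_sensing(&info));
	return 0;
}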
@@ -893,22 +947,20 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
893 goto KEEP_CONFIG; 947 goto KEEP_CONFIG;
894 948
895 ret = pxa3xx_nand_sensing(info); 949 ret = pxa3xx_nand_sensing(info);
896 if (!ret) { 950 if (ret) {
897 kfree(mtd); 951 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
898 info->mtd = NULL; 952 info->cs);
899 printk(KERN_INFO "There is no nand chip on cs 0!\n");
900 953
901 return -EINVAL; 954 return ret;
902 } 955 }
903 956
904 chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0); 957 chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
905 id = *((uint16_t *)(info->data_buff)); 958 id = *((uint16_t *)(info->data_buff));
906 if (id != 0) 959 if (id != 0)
907 printk(KERN_INFO "Detect a flash id %x\n", id); 960 dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
908 else { 961 else {
909 kfree(mtd); 962 dev_warn(&info->pdev->dev,
910 info->mtd = NULL; 963 "Read out ID 0, potential timing set wrong!!\n");
911 printk(KERN_WARNING "Read out ID 0, potential timing set wrong!!\n");
912 964
913 return -EINVAL; 965 return -EINVAL;
914 } 966 }
@@ -926,14 +978,17 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
926 } 978 }
927 979
928 if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) { 980 if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
929 kfree(mtd); 981 dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");
930 info->mtd = NULL;
931 printk(KERN_ERR "ERROR!! flash not defined!!!\n");
932 982
933 return -EINVAL; 983 return -EINVAL;
934 } 984 }
935 985
936 pxa3xx_nand_config_flash(info, f); 986 ret = pxa3xx_nand_config_flash(info, f);
987 if (ret) {
988 dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
989 return ret;
990 }
991
937 pxa3xx_flash_ids[0].name = f->name; 992 pxa3xx_flash_ids[0].name = f->name;
938 pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff; 993 pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff;
939 pxa3xx_flash_ids[0].pagesize = f->page_size; 994 pxa3xx_flash_ids[0].pagesize = f->page_size;
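Once a known flash is matched, pxa3xx_nand_scan() builds a private two-entry nand_flash_dev table, terminates it with a NULL name, and hands it to nand_scan_ident() instead of relying on the core's generic ID list. A fragment in the same spirit follows; the part name and geometry values are placeholders (chipsize is in megabytes, as the core expects), and identify_with_private_table() is a hypothetical wrapper.

#include <linux/mtd/nand.h>

/* Fragment: describe one detected part in a NULL-terminated table and let
 * nand_scan_ident() use it in place of the generic ID tables. */
static int identify_with_private_table(struct mtd_info *mtd)
{
	struct nand_flash_dev ids[2] = { {NULL,}, {NULL,} };

	ids[0].name      = "example 2KiB-page NAND";	/* placeholder values */
	ids[0].id        = 0xda;			/* device ID byte */
	ids[0].pagesize  = 2048;
	ids[0].chipsize  = 256;				/* MiB */
	ids[0].erasesize = 2048 * 64;			/* page size * pages per block */
	/* ids[1].name stays NULL: end of table */

	return nand_scan_ident(mtd, 1, ids);
}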
@@ -942,62 +997,78 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
942 pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block; 997 pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
943 if (f->flash_width == 16) 998 if (f->flash_width == 16)
944 pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16; 999 pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
1000 pxa3xx_flash_ids[1].name = NULL;
1001 def = pxa3xx_flash_ids;
945KEEP_CONFIG: 1002KEEP_CONFIG:
946 if (nand_scan_ident(mtd, 1, pxa3xx_flash_ids)) 1003 chip->ecc.mode = NAND_ECC_HW;
1004 chip->ecc.size = host->page_size;
1005
1006 chip->options = NAND_NO_AUTOINCR;
1007 chip->options |= NAND_NO_READRDY;
1008 if (host->reg_ndcr & NDCR_DWIDTH_M)
1009 chip->options |= NAND_BUSWIDTH_16;
1010
1011 if (nand_scan_ident(mtd, 1, def))
947 return -ENODEV; 1012 return -ENODEV;
948 /* calculate addressing information */ 1013 /* calculate addressing information */
949 info->col_addr_cycles = (mtd->writesize >= 2048) ? 2 : 1; 1014 if (mtd->writesize >= 2048)
1015 host->col_addr_cycles = 2;
1016 else
1017 host->col_addr_cycles = 1;
1018
950 info->oob_buff = info->data_buff + mtd->writesize; 1019 info->oob_buff = info->data_buff + mtd->writesize;
951 if ((mtd->size >> chip->page_shift) > 65536) 1020 if ((mtd->size >> chip->page_shift) > 65536)
952 info->row_addr_cycles = 3; 1021 host->row_addr_cycles = 3;
953 else 1022 else
954 info->row_addr_cycles = 2; 1023 host->row_addr_cycles = 2;
955 mtd->name = mtd_names[0];
956 chip->ecc.mode = NAND_ECC_HW;
957 chip->ecc.size = f->page_size;
958
959 chip->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0;
960 chip->options |= NAND_NO_AUTOINCR;
961 chip->options |= NAND_NO_READRDY;
962 1024
1025 mtd->name = mtd_names[0];
963 return nand_scan_tail(mtd); 1026 return nand_scan_tail(mtd);
964} 1027}
965 1028
966static 1029static int alloc_nand_resource(struct platform_device *pdev)
967struct pxa3xx_nand_info *alloc_nand_resource(struct platform_device *pdev)
968{ 1030{
1031 struct pxa3xx_nand_platform_data *pdata;
969 struct pxa3xx_nand_info *info; 1032 struct pxa3xx_nand_info *info;
1033 struct pxa3xx_nand_host *host;
970 struct nand_chip *chip; 1034 struct nand_chip *chip;
971 struct mtd_info *mtd; 1035 struct mtd_info *mtd;
972 struct resource *r; 1036 struct resource *r;
973 int ret, irq; 1037 int ret, irq, cs;
974 1038
975 mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info), 1039 pdata = pdev->dev.platform_data;
976 GFP_KERNEL); 1040 info = kzalloc(sizeof(*info) + (sizeof(*mtd) +
977 if (!mtd) { 1041 sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1042 if (!info) {
978 dev_err(&pdev->dev, "failed to allocate memory\n"); 1043 dev_err(&pdev->dev, "failed to allocate memory\n");
979 return NULL; 1044 return -ENOMEM;
980 } 1045 }
981 1046
982 info = (struct pxa3xx_nand_info *)(&mtd[1]);
983 chip = (struct nand_chip *)(&mtd[1]);
984 info->pdev = pdev; 1047 info->pdev = pdev;
985 info->mtd = mtd; 1048 for (cs = 0; cs < pdata->num_cs; cs++) {
986 mtd->priv = info; 1049 mtd = (struct mtd_info *)((unsigned int)&info[1] +
987 mtd->owner = THIS_MODULE; 1050 (sizeof(*mtd) + sizeof(*host)) * cs);
988 1051 chip = (struct nand_chip *)(&mtd[1]);
989 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc; 1052 host = (struct pxa3xx_nand_host *)chip;
990 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc; 1053 info->host[cs] = host;
991 chip->controller = &info->controller; 1054 host->mtd = mtd;
992 chip->waitfunc = pxa3xx_nand_waitfunc; 1055 host->cs = cs;
993 chip->select_chip = pxa3xx_nand_select_chip; 1056 host->info_data = info;
994 chip->dev_ready = pxa3xx_nand_dev_ready; 1057 mtd->priv = host;
995 chip->cmdfunc = pxa3xx_nand_cmdfunc; 1058 mtd->owner = THIS_MODULE;
996 chip->read_word = pxa3xx_nand_read_word; 1059
997 chip->read_byte = pxa3xx_nand_read_byte; 1060 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
998 chip->read_buf = pxa3xx_nand_read_buf; 1061 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
999 chip->write_buf = pxa3xx_nand_write_buf; 1062 chip->controller = &info->controller;
1000 chip->verify_buf = pxa3xx_nand_verify_buf; 1063 chip->waitfunc = pxa3xx_nand_waitfunc;
1064 chip->select_chip = pxa3xx_nand_select_chip;
1065 chip->cmdfunc = pxa3xx_nand_cmdfunc;
1066 chip->read_word = pxa3xx_nand_read_word;
1067 chip->read_byte = pxa3xx_nand_read_byte;
1068 chip->read_buf = pxa3xx_nand_read_buf;
1069 chip->write_buf = pxa3xx_nand_write_buf;
1070 chip->verify_buf = pxa3xx_nand_verify_buf;
1071 }
1001 1072
1002 spin_lock_init(&chip->controller->lock); 1073 spin_lock_init(&chip->controller->lock);
1003 init_waitqueue_head(&chip->controller->wq); 1074 init_waitqueue_head(&chip->controller->wq);
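alloc_nand_resource() now returns an errno and carves everything out of a single kzalloc(): the controller-wide info is followed, for each chip select, by an mtd_info and then the host structure, whose first member is the nand_chip, which is why the code can treat mtd->priv as either a chip or a host pointer. A standalone sketch of the same layout arithmetic with dummy structures; char-pointer arithmetic stands in for the driver's integer casts.

/* Sketch of the single-allocation layout used by alloc_nand_resource():
 * [ info | mtd0 | host0 | mtd1 | host1 | ... ], one mtd+host pair per cs. */
#include <stdio.h>
#include <stdlib.h>

struct mtd_info  { void *priv; };
struct nand_chip { char pad[64]; };		/* stand-in for the real thing */
struct host {
	struct nand_chip chip;			/* first member: mtd->priv aliases both */
	struct mtd_info *mtd;
	int cs;
	void *info_data;
};
struct info { struct host *host[8]; };

int main(void)
{
	int num_cs = 2, cs;
	size_t per_cs = sizeof(struct mtd_info) + sizeof(struct host);
	struct info *info = calloc(1, sizeof(*info) + per_cs * num_cs);

	if (!info)
		return 1;

	for (cs = 0; cs < num_cs; cs++) {
		struct mtd_info *mtd = (struct mtd_info *)((char *)(info + 1) + per_cs * cs);
		struct host *host = (struct host *)(mtd + 1);

		info->host[cs] = host;
		host->mtd = mtd;
		host->cs = cs;
		host->info_data = info;
		mtd->priv = host;		/* priv now points at the host, not at info */
	}

	printf("info=%p host0=%p host1=%p\n", (void *)info,
	       (void *)info->host[0], (void *)info->host[1]);
	free(info);
	return 0;
}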
@@ -1070,13 +1141,13 @@ struct pxa3xx_nand_info *alloc_nand_resource(struct platform_device *pdev)
1070 1141
1071 platform_set_drvdata(pdev, info); 1142 platform_set_drvdata(pdev, info);
1072 1143
1073 return info; 1144 return 0;
1074 1145
1075fail_free_buf: 1146fail_free_buf:
1076 free_irq(irq, info); 1147 free_irq(irq, info);
1077 if (use_dma) { 1148 if (use_dma) {
1078 pxa_free_dma(info->data_dma_ch); 1149 pxa_free_dma(info->data_dma_ch);
1079 dma_free_coherent(&pdev->dev, info->data_buff_size, 1150 dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
1080 info->data_buff, info->data_buff_phys); 1151 info->data_buff, info->data_buff_phys);
1081 } else 1152 } else
1082 kfree(info->data_buff); 1153 kfree(info->data_buff);
@@ -1088,17 +1159,21 @@ fail_put_clk:
1088 clk_disable(info->clk); 1159 clk_disable(info->clk);
1089 clk_put(info->clk); 1160 clk_put(info->clk);
1090fail_free_mtd: 1161fail_free_mtd:
1091 kfree(mtd); 1162 kfree(info);
1092 return NULL; 1163 return ret;
1093} 1164}
1094 1165
1095static int pxa3xx_nand_remove(struct platform_device *pdev) 1166static int pxa3xx_nand_remove(struct platform_device *pdev)
1096{ 1167{
1097 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); 1168 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1098 struct mtd_info *mtd = info->mtd; 1169 struct pxa3xx_nand_platform_data *pdata;
1099 struct resource *r; 1170 struct resource *r;
1100 int irq; 1171 int irq, cs;
1101 1172
1173 if (!info)
1174 return 0;
1175
1176 pdata = pdev->dev.platform_data;
1102 platform_set_drvdata(pdev, NULL); 1177 platform_set_drvdata(pdev, NULL);
1103 1178
1104 irq = platform_get_irq(pdev, 0); 1179 irq = platform_get_irq(pdev, 0);
@@ -1106,7 +1181,7 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
1106 free_irq(irq, info); 1181 free_irq(irq, info);
1107 if (use_dma) { 1182 if (use_dma) {
1108 pxa_free_dma(info->data_dma_ch); 1183 pxa_free_dma(info->data_dma_ch);
1109 dma_free_writecombine(&pdev->dev, info->data_buff_size, 1184 dma_free_writecombine(&pdev->dev, MAX_BUFF_SIZE,
1110 info->data_buff, info->data_buff_phys); 1185 info->data_buff, info->data_buff_phys);
1111 } else 1186 } else
1112 kfree(info->data_buff); 1187 kfree(info->data_buff);
@@ -1118,10 +1193,9 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
1118 clk_disable(info->clk); 1193 clk_disable(info->clk);
1119 clk_put(info->clk); 1194 clk_put(info->clk);
1120 1195
1121 if (mtd) { 1196 for (cs = 0; cs < pdata->num_cs; cs++)
1122 mtd_device_unregister(mtd); 1197 nand_release(info->host[cs]->mtd);
1123 kfree(mtd); 1198 kfree(info);
1124 }
1125 return 0; 1199 return 0;
1126} 1200}
1127 1201
@@ -1129,6 +1203,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1129{ 1203{
1130 struct pxa3xx_nand_platform_data *pdata; 1204 struct pxa3xx_nand_platform_data *pdata;
1131 struct pxa3xx_nand_info *info; 1205 struct pxa3xx_nand_info *info;
1206 int ret, cs, probe_success;
1132 1207
1133 pdata = pdev->dev.platform_data; 1208 pdata = pdev->dev.platform_data;
1134 if (!pdata) { 1209 if (!pdata) {
@@ -1136,52 +1211,88 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1136 return -ENODEV; 1211 return -ENODEV;
1137 } 1212 }
1138 1213
1139 info = alloc_nand_resource(pdev); 1214 ret = alloc_nand_resource(pdev);
1140 if (info == NULL) 1215 if (ret) {
1141 return -ENOMEM; 1216 dev_err(&pdev->dev, "alloc nand resource failed\n");
1142 1217 return ret;
1143 if (pxa3xx_nand_scan(info->mtd)) {
1144 dev_err(&pdev->dev, "failed to scan nand\n");
1145 pxa3xx_nand_remove(pdev);
1146 return -ENODEV;
1147 } 1218 }
1148 1219
1149 if (mtd_has_cmdlinepart()) { 1220 info = platform_get_drvdata(pdev);
1150 const char *probes[] = { "cmdlinepart", NULL }; 1221 probe_success = 0;
1151 struct mtd_partition *parts; 1222 for (cs = 0; cs < pdata->num_cs; cs++) {
1152 int nr_parts; 1223 info->cs = cs;
1224 ret = pxa3xx_nand_scan(info->host[cs]->mtd);
1225 if (ret) {
1226 dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1227 cs);
1228 continue;
1229 }
1153 1230
1154 nr_parts = parse_mtd_partitions(info->mtd, probes, &parts, 0); 1231 ret = mtd_device_parse_register(info->host[cs]->mtd, NULL, 0,
1232 pdata->parts[cs], pdata->nr_parts[cs]);
1233 if (!ret)
1234 probe_success = 1;
1235 }
1155 1236
1156 if (nr_parts) 1237 if (!probe_success) {
1157 return mtd_device_register(info->mtd, parts, nr_parts); 1238 pxa3xx_nand_remove(pdev);
1239 return -ENODEV;
1158 } 1240 }
1159 1241
1160 return mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts); 1242 return 0;
1161} 1243}
1162 1244
1163#ifdef CONFIG_PM 1245#ifdef CONFIG_PM
1164static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state) 1246static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1165{ 1247{
1166 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); 1248 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1167 struct mtd_info *mtd = info->mtd; 1249 struct pxa3xx_nand_platform_data *pdata;
1250 struct mtd_info *mtd;
1251 int cs;
1168 1252
1253 pdata = pdev->dev.platform_data;
1169 if (info->state) { 1254 if (info->state) {
1170 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state); 1255 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1171 return -EAGAIN; 1256 return -EAGAIN;
1172 } 1257 }
1173 1258
1259 for (cs = 0; cs < pdata->num_cs; cs++) {
1260 mtd = info->host[cs]->mtd;
1261 mtd->suspend(mtd);
1262 }
1263
1174 return 0; 1264 return 0;
1175} 1265}
1176 1266
1177static int pxa3xx_nand_resume(struct platform_device *pdev) 1267static int pxa3xx_nand_resume(struct platform_device *pdev)
1178{ 1268{
1179 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); 1269 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1180 struct mtd_info *mtd = info->mtd; 1270 struct pxa3xx_nand_platform_data *pdata;
1271 struct mtd_info *mtd;
1272 int cs;
1181 1273
1182 nand_writel(info, NDTR0CS0, info->ndtr0cs0); 1274 pdata = pdev->dev.platform_data;
1183 nand_writel(info, NDTR1CS0, info->ndtr1cs0); 1275 /* We don't want to handle interrupt without calling mtd routine */
1184 clk_enable(info->clk); 1276 disable_int(info, NDCR_INT_MASK);
1277
1278 /*
1279 * Directly set the chip select to a invalid value,
1280 * then the driver would reset the timing according
1281 * to current chip select at the beginning of cmdfunc
1282 */
1283 info->cs = 0xff;
1284
1285 /*
1286 * As the spec says, the NDSR would be updated to 0x1800 when
1287 * doing the nand_clk disable/enable.
1288 * To prevent it damaging state machine of the driver, clear
1289 * all status before resume
1290 */
1291 nand_writel(info, NDSR, NDSR_MASK);
1292 for (cs = 0; cs < pdata->num_cs; cs++) {
1293 mtd = info->host[cs]->mtd;
1294 mtd->resume(mtd);
1295 }
1185 1296
1186 return 0; 1297 return 0;
1187} 1298}
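The rewritten probe loops over every chip select declared in platform data, scans each one, and registers whatever answered through mtd_device_parse_register(); probing only fails if no chip select produced a usable mtd, and suspend/resume likewise walk all hosts (resume also clears NDSR and invalidates info->cs so the next command reloads the timings). The fragment below is a condensed restatement of that probe flow using the driver's own types and helpers from the hunks above, with resource setup, includes and error labels omitted.

/* Condensed restatement of the multi chip-select probe added above. */
static int example_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
	struct pxa3xx_nand_info *info;
	int cs, ret, probe_success = 0;

	ret = alloc_nand_resource(pdev);	/* allocates info + one host per cs */
	if (ret)
		return ret;
	info = platform_get_drvdata(pdev);

	for (cs = 0; cs < pdata->num_cs; cs++) {
		info->cs = cs;
		if (pxa3xx_nand_scan(info->host[cs]->mtd))
			continue;		/* nothing usable on this chip select */
		if (!mtd_device_parse_register(info->host[cs]->mtd, NULL, NULL,
					       pdata->parts[cs],
					       pdata->nr_parts[cs]))
			probe_success = 1;	/* at least one cs is registered */
	}

	if (!probe_success) {
		pxa3xx_nand_remove(pdev);
		return -ENODEV;
	}
	return 0;
}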
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index cae2e013c986..f20f393bfda6 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -1027,7 +1027,7 @@ void r852_shutdown(struct pci_dev *pci_dev)
1027} 1027}
1028 1028
1029#ifdef CONFIG_PM 1029#ifdef CONFIG_PM
1030int r852_suspend(struct device *device) 1030static int r852_suspend(struct device *device)
1031{ 1031{
1032 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); 1032 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
1033 1033
@@ -1048,7 +1048,7 @@ int r852_suspend(struct device *device)
1048 return 0; 1048 return 0;
1049} 1049}
1050 1050
1051int r852_resume(struct device *device) 1051static int r852_resume(struct device *device)
1052{ 1052{
1053 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); 1053 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
1054 1054
@@ -1092,7 +1092,7 @@ static const struct pci_device_id r852_pci_id_tbl[] = {
1092 1092
1093MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl); 1093MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
1094 1094
1095SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume); 1095static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
1096 1096
1097static struct pci_driver r852_pci_driver = { 1097static struct pci_driver r852_pci_driver = {
1098 .name = DRV_NAME, 1098 .name = DRV_NAME,
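The r852 hunks only add static: SIMPLE_DEV_PM_OPS() defines an ordinary struct dev_pm_ops object wired to the two system-sleep callbacks, and since nothing outside this file references r852_pm_ops, r852_suspend() or r852_resume(), all three can have internal linkage, which is what the corresponding sparse warnings were about. A hedged sketch of the usual shape follows; the .driver.pm hookup and all example_* names are assumptions, not taken from this hunk.

#include <linux/pci.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev) { return 0; }
static int example_resume(struct device *dev)  { return 0; }

/* Expands to "static const struct dev_pm_ops example_pm_ops = { ... }"
 * with the two callbacks plugged into the system-sleep slots. */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct pci_driver example_pci_driver = {
	.name      = "example",
	.driver.pm = &example_pm_ops,	/* assumed hookup via the embedded device_driver */
};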
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
index c9f9127ff770..f309addc2fa0 100644
--- a/drivers/mtd/nand/rtc_from4.c
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -351,7 +351,7 @@ static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_cha
351 return 0; 351 return 0;
352 } 352 }
353 353
354 /* Read the syndrom pattern from the FPGA and correct the bitorder */ 354 /* Read the syndrome pattern from the FPGA and correct the bitorder */
355 rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC); 355 rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC);
356 for (i = 0; i < 8; i++) { 356 for (i = 0; i < 8; i++) {
357 ecc[i] = bitrev8(*rs_ecc); 357 ecc[i] = bitrev8(*rs_ecc);
@@ -380,7 +380,7 @@ static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_cha
380 /* Let the library code do its magic. */ 380 /* Let the library code do its magic. */
381 res = decode_rs8(rs_decoder, (uint8_t *) buf, par, 512, syn, 0, NULL, 0xff, NULL); 381 res = decode_rs8(rs_decoder, (uint8_t *) buf, par, 512, syn, 0, NULL, 0xff, NULL);
382 if (res > 0) { 382 if (res > 0) {
383 DEBUG(MTD_DEBUG_LEVEL0, "rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res); 383 pr_debug("rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res);
384 } 384 }
385 return res; 385 return res;
386} 386}
@@ -444,7 +444,6 @@ static int rtc_from4_errstat(struct mtd_info *mtd, struct nand_chip *this,
444 len = mtd->writesize; 444 len = mtd->writesize;
445 buf = kmalloc(len, GFP_KERNEL); 445 buf = kmalloc(len, GFP_KERNEL);
446 if (!buf) { 446 if (!buf) {
447 printk(KERN_ERR "rtc_from4_errstat: Out of memory!\n");
448 er_stat = 1; 447 er_stat = 1;
449 goto out; 448 goto out;
450 } 449 }
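The rtc_from4 hunks swap the old DEBUG(MTD_DEBUG_LEVEL0, ...) macro for plain pr_debug(), which compiles away unless DEBUG or dynamic debug is enabled, and drop the out-of-memory printk, since the allocator already logs allocation failures on its own. A tiny sketch of the pr_debug() style; the pr_fmt prefix and the helper are illustrative, standing in for the old hand-written "rtc_from4_...:" tags.

/* pr_fmt must be defined before the printk include so pr_debug() picks it up. */
#define pr_fmt(fmt) "rtc_from4: " fmt

#include <linux/printk.h>

static void report_correction(int res)
{
	if (res > 0)
		pr_debug("ECC corrected %d errors on read\n", res);
}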
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 4405468f196b..868685db6712 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -723,7 +723,7 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
723 723
724 /* free the common resources */ 724 /* free the common resources */
725 725
726 if (info->clk != NULL && !IS_ERR(info->clk)) { 726 if (!IS_ERR(info->clk)) {
727 s3c2410_nand_clk_set_state(info, CLOCK_DISABLE); 727 s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
728 clk_put(info->clk); 728 clk_put(info->clk);
729 } 729 }
@@ -744,26 +744,15 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
744 return 0; 744 return 0;
745} 745}
746 746
747const char *part_probes[] = { "cmdlinepart", NULL };
748static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info, 747static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
749 struct s3c2410_nand_mtd *mtd, 748 struct s3c2410_nand_mtd *mtd,
750 struct s3c2410_nand_set *set) 749 struct s3c2410_nand_set *set)
751{ 750{
752 struct mtd_partition *part_info; 751 if (set)
753 int nr_part = 0; 752 mtd->mtd.name = set->name;
754 753
755 if (set == NULL) 754 return mtd_device_parse_register(&mtd->mtd, NULL, 0,
756 return mtd_device_register(&mtd->mtd, NULL, 0); 755 set->partitions, set->nr_partitions);
757
758 mtd->mtd.name = set->name;
759 nr_part = parse_mtd_partitions(&mtd->mtd, part_probes, &part_info, 0);
760
761 if (nr_part <= 0 && set->nr_partitions > 0) {
762 nr_part = set->nr_partitions;
763 part_info = set->partitions;
764 }
765
766 return mtd_device_register(&mtd->mtd, part_info, nr_part);
767} 756}
768 757
769/** 758/**
@@ -880,8 +869,10 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
880 /* If you use u-boot BBT creation code, specifying this flag will 869 /* If you use u-boot BBT creation code, specifying this flag will
881 * let the kernel fish out the BBT from the NAND, and also skip the 870 * let the kernel fish out the BBT from the NAND, and also skip the
882 * full NAND scan that can take 1/2s or so. Little things... */ 871 * full NAND scan that can take 1/2s or so. Little things... */
883 if (set->flash_bbt) 872 if (set->flash_bbt) {
884 chip->options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN; 873 chip->bbt_options |= NAND_BBT_USE_FLASH;
874 chip->options |= NAND_SKIP_BBTSCAN;
875 }
885} 876}
886 877
887/** 878/**
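In s3c2410_nand_init_chip() the bad-block-table request moves to the new chip->bbt_options field: NAND_BBT_USE_FLASH replaces the old NAND_USE_FLASH_BBT chip option, while NAND_SKIP_BBTSCAN remains an ordinary option (the partition hookup in this file also switches to mtd_device_parse_register(), as in the other drivers below). A short fragment mirroring the split; the wrapper name and the bool parameter are illustrative.

#include <linux/types.h>
#include <linux/mtd/nand.h>

/* Fragment: request a flash-resident BBT with the split option fields. */
static void example_init_chip(struct nand_chip *chip, bool flash_bbt)
{
	if (flash_bbt) {
		chip->bbt_options |= NAND_BBT_USE_FLASH;	/* BBT lives in flash */
		chip->options     |= NAND_SKIP_BBTSCAN;		/* u-boot already built it */
	}
}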
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 19e24ed089ea..619d2a504788 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -103,16 +103,12 @@ static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat,
103 return readb(sharpsl->io + ECCCNTR) != 0; 103 return readb(sharpsl->io + ECCCNTR) != 0;
104} 104}
105 105
106static const char *part_probes[] = { "cmdlinepart", NULL };
107
108/* 106/*
109 * Main initialization routine 107 * Main initialization routine
110 */ 108 */
111static int __devinit sharpsl_nand_probe(struct platform_device *pdev) 109static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
112{ 110{
113 struct nand_chip *this; 111 struct nand_chip *this;
114 struct mtd_partition *sharpsl_partition_info;
115 int nr_partitions;
116 struct resource *r; 112 struct resource *r;
117 int err = 0; 113 int err = 0;
118 struct sharpsl_nand *sharpsl; 114 struct sharpsl_nand *sharpsl;
@@ -184,14 +180,9 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
184 180
185 /* Register the partitions */ 181 /* Register the partitions */
186 sharpsl->mtd.name = "sharpsl-nand"; 182 sharpsl->mtd.name = "sharpsl-nand";
187 nr_partitions = parse_mtd_partitions(&sharpsl->mtd, part_probes, &sharpsl_partition_info, 0);
188 if (nr_partitions <= 0) {
189 nr_partitions = data->nr_partitions;
190 sharpsl_partition_info = data->partitions;
191 }
192 183
193 err = mtd_device_register(&sharpsl->mtd, sharpsl_partition_info, 184 err = mtd_device_parse_register(&sharpsl->mtd, NULL, 0,
194 nr_partitions); 185 data->partitions, data->nr_partitions);
195 if (err) 186 if (err)
196 goto err_add; 187 goto err_add;
197 188
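sharpsl, like tmio_nand, txx9ndfmc and the other drivers in this merge, drops its private part_probes[] array and the parse_mtd_partitions() plus mtd_device_register() pair in favour of a single mtd_device_parse_register() call: with a NULL probe-type list the MTD core runs its default partition parsers (cmdlinepart at the time of this merge) and falls back to the board-supplied table when they find nothing. A minimal fragment of that pattern; the wrapper name is hypothetical.

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Fragment: let the MTD core try its parsers, else register board partitions. */
static int example_register_parts(struct mtd_info *mtd,
				  struct mtd_partition *board_parts,
				  int board_nr_parts)
{
	return mtd_device_parse_register(mtd, NULL, NULL,
					 board_parts, board_nr_parts);
}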
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index 43469715b3fa..32ae5af7444f 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -48,7 +48,7 @@ static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
48 48
49 /* As long as this function is called on erase block boundaries 49 /* As long as this function is called on erase block boundaries
50 it will work correctly for 256 byte nand */ 50 it will work correctly for 256 byte nand */
51 ops.mode = MTD_OOB_PLACE; 51 ops.mode = MTD_OPS_PLACE_OOB;
52 ops.ooboffs = 0; 52 ops.ooboffs = 0;
53 ops.ooblen = mtd->oobsize; 53 ops.ooblen = mtd->oobsize;
54 ops.oobbuf = (void *)&oob; 54 ops.oobbuf = (void *)&oob;
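The sm_common change only tracks an MTD constant rename: the OOB placement mode is now spelled MTD_OPS_PLACE_OOB instead of MTD_OOB_PLACE. Below is a fragment showing the renamed constant in a typical OOB-only mtd_oob_ops setup; the helper name is hypothetical and the driver's actual call into the OOB write path is not repeated here.

#include <linux/types.h>
#include <linux/string.h>
#include <linux/mtd/mtd.h>

/* Fragment: describe an OOB-only operation with the renamed mode constant. */
static void example_fill_oob_ops(struct mtd_info *mtd, struct mtd_oob_ops *ops,
				 uint8_t *oobbuf)
{
	memset(ops, 0, sizeof(*ops));
	ops->mode    = MTD_OPS_PLACE_OOB;	/* place bytes at ooboffs in the raw OOB */
	ops->ooboffs = 0;
	ops->ooblen  = mtd->oobsize;
	ops->oobbuf  = oobbuf;
	ops->datbuf  = NULL;			/* no main-area data */
}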
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index ca2d0555729e..0fb24f9c2327 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -155,8 +155,6 @@ static int socrates_nand_device_ready(struct mtd_info *mtd)
155 return 1; 155 return 1;
156} 156}
157 157
158static const char *part_probes[] = { "cmdlinepart", NULL };
159
160/* 158/*
161 * Probe for the NAND device. 159 * Probe for the NAND device.
162 */ 160 */
@@ -166,8 +164,7 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
166 struct mtd_info *mtd; 164 struct mtd_info *mtd;
167 struct nand_chip *nand_chip; 165 struct nand_chip *nand_chip;
168 int res; 166 int res;
169 struct mtd_partition *partitions = NULL; 167 struct mtd_part_parser_data ppdata;
170 int num_partitions = 0;
171 168
172 /* Allocate memory for the device structure (and zero it) */ 169 /* Allocate memory for the device structure (and zero it) */
173 host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL); 170 host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL);
@@ -193,6 +190,7 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
193 mtd->name = "socrates_nand"; 190 mtd->name = "socrates_nand";
194 mtd->owner = THIS_MODULE; 191 mtd->owner = THIS_MODULE;
195 mtd->dev.parent = &ofdev->dev; 192 mtd->dev.parent = &ofdev->dev;
193 ppdata.of_node = ofdev->dev.of_node;
196 194
197 /*should never be accessed directly */ 195 /*should never be accessed directly */
198 nand_chip->IO_ADDR_R = (void *)0xdeadbeef; 196 nand_chip->IO_ADDR_R = (void *)0xdeadbeef;
@@ -225,30 +223,10 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
225 goto out; 223 goto out;
226 } 224 }
227 225
228#ifdef CONFIG_MTD_CMDLINE_PARTS 226 res = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
229 num_partitions = parse_mtd_partitions(mtd, part_probes,
230 &partitions, 0);
231 if (num_partitions < 0) {
232 res = num_partitions;
233 goto release;
234 }
235#endif
236
237 if (num_partitions == 0) {
238 num_partitions = of_mtd_parse_partitions(&ofdev->dev,
239 ofdev->dev.of_node,
240 &partitions);
241 if (num_partitions < 0) {
242 res = num_partitions;
243 goto release;
244 }
245 }
246
247 res = mtd_device_register(mtd, partitions, num_partitions);
248 if (!res) 227 if (!res)
249 return res; 228 return res;
250 229
251release:
252 nand_release(mtd); 230 nand_release(mtd);
253 231
254out: 232out:
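socrates_nand no longer parses command-line or device-tree partitions by hand: it fills a mtd_part_parser_data with the device's of_node and lets mtd_device_parse_register() drive the parsers (including the OF partition parser) with no fallback table, which is why the old CONFIG_MTD_CMDLINE_PARTS block and the of_mtd_parse_partitions() call could go. A minimal fragment of the same idea; the wrapper name is hypothetical.

#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Fragment: hand the OF node to the core so parsers like ofpart can run. */
static int example_of_register(struct mtd_info *mtd, struct device_node *np)
{
	struct mtd_part_parser_data ppdata = { .of_node = np };

	return mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
}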
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 11e8371b5683..beebd95f7690 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -121,9 +121,6 @@ struct tmio_nand {
121 121
122#define mtd_to_tmio(m) container_of(m, struct tmio_nand, mtd) 122#define mtd_to_tmio(m) container_of(m, struct tmio_nand, mtd)
123 123
124#ifdef CONFIG_MTD_CMDLINE_PARTS
125static const char *part_probes[] = { "cmdlinepart", NULL };
126#endif
127 124
128/*--------------------------------------------------------------------------*/ 125/*--------------------------------------------------------------------------*/
129 126
@@ -381,8 +378,6 @@ static int tmio_probe(struct platform_device *dev)
381 struct tmio_nand *tmio; 378 struct tmio_nand *tmio;
382 struct mtd_info *mtd; 379 struct mtd_info *mtd;
383 struct nand_chip *nand_chip; 380 struct nand_chip *nand_chip;
384 struct mtd_partition *parts;
385 int nbparts = 0;
386 int retval; 381 int retval;
387 382
388 if (data == NULL) 383 if (data == NULL)
@@ -461,15 +456,9 @@ static int tmio_probe(struct platform_device *dev)
461 goto err_scan; 456 goto err_scan;
462 } 457 }
463 /* Register the partitions */ 458 /* Register the partitions */
464#ifdef CONFIG_MTD_CMDLINE_PARTS 459 retval = mtd_device_parse_register(mtd, NULL, 0,
465 nbparts = parse_mtd_partitions(mtd, part_probes, &parts, 0); 460 data ? data->partition : NULL,
466#endif 461 data ? data->num_partitions : 0);
467 if (nbparts <= 0 && data) {
468 parts = data->partition;
469 nbparts = data->num_partitions;
470 }
471
472 retval = mtd_device_register(mtd, parts, nbparts);
473 if (!retval) 462 if (!retval)
474 return retval; 463 return retval;
475 464
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index bfba4e39a6c5..ace46fdaef58 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -74,7 +74,6 @@ struct txx9ndfmc_drvdata {
74 unsigned char hold; /* in gbusclock */ 74 unsigned char hold; /* in gbusclock */
75 unsigned char spw; /* in gbusclock */ 75 unsigned char spw; /* in gbusclock */
76 struct nand_hw_control hw_control; 76 struct nand_hw_control hw_control;
77 struct mtd_partition *parts[MAX_TXX9NDFMC_DEV];
78}; 77};
79 78
80static struct platform_device *mtd_to_platdev(struct mtd_info *mtd) 79static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
@@ -287,7 +286,6 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
287static int __init txx9ndfmc_probe(struct platform_device *dev) 286static int __init txx9ndfmc_probe(struct platform_device *dev)
288{ 287{
289 struct txx9ndfmc_platform_data *plat = dev->dev.platform_data; 288 struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
290 static const char *probes[] = { "cmdlinepart", NULL };
291 int hold, spw; 289 int hold, spw;
292 int i; 290 int i;
293 struct txx9ndfmc_drvdata *drvdata; 291 struct txx9ndfmc_drvdata *drvdata;
@@ -333,7 +331,6 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
333 struct txx9ndfmc_priv *txx9_priv; 331 struct txx9ndfmc_priv *txx9_priv;
334 struct nand_chip *chip; 332 struct nand_chip *chip;
335 struct mtd_info *mtd; 333 struct mtd_info *mtd;
336 int nr_parts;
337 334
338 if (!(plat->ch_mask & (1 << i))) 335 if (!(plat->ch_mask & (1 << i)))
339 continue; 336 continue;
@@ -393,9 +390,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
393 } 390 }
394 mtd->name = txx9_priv->mtdname; 391 mtd->name = txx9_priv->mtdname;
395 392
396 nr_parts = parse_mtd_partitions(mtd, probes, 393 mtd_device_parse_register(mtd, NULL, 0, NULL, 0);
397 &drvdata->parts[i], 0);
398 mtd_device_register(mtd, drvdata->parts[i], nr_parts);
399 drvdata->mtds[i] = mtd; 394 drvdata->mtds[i] = mtd;
400 } 395 }
401 396
@@ -421,7 +416,6 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
421 txx9_priv = chip->priv; 416 txx9_priv = chip->priv;
422 417
423 nand_release(mtd); 418 nand_release(mtd);
424 kfree(drvdata->parts[i]);
425 kfree(txx9_priv->mtdname); 419 kfree(txx9_priv->mtdname);
426 kfree(txx9_priv); 420 kfree(txx9_priv);
427 } 421 }