author		Grant Likely <grant.likely@secretlab.ca>	2010-05-22 02:36:56 -0400
committer	Grant Likely <grant.likely@secretlab.ca>	2010-05-22 02:36:56 -0400
commit		cf9b59e9d3e008591d1f54830f570982bb307a0d (patch)
tree		113478ce8fd8c832ba726ffdf59b82cb46356476 /drivers/mtd/nand
parent		44504b2bebf8b5823c59484e73096a7d6574471d (diff)
parent		f4b87dee923342505e1ddba8d34ce9de33e75050 (diff)
Merge remote branch 'origin' into secretlab/next-devicetree
Merging in current state of Linus' tree to deal with merge conflicts and
build failures in vio.c after merge.

Conflicts:
	drivers/i2c/busses/i2c-cpm.c
	drivers/i2c/busses/i2c-mpc.c
	drivers/net/gianfar.c

Also fixed up one line in arch/powerpc/kernel/vio.c to use the correct
node pointer.

Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Diffstat (limited to 'drivers/mtd/nand')
-rw-r--r--  drivers/mtd/nand/Kconfig                69
-rw-r--r--  drivers/mtd/nand/Makefile               10
-rw-r--r--  drivers/mtd/nand/alauda.c                2
-rw-r--r--  drivers/mtd/nand/atmel_nand.c            2
-rw-r--r--  drivers/mtd/nand/au1550nd.c             12
-rw-r--r--  drivers/mtd/nand/bcm_umi_nand.c          3
-rw-r--r--  drivers/mtd/nand/bf5xx_nand.c           29
-rw-r--r--  drivers/mtd/nand/cafe_nand.c             4
-rw-r--r--  drivers/mtd/nand/davinci_nand.c          6
-rw-r--r--  drivers/mtd/nand/denali.c             2134
-rw-r--r--  drivers/mtd/nand/denali.h              816
-rw-r--r--  drivers/mtd/nand/fsl_elbc_nand.c         4
-rw-r--r--  drivers/mtd/nand/fsl_upm.c               9
-rw-r--r--  drivers/mtd/nand/gpio.c                 12
-rw-r--r--  drivers/mtd/nand/mpc5121_nfc.c         917
-rw-r--r--  drivers/mtd/nand/mxc_nand.c            146
-rw-r--r--  drivers/mtd/nand/nand_base.c           387
-rw-r--r--  drivers/mtd/nand/nand_bbt.c             29
-rw-r--r--  drivers/mtd/nand/nand_bcm_umi.h         71
-rw-r--r--  drivers/mtd/nand/nand_ids.c              1
-rw-r--r--  drivers/mtd/nand/nandsim.c              17
-rw-r--r--  drivers/mtd/nand/nomadik_nand.c          6
-rw-r--r--  drivers/mtd/nand/nuc900_nand.c (renamed from drivers/mtd/nand/w90p910_nand.c)  144
-rw-r--r--  drivers/mtd/nand/omap2.c                16
-rw-r--r--  drivers/mtd/nand/orion_nand.c           21
-rw-r--r--  drivers/mtd/nand/pasemi_nand.c           2
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c          11
-rw-r--r--  drivers/mtd/nand/r852.c               1140
-rw-r--r--  drivers/mtd/nand/r852.h                163
-rw-r--r--  drivers/mtd/nand/s3c2410.c              12
-rw-r--r--  drivers/mtd/nand/sh_flctl.c              2
-rw-r--r--  drivers/mtd/nand/sm_common.c           148
-rw-r--r--  drivers/mtd/nand/sm_common.h            61
-rw-r--r--  drivers/mtd/nand/socrates_nand.c         4
-rw-r--r--  drivers/mtd/nand/tmio_nand.c            14
-rw-r--r--  drivers/mtd/nand/ts7250.c              207
-rw-r--r--  drivers/mtd/nand/txx9ndfmc.c             2
37 files changed, 6072 insertions, 561 deletions
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 42e5ea49e97..98a04b3c952 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -2,11 +2,23 @@ menuconfig MTD_NAND
 	tristate "NAND Device Support"
 	depends on MTD
 	select MTD_NAND_IDS
+	select MTD_NAND_ECC
 	help
 	  This enables support for accessing all type of NAND flash
 	  devices. For further information see
 	  <http://www.linux-mtd.infradead.org/doc/nand.html>.
 
+config MTD_NAND_ECC
+	tristate
+
+config MTD_NAND_ECC_SMC
+	bool "NAND ECC Smart Media byte order"
+	depends on MTD_NAND_ECC
+	default n
+	help
+	  Software ECC according to the Smart Media Specification.
+	  The original Linux implementation had byte 0 and 1 swapped.
+
 if MTD_NAND
 
 config MTD_NAND_VERIFY_WRITE
@@ -18,12 +30,9 @@ config MTD_NAND_VERIFY_WRITE
 	  device thinks the write was successful, a bit could have been
 	  flipped accidentally due to device wear or something else.
 
-config MTD_NAND_ECC_SMC
-	bool "NAND ECC Smart Media byte order"
-	default n
-	help
-	  Software ECC according to the Smart Media Specification.
-	  The original Linux implementation had byte 0 and 1 swapped.
+config MTD_SM_COMMON
+	tristate
+	default n
 
 config MTD_NAND_MUSEUM_IDS
 	bool "Enable chip ids for obsolete ancient NAND devices"
@@ -41,6 +50,23 @@ config MTD_NAND_AUTCPU12
 	  This enables the driver for the autronix autcpu12 board to
 	  access the SmartMediaCard.
 
+config MTD_NAND_DENALI
+	depends on PCI
+	tristate "Support Denali NAND controller on Intel Moorestown"
+	help
+	  Enable the driver for NAND flash on Intel Moorestown, using the
+	  Denali NAND controller core.
+
+config MTD_NAND_DENALI_SCRATCH_REG_ADDR
+	hex "Denali NAND size scratch register address"
+	default "0xFF108018"
+	help
+	  Some platforms place the NAND chip size in a scratch register
+	  because (some versions of) the driver aren't able to automatically
+	  determine the size of certain chips. Set the address of the
+	  scratch register here to enable this feature. On Intel Moorestown
+	  boards, the scratch register is at 0xFF108018.
+
 config MTD_NAND_EDB7312
 	tristate "Support for Cirrus Logic EBD7312 evaluation board"
 	depends on ARCH_EDB7312
@@ -95,15 +121,21 @@ config MTD_NAND_OMAP_PREFETCH_DMA
 	  or in DMA interrupt mode.
 	  Say y for DMA mode or MPU mode will be used
 
-config MTD_NAND_TS7250
-	tristate "NAND Flash device on TS-7250 board"
-	depends on MACH_TS72XX
-	help
-	  Support for NAND flash on Technologic Systems TS-7250 platform.
-
 config MTD_NAND_IDS
 	tristate
 
+config MTD_NAND_RICOH
+	tristate "Ricoh xD card reader"
+	default n
+	depends on PCI
+	select MTD_SM_COMMON
+	help
+	  Enable support for Ricoh R5C852 xD card reader
+	  You also need to enable ether
+	  NAND SSFDC (SmartMedia) read only translation layer' or new
+	  expermental, readwrite
+	  'SmartMedia/xD new translation layer'
+
 config MTD_NAND_AU1550
 	tristate "Au1550/1200 NAND support"
 	depends on SOC_AU1200 || SOC_AU1550
@@ -358,8 +390,6 @@ config MTD_NAND_ATMEL_ECC_NONE
 
 	  If unsure, say N
 
-	endchoice
-
 endchoice
 
 config MTD_NAND_PXA3xx
@@ -442,6 +472,13 @@ config MTD_NAND_FSL_UPM
 	  Enables support for NAND Flash chips wired onto Freescale PowerPC
 	  processor localbus with User-Programmable Machine support.
 
+config MTD_NAND_MPC5121_NFC
+	tristate "MPC5121 built-in NAND Flash Controller support"
+	depends on PPC_MPC512x
+	help
+	  This enables the driver for the NAND flash controller on the
+	  MPC5121 SoC.
+
 config MTD_NAND_MXC
 	tristate "MXC NAND support"
 	depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3
@@ -481,11 +518,11 @@ config MTD_NAND_SOCRATES
 	help
 	  Enables support for NAND Flash chips wired onto Socrates board.
 
-config MTD_NAND_W90P910
-	tristate "Support for NAND on w90p910 evaluation board."
+config MTD_NAND_NUC900
+	tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
 	depends on ARCH_W90X900 && MTD_PARTITIONS
 	help
 	  This enables the driver for the NAND Flash on evaluation board based
-	  on w90p910.
+	  on w90p910 / NUC9xx.
 
 endif # MTD_NAND
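
Note on the MTD_NAND_DENALI_SCRATCH_REG_ADDR help text above: the new denali.c falls back to a platform scratch register when it cannot size a chip automatically, reading a log2 block count and applying a sanity floor. The sketch below only illustrates that lookup pattern (modelled on get_toshiba_nand_para()/get_hynix_nand_para() later in this patch) and is not the driver's exact code; SCRATCH_REG_ADDR, SCRATCH_REG_SIZE and GLOB_HWCTL_DEFAULT_BLKS are assumed to be provided by denali.h.

	#include <linux/io.h>

	/* Illustrative only -- not the driver's exact code. */
	static unsigned int scratch_reg_total_blocks(void)
	{
		void __iomem *scratch = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
		unsigned int total_blocks = GLOB_HWCTL_DEFAULT_BLKS;

		if (scratch) {
			/* The register holds log2 of the number of blocks. */
			total_blocks = 1 << ioread8(scratch);
			if (total_blocks < 512)	/* implausibly small: keep the default */
				total_blocks = GLOB_HWCTL_DEFAULT_BLKS;
			iounmap(scratch);
		}
		return total_blocks;
	}
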
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 1407bd14401..e8ab884ba47 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -2,13 +2,16 @@
 # linux/drivers/nand/Makefile
 #
 
-obj-$(CONFIG_MTD_NAND)			+= nand.o nand_ecc.o
+obj-$(CONFIG_MTD_NAND)			+= nand.o
+obj-$(CONFIG_MTD_NAND_ECC)		+= nand_ecc.o
 obj-$(CONFIG_MTD_NAND_IDS)		+= nand_ids.o
+obj-$(CONFIG_MTD_SM_COMMON)		+= sm_common.o
 
 obj-$(CONFIG_MTD_NAND_CAFE)		+= cafe_nand.o
 obj-$(CONFIG_MTD_NAND_SPIA)		+= spia.o
 obj-$(CONFIG_MTD_NAND_AMS_DELTA)	+= ams-delta.o
 obj-$(CONFIG_MTD_NAND_AUTCPU12)		+= autcpu12.o
+obj-$(CONFIG_MTD_NAND_DENALI)		+= denali.o
 obj-$(CONFIG_MTD_NAND_EDB7312)		+= edb7312.o
 obj-$(CONFIG_MTD_NAND_AU1550)		+= au1550nd.o
 obj-$(CONFIG_MTD_NAND_BF5XX)		+= bf5xx_nand.o
@@ -19,7 +22,6 @@ obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
 obj-$(CONFIG_MTD_NAND_H1900)		+= h1910.o
 obj-$(CONFIG_MTD_NAND_RTC_FROM4)	+= rtc_from4.o
 obj-$(CONFIG_MTD_NAND_SHARPSL)		+= sharpsl.o
-obj-$(CONFIG_MTD_NAND_TS7250)		+= ts7250.o
 obj-$(CONFIG_MTD_NAND_NANDSIM)		+= nandsim.o
 obj-$(CONFIG_MTD_NAND_CS553X)		+= cs553x_nand.o
 obj-$(CONFIG_MTD_NAND_NDFC)		+= ndfc.o
@@ -39,8 +41,10 @@ obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
 obj-$(CONFIG_MTD_NAND_MXC)		+= mxc_nand.o
 obj-$(CONFIG_MTD_NAND_SOCRATES)		+= socrates_nand.o
 obj-$(CONFIG_MTD_NAND_TXX9NDFMC)	+= txx9ndfmc.o
-obj-$(CONFIG_MTD_NAND_W90P910)		+= w90p910_nand.o
+obj-$(CONFIG_MTD_NAND_NUC900)		+= nuc900_nand.o
 obj-$(CONFIG_MTD_NAND_NOMADIK)		+= nomadik_nand.o
 obj-$(CONFIG_MTD_NAND_BCM_UMI)		+= bcm_umi_nand.o nand_bcm_umi.o
+obj-$(CONFIG_MTD_NAND_MPC5121_NFC)	+= mpc5121_nfc.o
+obj-$(CONFIG_MTD_NAND_RICOH)		+= r852.o
 
 nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 2d6773281fd..8691e0482ed 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -49,7 +49,7 @@
 
 #define TIMEOUT HZ
 
-static struct usb_device_id alauda_table [] = {
+static const struct usb_device_id alauda_table[] = {
 	{ USB_DEVICE(0x0584, 0x0008) },	/* Fujifilm DPC-R1 */
 	{ USB_DEVICE(0x07b4, 0x010a) },	/* Olympus MAUSB-10 */
 	{ }
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 524e6c9e067..04d30887ca7 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -474,7 +474,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
 	}
 
 	/* first scan to find the device and get the page size */
-	if (nand_scan_ident(mtd, 1)) {
+	if (nand_scan_ident(mtd, 1, NULL)) {
 		res = -ENXIO;
 		goto err_scan_ident;
 	}
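
The extra NULL argument in this hunk (and again in the bcm_umi_nand.c, cafe_nand.c and davinci_nand.c hunks below) matches a nand_base change brought in by this merge: nand_scan_ident() gained a third parameter for an optional caller-supplied chip ID table, with NULL selecting the built-in table. Sketch of the assumed prototype (check include/linux/mtd/nand.h for the authoritative declaration):

	/* Assumed prototype; NULL for table keeps the default nand_flash_ids. */
	int nand_scan_ident(struct mtd_info *mtd, int maxchips,
			    struct nand_flash_dev *table);
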
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 43d46e42404..3ffe05db492 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -451,7 +451,7 @@ static int __init au1xxx_nand_init(void)
 	u32 nand_phys;
 
 	/* Allocate memory for MTD device structure and private data */
-	au1550_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
+	au1550_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
 	if (!au1550_mtd) {
 		printk("Unable to allocate NAND MTD dev structure.\n");
 		return -ENOMEM;
@@ -460,10 +460,6 @@ static int __init au1xxx_nand_init(void)
 	/* Get pointer to private data */
 	this = (struct nand_chip *)(&au1550_mtd[1]);
 
-	/* Initialize structures */
-	memset(au1550_mtd, 0, sizeof(struct mtd_info));
-	memset(this, 0, sizeof(struct nand_chip));
-
 	/* Link the private data with the MTD structure */
 	au1550_mtd->priv = this;
 	au1550_mtd->owner = THIS_MODULE;
@@ -544,7 +540,7 @@ static int __init au1xxx_nand_init(void)
 	}
 	nand_phys = (mem_staddr << 4) & 0xFFFC0000;
 
-	p_nand = (void __iomem *)ioremap(nand_phys, 0x1000);
+	p_nand = ioremap(nand_phys, 0x1000);
 
 	/* make controller and MTD agree */
 	if (NAND_CS == 0)
@@ -589,7 +585,7 @@ static int __init au1xxx_nand_init(void)
 	return 0;
 
  outio:
-	iounmap((void *)p_nand);
+	iounmap(p_nand);
 
  outmem:
 	kfree(au1550_mtd);
@@ -610,7 +606,7 @@ static void __exit au1550_cleanup(void)
 	kfree(au1550_mtd);
 
 	/* Unmap */
-	iounmap((void *)p_nand);
+	iounmap(p_nand);
 }
 
 module_exit(au1550_cleanup);
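
For the first two au1550nd.c hunks above: kzalloc() returns zeroed memory, which is why the explicit memset() calls could be dropped along with the kmalloc() conversion. A minimal sketch of the resulting pattern (error handling trimmed):

	/* Sketch only: one zeroed allocation holding both structures. */
	struct mtd_info *mtd = kzalloc(sizeof(struct mtd_info) +
				       sizeof(struct nand_chip), GFP_KERNEL);
	if (!mtd)
		return -ENOMEM;
	mtd->priv = (struct nand_chip *)&mtd[1];	/* chip data follows mtd_info */
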
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index c997f98eeb3..dfe262c726f 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -13,7 +13,6 @@
 *****************************************************************************/
 
 /* ---- Include Files ---------------------------------------------------- */
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -447,7 +446,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
 	 * layout we'll be using.
 	 */
 
-	err = nand_scan_ident(board_mtd, 1);
+	err = nand_scan_ident(board_mtd, 1, NULL);
 	if (err) {
 		printk(KERN_ERR "nand_scan failed: %d\n", err);
 		iounmap(bcm_umi_io_base);
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 8506e7e606f..2974995e194 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -68,6 +68,27 @@
 #define DRV_AUTHOR	"Bryan Wu <bryan.wu@analog.com>"
 #define DRV_DESC	"BF5xx on-chip NAND FLash Controller Driver"
 
+/* NFC_STAT Masks */
+#define NBUSY       0x01  /* Not Busy */
+#define WB_FULL     0x02  /* Write Buffer Full */
+#define PG_WR_STAT  0x04  /* Page Write Pending */
+#define PG_RD_STAT  0x08  /* Page Read Pending */
+#define WB_EMPTY    0x10  /* Write Buffer Empty */
+
+/* NFC_IRQSTAT Masks */
+#define NBUSYIRQ    0x01  /* Not Busy IRQ */
+#define WB_OVF      0x02  /* Write Buffer Overflow */
+#define WB_EDGE     0x04  /* Write Buffer Edge Detect */
+#define RD_RDY      0x08  /* Read Data Ready */
+#define WR_DONE     0x10  /* Page Write Done */
+
+/* NFC_RST Masks */
+#define ECC_RST     0x01  /* ECC (and NFC counters) Reset */
+
+/* NFC_PGCTL Masks */
+#define PG_RD_START 0x01  /* Page Read Start */
+#define PG_WR_START 0x02  /* Page Write Start */
+
 #ifdef CONFIG_MTD_NAND_BF5XX_HWECC
 static int hardware_ecc = 1;
 #else
@@ -487,7 +508,7 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
 	 * transferred to generate the correct ECC register
 	 * values.
 	 */
-	bfin_write_NFC_RST(0x1);
+	bfin_write_NFC_RST(ECC_RST);
 	SSYNC();
 
 	disable_dma(CH_NFC);
@@ -497,7 +518,7 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
 	set_dma_config(CH_NFC, 0x0);
 	set_dma_start_addr(CH_NFC, (unsigned long) buf);
 
-/* The DMAs have different size on BF52x and BF54x */
+	/* The DMAs have different size on BF52x and BF54x */
 #ifdef CONFIG_BF52x
 	set_dma_x_count(CH_NFC, (page_size >> 1));
 	set_dma_x_modify(CH_NFC, 2);
@@ -517,9 +538,9 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
 
 	/* Start PAGE read/write operation */
 	if (is_read)
-		bfin_write_NFC_PGCTL(0x1);
+		bfin_write_NFC_PGCTL(PG_RD_START);
 	else
-		bfin_write_NFC_PGCTL(0x2);
+		bfin_write_NFC_PGCTL(PG_WR_START);
 	wait_for_completion(&info->dma_completion);
 }
 
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index e5a9f9ccea6..db1dfc5a1b1 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -762,7 +762,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
 	       cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK));
 
 	/* Scan to find existence of the device */
-	if (nand_scan_ident(mtd, 2)) {
+	if (nand_scan_ident(mtd, 2, NULL)) {
 		err = -ENXIO;
 		goto out_irq;
 	}
@@ -849,7 +849,7 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev)
 	kfree(mtd);
 }
 
-static struct pci_device_id cafe_nand_tbl[] = {
+static const struct pci_device_id cafe_nand_tbl[] = {
 	{ PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_88ALP01_NAND,
 	  PCI_ANY_ID, PCI_ANY_ID },
 	{ }
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 76e2dc8e62f..9c9d893affe 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -567,8 +567,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
 		goto err_nomem;
 	}
 
-	vaddr = ioremap(res1->start, res1->end - res1->start);
-	base = ioremap(res2->start, res2->end - res2->start);
+	vaddr = ioremap(res1->start, resource_size(res1));
+	base = ioremap(res2->start, resource_size(res2));
 	if (!vaddr || !base) {
 		dev_err(&pdev->dev, "ioremap failed\n");
 		ret = -EINVAL;
@@ -691,7 +691,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
 	spin_unlock_irq(&davinci_nand_lock);
 
 	/* Scan to find existence of the device(s) */
-	ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1);
+	ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1, NULL);
 	if (ret < 0) {
 		dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
 		goto err_scan;
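
On the davinci_nand.c change above: resource_size(), defined in include/linux/ioport.h, evaluates to res->end - res->start + 1, so besides reading better it maps the full region; the open-coded subtraction it replaces was one byte short. For reference, the helper is equivalent to:

	static inline resource_size_t resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;	/* resource ranges are inclusive */
	}
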
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
new file mode 100644
index 00000000000..ca03428b59c
--- /dev/null
+++ b/drivers/mtd/nand/denali.c
@@ -0,0 +1,2134 @@
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright © 2009-2010, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20#include <linux/interrupt.h>
21#include <linux/delay.h>
22#include <linux/wait.h>
23#include <linux/mutex.h>
24#include <linux/pci.h>
25#include <linux/mtd/mtd.h>
26#include <linux/module.h>
27
28#include "denali.h"
29
30MODULE_LICENSE("GPL");
31
32/* We define a module parameter that allows the user to override
33 * the hardware and decide what timing mode should be used.
34 */
35#define NAND_DEFAULT_TIMINGS -1
36
37static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
38module_param(onfi_timing_mode, int, S_IRUGO);
39MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting. -1 indicates"
40 " use default timings");
41
42#define DENALI_NAND_NAME "denali-nand"
43
44/* We define a macro here that combines all interrupts this driver uses into
45 * a single constant value, for convenience. */
46#define DENALI_IRQ_ALL (INTR_STATUS0__DMA_CMD_COMP | \
47 INTR_STATUS0__ECC_TRANSACTION_DONE | \
48 INTR_STATUS0__ECC_ERR | \
49 INTR_STATUS0__PROGRAM_FAIL | \
50 INTR_STATUS0__LOAD_COMP | \
51 INTR_STATUS0__PROGRAM_COMP | \
52 INTR_STATUS0__TIME_OUT | \
53 INTR_STATUS0__ERASE_FAIL | \
54 INTR_STATUS0__RST_COMP | \
55 INTR_STATUS0__ERASE_COMP)
56
57/* indicates whether or not the internal value for the flash bank is
58 valid or not */
59#define CHIP_SELECT_INVALID -1
60
61#define SUPPORT_8BITECC 1
62
63/* This macro divides two integers and rounds fractional values up
64 * to the nearest integer value. */
65#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
66
67/* this macro allows us to convert from an MTD structure to our own
68 * device context (denali) structure.
69 */
70#define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd)
71
72/* These constants are defined by the driver to enable common driver
73 configuration options. */
74#define SPARE_ACCESS 0x41
75#define MAIN_ACCESS 0x42
76#define MAIN_SPARE_ACCESS 0x43
77
78#define DENALI_READ 0
79#define DENALI_WRITE 0x100
80
81/* types of device accesses. We can issue commands and get status */
82#define COMMAND_CYCLE 0
83#define ADDR_CYCLE 1
84#define STATUS_CYCLE 2
85
86/* this is a helper macro that allows us to
87 * format the bank into the proper bits for the controller */
88#define BANK(x) ((x) << 24)
89
90/* List of platforms this NAND controller has be integrated into */
91static const struct pci_device_id denali_pci_ids[] = {
92 { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
93 { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
94 { /* end: all zeroes */ }
95};
96
97
98/* these are static lookup tables that give us easy access to
99 registers in the NAND controller.
100 */
101static const uint32_t intr_status_addresses[4] = {INTR_STATUS0,
102 INTR_STATUS1,
103 INTR_STATUS2,
104 INTR_STATUS3};
105
106static const uint32_t device_reset_banks[4] = {DEVICE_RESET__BANK0,
107 DEVICE_RESET__BANK1,
108 DEVICE_RESET__BANK2,
109 DEVICE_RESET__BANK3};
110
111static const uint32_t operation_timeout[4] = {INTR_STATUS0__TIME_OUT,
112 INTR_STATUS1__TIME_OUT,
113 INTR_STATUS2__TIME_OUT,
114 INTR_STATUS3__TIME_OUT};
115
116static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP,
117 INTR_STATUS1__RST_COMP,
118 INTR_STATUS2__RST_COMP,
119 INTR_STATUS3__RST_COMP};
120
121/* specifies the debug level of the driver */
122static int nand_debug_level = 0;
123
124/* forward declarations */
125static void clear_interrupts(struct denali_nand_info *denali);
126static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask);
127static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask);
128static uint32_t read_interrupt_status(struct denali_nand_info *denali);
129
130#define DEBUG_DENALI 0
131
132/* This is a wrapper for writing to the denali registers.
133 * this allows us to create debug information so we can
134 * observe how the driver is programming the device.
135 * it uses standard linux convention for (val, addr) */
136static void denali_write32(uint32_t value, void *addr)
137{
138 iowrite32(value, addr);
139
140#if DEBUG_DENALI
141 printk(KERN_ERR "wrote: 0x%x -> 0x%x\n", value, (uint32_t)((uint32_t)addr & 0x1fff));
142#endif
143}
144
145/* Certain operations for the denali NAND controller use an indexed mode to read/write
146 data. The operation is performed by writing the address value of the command to
147 the device memory followed by the data. This function abstracts this common
148 operation.
149*/
150static void index_addr(struct denali_nand_info *denali, uint32_t address, uint32_t data)
151{
152 denali_write32(address, denali->flash_mem);
153 denali_write32(data, denali->flash_mem + 0x10);
154}
155
156/* Perform an indexed read of the device */
157static void index_addr_read_data(struct denali_nand_info *denali,
158 uint32_t address, uint32_t *pdata)
159{
160 denali_write32(address, denali->flash_mem);
161 *pdata = ioread32(denali->flash_mem + 0x10);
162}
163
164/* We need to buffer some data for some of the NAND core routines.
165 * The operations manage buffering that data. */
166static void reset_buf(struct denali_nand_info *denali)
167{
168 denali->buf.head = denali->buf.tail = 0;
169}
170
171static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
172{
173 BUG_ON(denali->buf.tail >= sizeof(denali->buf.buf));
174 denali->buf.buf[denali->buf.tail++] = byte;
175}
176
177/* reads the status of the device */
178static void read_status(struct denali_nand_info *denali)
179{
180 uint32_t cmd = 0x0;
181
182 /* initialize the data buffer to store status */
183 reset_buf(denali);
184
185 /* initiate a device status read */
186 cmd = MODE_11 | BANK(denali->flash_bank);
187 index_addr(denali, cmd | COMMAND_CYCLE, 0x70);
188 denali_write32(cmd | STATUS_CYCLE, denali->flash_mem);
189
190 /* update buffer with status value */
191 write_byte_to_buf(denali, ioread32(denali->flash_mem + 0x10));
192
193#if DEBUG_DENALI
194 printk("device reporting status value of 0x%2x\n", denali->buf.buf[0]);
195#endif
196}
197
198/* resets a specific device connected to the core */
199static void reset_bank(struct denali_nand_info *denali)
200{
201 uint32_t irq_status = 0;
202 uint32_t irq_mask = reset_complete[denali->flash_bank] |
203 operation_timeout[denali->flash_bank];
204 int bank = 0;
205
206 clear_interrupts(denali);
207
208 bank = device_reset_banks[denali->flash_bank];
209 denali_write32(bank, denali->flash_reg + DEVICE_RESET);
210
211 irq_status = wait_for_irq(denali, irq_mask);
212
213 if (irq_status & operation_timeout[denali->flash_bank])
214 {
215 printk(KERN_ERR "reset bank failed.\n");
216 }
217}
218
219/* Reset the flash controller */
220static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali)
221{
222 uint32_t i;
223
224 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
225 __FILE__, __LINE__, __func__);
226
227 for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
228 denali_write32(reset_complete[i] | operation_timeout[i],
229 denali->flash_reg + intr_status_addresses[i]);
230
231 for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
232 denali_write32(device_reset_banks[i], denali->flash_reg + DEVICE_RESET);
233 while (!(ioread32(denali->flash_reg + intr_status_addresses[i]) &
234 (reset_complete[i] | operation_timeout[i])))
235 ;
236 if (ioread32(denali->flash_reg + intr_status_addresses[i]) &
237 operation_timeout[i])
238 nand_dbg_print(NAND_DBG_WARN,
239 "NAND Reset operation timed out on bank %d\n", i);
240 }
241
242 for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
243 denali_write32(reset_complete[i] | operation_timeout[i],
244 denali->flash_reg + intr_status_addresses[i]);
245
246 return PASS;
247}
248
249/* this routine calculates the ONFI timing values for a given mode and programs
250 * the clocking register accordingly. The mode is determined by the get_onfi_nand_para
251 routine.
252 */
253static void NAND_ONFi_Timing_Mode(struct denali_nand_info *denali, uint16_t mode)
254{
255 uint16_t Trea[6] = {40, 30, 25, 20, 20, 16};
256 uint16_t Trp[6] = {50, 25, 17, 15, 12, 10};
257 uint16_t Treh[6] = {30, 15, 15, 10, 10, 7};
258 uint16_t Trc[6] = {100, 50, 35, 30, 25, 20};
259 uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15};
260 uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5};
261 uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25};
262 uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70};
263 uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100};
264 uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100};
265 uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60};
266 uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15};
267
268 uint16_t TclsRising = 1;
269 uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
270 uint16_t dv_window = 0;
271 uint16_t en_lo, en_hi;
272 uint16_t acc_clks;
273 uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
274
275 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
276 __FILE__, __LINE__, __func__);
277
278 en_lo = CEIL_DIV(Trp[mode], CLK_X);
279 en_hi = CEIL_DIV(Treh[mode], CLK_X);
280#if ONFI_BLOOM_TIME
281 if ((en_hi * CLK_X) < (Treh[mode] + 2))
282 en_hi++;
283#endif
284
285 if ((en_lo + en_hi) * CLK_X < Trc[mode])
286 en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
287
288 if ((en_lo + en_hi) < CLK_MULTI)
289 en_lo += CLK_MULTI - en_lo - en_hi;
290
291 while (dv_window < 8) {
292 data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
293
294 data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
295
296 data_invalid =
297 data_invalid_rhoh <
298 data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;
299
300 dv_window = data_invalid - Trea[mode];
301
302 if (dv_window < 8)
303 en_lo++;
304 }
305
306 acc_clks = CEIL_DIV(Trea[mode], CLK_X);
307
308 while (((acc_clks * CLK_X) - Trea[mode]) < 3)
309 acc_clks++;
310
311 if ((data_invalid - acc_clks * CLK_X) < 2)
312 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
313 __FILE__, __LINE__);
314
315 addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
316 re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
317 re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
318 we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
319 cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
320 if (!TclsRising)
321 cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
322 if (cs_cnt == 0)
323 cs_cnt = 1;
324
325 if (Tcea[mode]) {
326 while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
327 cs_cnt++;
328 }
329
330#if MODE5_WORKAROUND
331 if (mode == 5)
332 acc_clks = 5;
333#endif
334
335 /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
336 if ((ioread32(denali->flash_reg + MANUFACTURER_ID) == 0) &&
337 (ioread32(denali->flash_reg + DEVICE_ID) == 0x88))
338 acc_clks = 6;
339
340 denali_write32(acc_clks, denali->flash_reg + ACC_CLKS);
341 denali_write32(re_2_we, denali->flash_reg + RE_2_WE);
342 denali_write32(re_2_re, denali->flash_reg + RE_2_RE);
343 denali_write32(we_2_re, denali->flash_reg + WE_2_RE);
344 denali_write32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
345 denali_write32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
346 denali_write32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
347 denali_write32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
348}
349
350/* configures the initial ECC settings for the controller */
351static void set_ecc_config(struct denali_nand_info *denali)
352{
353#if SUPPORT_8BITECC
354 if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
355 (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) <= 128))
356 denali_write32(8, denali->flash_reg + ECC_CORRECTION);
357#endif
358
359 if ((ioread32(denali->flash_reg + ECC_CORRECTION) & ECC_CORRECTION__VALUE)
360 == 1) {
361 denali->dev_info.wECCBytesPerSector = 4;
362 denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected;
363 denali->dev_info.wNumPageSpareFlag =
364 denali->dev_info.wPageSpareSize -
365 denali->dev_info.wPageDataSize /
366 (ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) *
367 denali->dev_info.wECCBytesPerSector
368 - denali->dev_info.wSpareSkipBytes;
369 } else {
370 denali->dev_info.wECCBytesPerSector =
371 (ioread32(denali->flash_reg + ECC_CORRECTION) &
372 ECC_CORRECTION__VALUE) * 13 / 8;
373 if ((denali->dev_info.wECCBytesPerSector) % 2 == 0)
374 denali->dev_info.wECCBytesPerSector += 2;
375 else
376 denali->dev_info.wECCBytesPerSector += 1;
377
378 denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected;
379 denali->dev_info.wNumPageSpareFlag = denali->dev_info.wPageSpareSize -
380 denali->dev_info.wPageDataSize /
381 (ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) *
382 denali->dev_info.wECCBytesPerSector
383 - denali->dev_info.wSpareSkipBytes;
384 }
385}
386
387/* queries the NAND device to see what ONFI modes it supports. */
388static uint16_t get_onfi_nand_para(struct denali_nand_info *denali)
389{
390 int i;
391 uint16_t blks_lun_l, blks_lun_h, n_of_luns;
392 uint32_t blockperlun, id;
393
394 denali_write32(DEVICE_RESET__BANK0, denali->flash_reg + DEVICE_RESET);
395
396 while (!((ioread32(denali->flash_reg + INTR_STATUS0) &
397 INTR_STATUS0__RST_COMP) |
398 (ioread32(denali->flash_reg + INTR_STATUS0) &
399 INTR_STATUS0__TIME_OUT)))
400 ;
401
402 if (ioread32(denali->flash_reg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) {
403 denali_write32(DEVICE_RESET__BANK1, denali->flash_reg + DEVICE_RESET);
404 while (!((ioread32(denali->flash_reg + INTR_STATUS1) &
405 INTR_STATUS1__RST_COMP) |
406 (ioread32(denali->flash_reg + INTR_STATUS1) &
407 INTR_STATUS1__TIME_OUT)))
408 ;
409
410 if (ioread32(denali->flash_reg + INTR_STATUS1) &
411 INTR_STATUS1__RST_COMP) {
412 denali_write32(DEVICE_RESET__BANK2,
413 denali->flash_reg + DEVICE_RESET);
414 while (!((ioread32(denali->flash_reg + INTR_STATUS2) &
415 INTR_STATUS2__RST_COMP) |
416 (ioread32(denali->flash_reg + INTR_STATUS2) &
417 INTR_STATUS2__TIME_OUT)))
418 ;
419
420 if (ioread32(denali->flash_reg + INTR_STATUS2) &
421 INTR_STATUS2__RST_COMP) {
422 denali_write32(DEVICE_RESET__BANK3,
423 denali->flash_reg + DEVICE_RESET);
424 while (!((ioread32(denali->flash_reg + INTR_STATUS3) &
425 INTR_STATUS3__RST_COMP) |
426 (ioread32(denali->flash_reg + INTR_STATUS3) &
427 INTR_STATUS3__TIME_OUT)))
428 ;
429 } else {
430 printk(KERN_ERR "Getting a time out for bank 2!\n");
431 }
432 } else {
433 printk(KERN_ERR "Getting a time out for bank 1!\n");
434 }
435 }
436
437 denali_write32(INTR_STATUS0__TIME_OUT, denali->flash_reg + INTR_STATUS0);
438 denali_write32(INTR_STATUS1__TIME_OUT, denali->flash_reg + INTR_STATUS1);
439 denali_write32(INTR_STATUS2__TIME_OUT, denali->flash_reg + INTR_STATUS2);
440 denali_write32(INTR_STATUS3__TIME_OUT, denali->flash_reg + INTR_STATUS3);
441
442 denali->dev_info.wONFIDevFeatures =
443 ioread32(denali->flash_reg + ONFI_DEVICE_FEATURES);
444 denali->dev_info.wONFIOptCommands =
445 ioread32(denali->flash_reg + ONFI_OPTIONAL_COMMANDS);
446 denali->dev_info.wONFITimingMode =
447 ioread32(denali->flash_reg + ONFI_TIMING_MODE);
448 denali->dev_info.wONFIPgmCacheTimingMode =
449 ioread32(denali->flash_reg + ONFI_PGM_CACHE_TIMING_MODE);
450
451 n_of_luns = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
452 ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
453 blks_lun_l = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
454 blks_lun_h = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);
455
456 blockperlun = (blks_lun_h << 16) | blks_lun_l;
457
458 denali->dev_info.wTotalBlocks = n_of_luns * blockperlun;
459
460 if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
461 ONFI_TIMING_MODE__VALUE))
462 return FAIL;
463
464 for (i = 5; i > 0; i--) {
465 if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) & (0x01 << i))
466 break;
467 }
468
469 NAND_ONFi_Timing_Mode(denali, i);
470
471 index_addr(denali, MODE_11 | 0, 0x90);
472 index_addr(denali, MODE_11 | 1, 0);
473
474 for (i = 0; i < 3; i++)
475 index_addr_read_data(denali, MODE_11 | 2, &id);
476
477 nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);
478
479 denali->dev_info.MLCDevice = id & 0x0C;
480
481 /* By now, all the ONFI devices we know support the page cache */
482 /* rw feature. So here we enable the pipeline_rw_ahead feature */
483 /* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */
484 /* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE); */
485
486 return PASS;
487}
488
489static void get_samsung_nand_para(struct denali_nand_info *denali)
490{
491 uint8_t no_of_planes;
492 uint32_t blk_size;
493 uint64_t plane_size, capacity;
494 uint32_t id_bytes[5];
495 int i;
496
497 index_addr(denali, (uint32_t)(MODE_11 | 0), 0x90);
498 index_addr(denali, (uint32_t)(MODE_11 | 1), 0);
499 for (i = 0; i < 5; i++)
500 index_addr_read_data(denali, (uint32_t)(MODE_11 | 2), &id_bytes[i]);
501
502 nand_dbg_print(NAND_DBG_DEBUG,
503 "ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
504 id_bytes[0], id_bytes[1], id_bytes[2],
505 id_bytes[3], id_bytes[4]);
506
507 if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
508 /* Set timing register values according to datasheet */
509 denali_write32(5, denali->flash_reg + ACC_CLKS);
510 denali_write32(20, denali->flash_reg + RE_2_WE);
511 denali_write32(12, denali->flash_reg + WE_2_RE);
512 denali_write32(14, denali->flash_reg + ADDR_2_DATA);
513 denali_write32(3, denali->flash_reg + RDWR_EN_LO_CNT);
514 denali_write32(2, denali->flash_reg + RDWR_EN_HI_CNT);
515 denali_write32(2, denali->flash_reg + CS_SETUP_CNT);
516 }
517
518 no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
519 plane_size = (uint64_t)64 << ((id_bytes[4] & 0x70) >> 4);
520 blk_size = 64 << ((ioread32(denali->flash_reg + DEVICE_PARAM_1) & 0x30) >> 4);
521 capacity = (uint64_t)128 * plane_size * no_of_planes;
522
523 do_div(capacity, blk_size);
524 denali->dev_info.wTotalBlocks = capacity;
525}
526
527static void get_toshiba_nand_para(struct denali_nand_info *denali)
528{
529 void __iomem *scratch_reg;
530 uint32_t tmp;
531
532 /* Workaround to fix a controller bug which reports a wrong */
533 /* spare area size for some kind of Toshiba NAND device */
534 if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
535 (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
536 denali_write32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
537 tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) *
538 ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
539 denali_write32(tmp, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
540#if SUPPORT_15BITECC
541 denali_write32(15, denali->flash_reg + ECC_CORRECTION);
542#elif SUPPORT_8BITECC
543 denali_write32(8, denali->flash_reg + ECC_CORRECTION);
544#endif
545 }
546
547 /* As Toshiba NAND can not provide it's block number, */
548 /* so here we need user to provide the correct block */
549 /* number in a scratch register before the Linux NAND */
550 /* driver is loaded. If no valid value found in the scratch */
551 /* register, then we use default block number value */
552 scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
553 if (!scratch_reg) {
554 printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
555 __FILE__, __LINE__);
556 denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
557 } else {
558 nand_dbg_print(NAND_DBG_WARN,
559 "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
560 denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg);
561 if (denali->dev_info.wTotalBlocks < 512)
562 denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
563 iounmap(scratch_reg);
564 }
565}
566
567static void get_hynix_nand_para(struct denali_nand_info *denali)
568{
569 void __iomem *scratch_reg;
570 uint32_t main_size, spare_size;
571
572 switch (denali->dev_info.wDeviceID) {
573 case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
574 case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
575 denali_write32(128, denali->flash_reg + PAGES_PER_BLOCK);
576 denali_write32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
577 denali_write32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
578 main_size = 4096 * ioread32(denali->flash_reg + DEVICES_CONNECTED);
579 spare_size = 224 * ioread32(denali->flash_reg + DEVICES_CONNECTED);
580 denali_write32(main_size, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
581 denali_write32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
582 denali_write32(0, denali->flash_reg + DEVICE_WIDTH);
583#if SUPPORT_15BITECC
584 denali_write32(15, denali->flash_reg + ECC_CORRECTION);
585#elif SUPPORT_8BITECC
586 denali_write32(8, denali->flash_reg + ECC_CORRECTION);
587#endif
588 denali->dev_info.MLCDevice = 1;
589 break;
590 default:
591 nand_dbg_print(NAND_DBG_WARN,
592 "Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
593 "Will use default parameter values instead.\n",
594 denali->dev_info.wDeviceID);
595 }
596
597 scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
598 if (!scratch_reg) {
599 printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
600 __FILE__, __LINE__);
601 denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
602 } else {
603 nand_dbg_print(NAND_DBG_WARN,
604 "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
605 denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg);
606 if (denali->dev_info.wTotalBlocks < 512)
607 denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
608 iounmap(scratch_reg);
609 }
610}
611
612/* determines how many NAND chips are connected to the controller. Note for
613 Intel CE4100 devices we don't support more than one device.
614 */
615static void find_valid_banks(struct denali_nand_info *denali)
616{
617 uint32_t id[LLD_MAX_FLASH_BANKS];
618 int i;
619
620 denali->total_used_banks = 1;
621 for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
622 index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90);
623 index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0);
624 index_addr_read_data(denali, (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]);
625
626 nand_dbg_print(NAND_DBG_DEBUG,
627 "Return 1st ID for bank[%d]: %x\n", i, id[i]);
628
629 if (i == 0) {
630 if (!(id[i] & 0x0ff))
631 break; /* WTF? */
632 } else {
633 if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
634 denali->total_used_banks++;
635 else
636 break;
637 }
638 }
639
640 if (denali->platform == INTEL_CE4100)
641 {
642 /* Platform limitations of the CE4100 device limit
643 * users to a single chip solution for NAND.
644 * Multichip support is not enabled.
645 */
646 if (denali->total_used_banks != 1)
647 {
648 printk(KERN_ERR "Sorry, Intel CE4100 only supports "
649 "a single NAND device.\n");
650 BUG();
651 }
652 }
653 nand_dbg_print(NAND_DBG_DEBUG,
654 "denali->total_used_banks: %d\n", denali->total_used_banks);
655}
656
657static void detect_partition_feature(struct denali_nand_info *denali)
658{
659 if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
660 if ((ioread32(denali->flash_reg + PERM_SRC_ID_1) &
661 PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
662 denali->dev_info.wSpectraStartBlock =
663 ((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
664 MIN_MAX_BANK_1__MIN_VALUE) *
665 denali->dev_info.wTotalBlocks)
666 +
667 (ioread32(denali->flash_reg + MIN_BLK_ADDR_1) &
668 MIN_BLK_ADDR_1__VALUE);
669
670 denali->dev_info.wSpectraEndBlock =
671 (((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
672 MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
673 denali->dev_info.wTotalBlocks)
674 +
675 (ioread32(denali->flash_reg + MAX_BLK_ADDR_1) &
676 MAX_BLK_ADDR_1__VALUE);
677
678 denali->dev_info.wTotalBlocks *= denali->total_used_banks;
679
680 if (denali->dev_info.wSpectraEndBlock >=
681 denali->dev_info.wTotalBlocks) {
682 denali->dev_info.wSpectraEndBlock =
683 denali->dev_info.wTotalBlocks - 1;
684 }
685
686 denali->dev_info.wDataBlockNum =
687 denali->dev_info.wSpectraEndBlock -
688 denali->dev_info.wSpectraStartBlock + 1;
689 } else {
690 denali->dev_info.wTotalBlocks *= denali->total_used_banks;
691 denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK;
692 denali->dev_info.wSpectraEndBlock =
693 denali->dev_info.wTotalBlocks - 1;
694 denali->dev_info.wDataBlockNum =
695 denali->dev_info.wSpectraEndBlock -
696 denali->dev_info.wSpectraStartBlock + 1;
697 }
698 } else {
699 denali->dev_info.wTotalBlocks *= denali->total_used_banks;
700 denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK;
701 denali->dev_info.wSpectraEndBlock = denali->dev_info.wTotalBlocks - 1;
702 denali->dev_info.wDataBlockNum =
703 denali->dev_info.wSpectraEndBlock -
704 denali->dev_info.wSpectraStartBlock + 1;
705 }
706}
707
708static void dump_device_info(struct denali_nand_info *denali)
709{
710 nand_dbg_print(NAND_DBG_DEBUG, "denali->dev_info:\n");
711 nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
712 denali->dev_info.wDeviceMaker);
713 nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
714 denali->dev_info.wDeviceID);
715 nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
716 denali->dev_info.wDeviceType);
717 nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
718 denali->dev_info.wSpectraStartBlock);
719 nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
720 denali->dev_info.wSpectraEndBlock);
721 nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
722 denali->dev_info.wTotalBlocks);
723 nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
724 denali->dev_info.wPagesPerBlock);
725 nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
726 denali->dev_info.wPageSize);
727 nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
728 denali->dev_info.wPageDataSize);
729 nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
730 denali->dev_info.wPageSpareSize);
731 nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
732 denali->dev_info.wNumPageSpareFlag);
733 nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
734 denali->dev_info.wECCBytesPerSector);
735 nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
736 denali->dev_info.wBlockSize);
737 nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
738 denali->dev_info.wBlockDataSize);
739 nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
740 denali->dev_info.wDataBlockNum);
741 nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
742 denali->dev_info.bPlaneNum);
743 nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
744 denali->dev_info.wDeviceMainAreaSize);
745 nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
746 denali->dev_info.wDeviceSpareAreaSize);
747 nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
748 denali->dev_info.wDevicesConnected);
749 nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
750 denali->dev_info.wDeviceWidth);
751 nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
752 denali->dev_info.wHWRevision);
753 nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
754 denali->dev_info.wHWFeatures);
755 nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
756 denali->dev_info.wONFIDevFeatures);
757 nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
758 denali->dev_info.wONFIOptCommands);
759 nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
760 denali->dev_info.wONFITimingMode);
761 nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
762 denali->dev_info.wONFIPgmCacheTimingMode);
763 nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
764 denali->dev_info.MLCDevice ? "Yes" : "No");
765 nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
766 denali->dev_info.wSpareSkipBytes);
767 nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
768 denali->dev_info.nBitsInPageNumber);
769 nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
770 denali->dev_info.nBitsInPageDataSize);
771 nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
772 denali->dev_info.nBitsInBlockDataSize);
773}
774
775static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali)
776{
777 uint16_t status = PASS;
778 uint8_t no_of_planes;
779
780 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
781 __FILE__, __LINE__, __func__);
782
783 denali->dev_info.wDeviceMaker = ioread32(denali->flash_reg + MANUFACTURER_ID);
784 denali->dev_info.wDeviceID = ioread32(denali->flash_reg + DEVICE_ID);
785 denali->dev_info.bDeviceParam0 = ioread32(denali->flash_reg + DEVICE_PARAM_0);
786 denali->dev_info.bDeviceParam1 = ioread32(denali->flash_reg + DEVICE_PARAM_1);
787 denali->dev_info.bDeviceParam2 = ioread32(denali->flash_reg + DEVICE_PARAM_2);
788
789 denali->dev_info.MLCDevice = ioread32(denali->flash_reg + DEVICE_PARAM_0) & 0x0c;
790
791 if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
792 ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
793 if (FAIL == get_onfi_nand_para(denali))
794 return FAIL;
795 } else if (denali->dev_info.wDeviceMaker == 0xEC) { /* Samsung NAND */
796 get_samsung_nand_para(denali);
797 } else if (denali->dev_info.wDeviceMaker == 0x98) { /* Toshiba NAND */
798 get_toshiba_nand_para(denali);
799 } else if (denali->dev_info.wDeviceMaker == 0xAD) { /* Hynix NAND */
800 get_hynix_nand_para(denali);
801 } else {
802 denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
803 }
804
805 nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
806 "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
807 "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
808 "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
809 ioread32(denali->flash_reg + ACC_CLKS),
810 ioread32(denali->flash_reg + RE_2_WE),
811 ioread32(denali->flash_reg + WE_2_RE),
812 ioread32(denali->flash_reg + ADDR_2_DATA),
813 ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
814 ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
815 ioread32(denali->flash_reg + CS_SETUP_CNT));
816
817 denali->dev_info.wHWRevision = ioread32(denali->flash_reg + REVISION);
818 denali->dev_info.wHWFeatures = ioread32(denali->flash_reg + FEATURES);
819
820 denali->dev_info.wDeviceMainAreaSize =
821 ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
822 denali->dev_info.wDeviceSpareAreaSize =
823 ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
824
825 denali->dev_info.wPageDataSize =
826 ioread32(denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
827
828 /* Note: When using the Micon 4K NAND device, the controller will report
829 * Page Spare Size as 216 bytes. But Micron's Spec say it's 218 bytes.
830 * And if force set it to 218 bytes, the controller can not work
831 * correctly. So just let it be. But keep in mind that this bug may
832 * cause
833 * other problems in future. - Yunpeng 2008-10-10
834 */
835 denali->dev_info.wPageSpareSize =
836 ioread32(denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
837
838 denali->dev_info.wPagesPerBlock = ioread32(denali->flash_reg + PAGES_PER_BLOCK);
839
840 denali->dev_info.wPageSize =
841 denali->dev_info.wPageDataSize + denali->dev_info.wPageSpareSize;
842 denali->dev_info.wBlockSize =
843 denali->dev_info.wPageSize * denali->dev_info.wPagesPerBlock;
844 denali->dev_info.wBlockDataSize =
845 denali->dev_info.wPagesPerBlock * denali->dev_info.wPageDataSize;
846
847 denali->dev_info.wDeviceWidth = ioread32(denali->flash_reg + DEVICE_WIDTH);
848 denali->dev_info.wDeviceType =
849 ((ioread32(denali->flash_reg + DEVICE_WIDTH) > 0) ? 16 : 8);
850
851 denali->dev_info.wDevicesConnected = ioread32(denali->flash_reg + DEVICES_CONNECTED);
852
853 denali->dev_info.wSpareSkipBytes =
854 ioread32(denali->flash_reg + SPARE_AREA_SKIP_BYTES) *
855 denali->dev_info.wDevicesConnected;
856
857 denali->dev_info.nBitsInPageNumber =
858 ilog2(denali->dev_info.wPagesPerBlock);
859 denali->dev_info.nBitsInPageDataSize =
860 ilog2(denali->dev_info.wPageDataSize);
861 denali->dev_info.nBitsInBlockDataSize =
862 ilog2(denali->dev_info.wBlockDataSize);
863
864 set_ecc_config(denali);
865
866 no_of_planes = ioread32(denali->flash_reg + NUMBER_OF_PLANES) &
867 NUMBER_OF_PLANES__VALUE;
868
869 switch (no_of_planes) {
870 case 0:
871 case 1:
872 case 3:
873 case 7:
874 denali->dev_info.bPlaneNum = no_of_planes + 1;
875 break;
876 default:
877 status = FAIL;
878 break;
879 }
880
881 find_valid_banks(denali);
882
883 detect_partition_feature(denali);
884
885 dump_device_info(denali);
886
887 /* If the user specified to override the default timings
888 * with a specific ONFI mode, we apply those changes here.
889 */
890 if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
891 {
892 NAND_ONFi_Timing_Mode(denali, onfi_timing_mode);
893 }
894
895 return status;
896}
897
898static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali,
899 uint16_t INT_ENABLE)
900{
901 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
902 __FILE__, __LINE__, __func__);
903
904 if (INT_ENABLE)
905 denali_write32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
906 else
907 denali_write32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
908}
909
910/* validation function to verify that the controlling software is making
911 a valid request
912 */
913static inline bool is_flash_bank_valid(int flash_bank)
914{
915 return (flash_bank >= 0 && flash_bank < 4);
916}
917
918static void denali_irq_init(struct denali_nand_info *denali)
919{
920 uint32_t int_mask = 0;
921
922 /* Disable global interrupts */
923 NAND_LLD_Enable_Disable_Interrupts(denali, false);
924
925 int_mask = DENALI_IRQ_ALL;
926
927 /* Clear all status bits */
928 denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS0);
929 denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS1);
930 denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS2);
931 denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS3);
932
933 denali_irq_enable(denali, int_mask);
934}
935
936static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
937{
938 NAND_LLD_Enable_Disable_Interrupts(denali, false);
939 free_irq(irqnum, denali);
940}
941
942static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask)
943{
944 denali_write32(int_mask, denali->flash_reg + INTR_EN0);
945 denali_write32(int_mask, denali->flash_reg + INTR_EN1);
946 denali_write32(int_mask, denali->flash_reg + INTR_EN2);
947 denali_write32(int_mask, denali->flash_reg + INTR_EN3);
948}
949
950/* This function only returns when an interrupt that this driver cares about
951 * occurs. This is to reduce the overhead of servicing interrupts
952 */
953static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
954{
955 return (read_interrupt_status(denali) & DENALI_IRQ_ALL);
956}
957
958/* Interrupts are cleared by writing a 1 to the appropriate status bit */
959static inline void clear_interrupt(struct denali_nand_info *denali, uint32_t irq_mask)
960{
961 uint32_t intr_status_reg = 0;
962
963 intr_status_reg = intr_status_addresses[denali->flash_bank];
964
965 denali_write32(irq_mask, denali->flash_reg + intr_status_reg);
966}
967
968static void clear_interrupts(struct denali_nand_info *denali)
969{
970 uint32_t status = 0x0;
971 spin_lock_irq(&denali->irq_lock);
972
973 status = read_interrupt_status(denali);
974
975#if DEBUG_DENALI
976 denali->irq_debug_array[denali->idx++] = 0x30000000 | status;
977 denali->idx %= 32;
978#endif
979
980 denali->irq_status = 0x0;
981 spin_unlock_irq(&denali->irq_lock);
982}
983
984static uint32_t read_interrupt_status(struct denali_nand_info *denali)
985{
986 uint32_t intr_status_reg = 0;
987
988 intr_status_reg = intr_status_addresses[denali->flash_bank];
989
990 return ioread32(denali->flash_reg + intr_status_reg);
991}
992
993#if DEBUG_DENALI
994static void print_irq_log(struct denali_nand_info *denali)
995{
996 int i = 0;
997
998 printk("ISR debug log index = %X\n", denali->idx);
999 for (i = 0; i < 32; i++)
1000 {
1001 printk("%08X: %08X\n", i, denali->irq_debug_array[i]);
1002 }
1003}
1004#endif
1005
1006/* This is the interrupt service routine. It handles all interrupts
1007 * sent to this device. Note that on CE4100, this is a shared
1008 * interrupt.
1009 */
1010static irqreturn_t denali_isr(int irq, void *dev_id)
1011{
1012 struct denali_nand_info *denali = dev_id;
1013 uint32_t irq_status = 0x0;
1014 irqreturn_t result = IRQ_NONE;
1015
1016 spin_lock(&denali->irq_lock);
1017
1018 /* check to see if a valid NAND chip has
1019 * been selected.
1020 */
1021 if (is_flash_bank_valid(denali->flash_bank))
1022 {
1023 /* check to see if controller generated
1024 * the interrupt, since this is a shared interrupt */
1025 if ((irq_status = denali_irq_detected(denali)) != 0)
1026 {
1027#if DEBUG_DENALI
1028 denali->irq_debug_array[denali->idx++] = 0x10000000 | irq_status;
1029 denali->idx %= 32;
1030
1031 printk("IRQ status = 0x%04x\n", irq_status);
1032#endif
1033 /* handle interrupt */
1034 /* first acknowledge it */
1035 clear_interrupt(denali, irq_status);
1036 /* store the status in the device context for someone
1037 to read */
1038 denali->irq_status |= irq_status;
1039 /* notify anyone who cares that it happened */
1040 complete(&denali->complete);
1041 /* tell the OS that we've handled this */
1042 result = IRQ_HANDLED;
1043 }
1044 }
1045 spin_unlock(&denali->irq_lock);
1046 return result;
1047}
1048#define BANK(x) ((x) << 24)
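/* Flash addresses handed to the controller put the bank number at bit 24
 * and the page number in the low bits, e.g. bank 1, page 0x200 becomes
 * BANK(1) | 0x200 == 0x01000200 in denali_send_pipeline_cmd() below.
 */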
1049
1050static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
1051{
1052 unsigned long comp_res = 0;
1053 uint32_t intr_status = 0;
1054 bool retry = false;
1055 unsigned long timeout = msecs_to_jiffies(1000);
1056
1057 do
1058 {
1059#if DEBUG_DENALI
1060 printk("waiting for 0x%x\n", irq_mask);
1061#endif
1062 comp_res = wait_for_completion_timeout(&denali->complete, timeout);
1063 spin_lock_irq(&denali->irq_lock);
1064 intr_status = denali->irq_status;
1065
1066#if DEBUG_DENALI
1067 denali->irq_debug_array[denali->idx++] = 0x20000000 | (irq_mask << 16) | intr_status;
1068 denali->idx %= 32;
1069#endif
1070
1071 if (intr_status & irq_mask)
1072 {
1073 denali->irq_status &= ~irq_mask;
1074 spin_unlock_irq(&denali->irq_lock);
1075#if DEBUG_DENALI
1076 if (retry) printk("status on retry = 0x%x\n", intr_status);
1077#endif
1078 /* our interrupt was detected */
1079 break;
1080 }
1081 else
1082 {
1083 /* these are not the interrupts you are looking for -
1084 need to wait again */
1085 spin_unlock_irq(&denali->irq_lock);
1086#if DEBUG_DENALI
1087 print_irq_log(denali);
1088 printk("received irq nobody cared: irq_status = 0x%x,"
1089 " irq_mask = 0x%x, timeout = %ld\n", intr_status, irq_mask, comp_res);
1090#endif
1091 retry = true;
1092 }
1093 } while (comp_res != 0);
1094
1095 if (comp_res == 0)
1096 {
1097 /* timeout */
1098 printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n",
1099 intr_status, irq_mask);
1100
1101 intr_status = 0;
1102 }
1103 return intr_status;
1104}
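/* wait_for_irq() returns the pending interrupt status once at least one
 * bit of irq_mask has been seen, or 0 if nothing in irq_mask arrives
 * within the one second timeout; callers treat 0 as a failure.
 */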
1105
1106/* This helper function sets up the registers for ECC and whether or not
1107   the spare area will be transferred. */
1108static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
1109 bool transfer_spare)
1110{
1111 int ecc_en_flag = 0, transfer_spare_flag = 0;
1112
1113 /* set ECC, transfer spare bits if needed */
1114 ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
1115 transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
1116
1117 /* Enable spare area/ECC per user's request. */
1118 denali_write32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
1119 denali_write32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
1120}
1121
1122/* sends a pipeline command operation to the controller. See the Denali NAND
1123 controller's user guide for more information (section 4.2.3.6).
1124 */
1125static int denali_send_pipeline_cmd(struct denali_nand_info *denali, bool ecc_en,
1126 bool transfer_spare, int access_type,
1127 int op)
1128{
1129 int status = PASS;
1130 uint32_t addr = 0x0, cmd = 0x0, page_count = 1, irq_status = 0,
1131 irq_mask = 0;
1132
1133 if (op == DENALI_READ) irq_mask = INTR_STATUS0__LOAD_COMP;
1134 else if (op == DENALI_WRITE) irq_mask = 0;
1135 else BUG();
1136
1137 setup_ecc_for_xfer(denali, ecc_en, transfer_spare);
1138
1139#if DEBUG_DENALI
1140 spin_lock_irq(&denali->irq_lock);
1141 denali->irq_debug_array[denali->idx++] = 0x40000000 | ioread32(denali->flash_reg + ECC_ENABLE) | (access_type << 4);
1142 denali->idx %= 32;
1143 spin_unlock_irq(&denali->irq_lock);
1144#endif
1145
1146
1147 /* clear interrupts */
1148 clear_interrupts(denali);
1149
1150 addr = BANK(denali->flash_bank) | denali->page;
1151
1152 if (op == DENALI_WRITE && access_type != SPARE_ACCESS)
1153 {
1154 cmd = MODE_01 | addr;
1155 denali_write32(cmd, denali->flash_mem);
1156 }
1157 else if (op == DENALI_WRITE && access_type == SPARE_ACCESS)
1158 {
1159		/* set up spare area access for this write */
1160 cmd = MODE_10 | addr;
1161 index_addr(denali, (uint32_t)cmd, access_type);
1162
1163 cmd = MODE_01 | addr;
1164 denali_write32(cmd, denali->flash_mem);
1165 }
1166 else if (op == DENALI_READ)
1167 {
1168 /* setup page read request for access type */
1169 cmd = MODE_10 | addr;
1170 index_addr(denali, (uint32_t)cmd, access_type);
1171
1172 /* page 33 of the NAND controller spec indicates we should not
1173 use the pipeline commands in Spare area only mode. So we
1174 don't.
1175 */
1176 if (access_type == SPARE_ACCESS)
1177 {
1178 cmd = MODE_01 | addr;
1179 denali_write32(cmd, denali->flash_mem);
1180 }
1181 else
1182 {
1183 index_addr(denali, (uint32_t)cmd, 0x2000 | op | page_count);
1184
1185 /* wait for command to be accepted
1186 * can always use status0 bit as the mask is identical for each
1187 * bank. */
1188 irq_status = wait_for_irq(denali, irq_mask);
1189
1190 if (irq_status == 0)
1191 {
1192 printk(KERN_ERR "cmd, page, addr on timeout "
1193 "(0x%x, 0x%x, 0x%x)\n", cmd, denali->page, addr);
1194 status = FAIL;
1195 }
1196 else
1197 {
1198 cmd = MODE_01 | addr;
1199 denali_write32(cmd, denali->flash_mem);
1200 }
1201 }
1202 }
1203 return status;
1204}
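/* In the sequences above, MODE_10 addresses are passed to index_addr()
 * together with a command word (the access type, or 0x2000 | op | page_count
 * for a pipeline request), while a MODE_01 address written to flash_mem
 * precedes streaming the page data through flash_mem + 0x10.
 */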
1205
1206/* helper function that simply writes a buffer to the flash */
1207static int write_data_to_flash_mem(struct denali_nand_info *denali, const uint8_t *buf,
1208 int len)
1209{
1210 uint32_t i = 0, *buf32;
1211
1212 /* verify that the len is a multiple of 4. see comment in
1213 * read_data_from_flash_mem() */
1214 BUG_ON((len % 4) != 0);
1215
1216 /* write the data to the flash memory */
1217 buf32 = (uint32_t *)buf;
1218 for (i = 0; i < len / 4; i++)
1219 {
1220 denali_write32(*buf32++, denali->flash_mem + 0x10);
1221 }
1222	return i*4; /* intent is to return the number of bytes written */
1223}
1224
1225/* helper function that simply reads a buffer from the flash */
1226static int read_data_from_flash_mem(struct denali_nand_info *denali, uint8_t *buf,
1227 int len)
1228{
1229 uint32_t i = 0, *buf32;
1230
1231	/* we assume that len is a multiple of 4; if it is not,
1232	 * it is better to find out immediately rather than
1233	 * suffer random failures later.
1234	 *
1235	 * This assumption holds because this function is
1236	 * designed to read flash pages, whose sizes are
1237	 * multiples of 4 bytes...
1238	 */
1239
1240 BUG_ON((len % 4) != 0);
1241
1242 /* transfer the data from the flash */
1243 buf32 = (uint32_t *)buf;
1244 for (i = 0; i < len / 4; i++)
1245 {
1246 *buf32++ = ioread32(denali->flash_mem + 0x10);
1247 }
1248 return i*4; /* intent is to return the number of bytes read */
1249}
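/* Example: reading a 2048 byte page body performs 2048 / 4 = 512
 * consecutive ioread32() accesses of flash_mem + 0x10 and returns 2048.
 */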
1250
1251/* writes OOB data to the device */
1252static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
1253{
1254 struct denali_nand_info *denali = mtd_to_denali(mtd);
1255 uint32_t irq_status = 0;
1256 uint32_t irq_mask = INTR_STATUS0__PROGRAM_COMP |
1257 INTR_STATUS0__PROGRAM_FAIL;
1258 int status = 0;
1259
1260 denali->page = page;
1261
1262 if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
1263 DENALI_WRITE) == PASS)
1264 {
1265 write_data_to_flash_mem(denali, buf, mtd->oobsize);
1266
1267#if DEBUG_DENALI
1268 spin_lock_irq(&denali->irq_lock);
1269 denali->irq_debug_array[denali->idx++] = 0x80000000 | mtd->oobsize;
1270 denali->idx %= 32;
1271 spin_unlock_irq(&denali->irq_lock);
1272#endif
1273
1274
1275 /* wait for operation to complete */
1276 irq_status = wait_for_irq(denali, irq_mask);
1277
1278 if (irq_status == 0)
1279 {
1280 printk(KERN_ERR "OOB write failed\n");
1281 status = -EIO;
1282 }
1283 }
1284 else
1285 {
1286 printk(KERN_ERR "unable to send pipeline command\n");
1287 status = -EIO;
1288 }
1289 return status;
1290}
1291
1292/* reads OOB data from the device */
1293static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
1294{
1295 struct denali_nand_info *denali = mtd_to_denali(mtd);
1296 uint32_t irq_mask = INTR_STATUS0__LOAD_COMP, irq_status = 0, addr = 0x0, cmd = 0x0;
1297
1298 denali->page = page;
1299
1300#if DEBUG_DENALI
1301 printk("read_oob %d\n", page);
1302#endif
1303 if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
1304 DENALI_READ) == PASS)
1305 {
1306 read_data_from_flash_mem(denali, buf, mtd->oobsize);
1307
1308 /* wait for command to be accepted
1309 * can always use status0 bit as the mask is identical for each
1310 * bank. */
1311 irq_status = wait_for_irq(denali, irq_mask);
1312
1313 if (irq_status == 0)
1314 {
1315 printk(KERN_ERR "page on OOB timeout %d\n", denali->page);
1316 }
1317
1318 /* We set the device back to MAIN_ACCESS here as I observed
1319 * instability with the controller if you do a block erase
1320 * and the last transaction was a SPARE_ACCESS. Block erase
1321 * is reliable (according to the MTD test infrastructure)
1322 * if you are in MAIN_ACCESS.
1323 */
1324 addr = BANK(denali->flash_bank) | denali->page;
1325 cmd = MODE_10 | addr;
1326 index_addr(denali, (uint32_t)cmd, MAIN_ACCESS);
1327
1328#if DEBUG_DENALI
1329 spin_lock_irq(&denali->irq_lock);
1330 denali->irq_debug_array[denali->idx++] = 0x60000000 | mtd->oobsize;
1331 denali->idx %= 32;
1332 spin_unlock_irq(&denali->irq_lock);
1333#endif
1334 }
1335}
1336
1337/* this function examines buffers to see if they contain data that
1338 * indicate that the buffer is part of an erased region of flash.
1339 */
1340bool is_erased(uint8_t *buf, int len)
1341{
1342 int i = 0;
1343 for (i = 0; i < len; i++)
1344 {
1345 if (buf[i] != 0xFF)
1346 {
1347 return false;
1348 }
1349 }
1350 return true;
1351}
1352#define ECC_SECTOR_SIZE 512
1353
1354#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
1355#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
1356#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
1357#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO__ERROR_TYPE))
1358#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
1359#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
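/* Worked example: an ECC_ERROR_ADDRESS value of 0x3021 decodes to sector 3
 * (bits 15:12) and byte 0x21 (bits 11:0), giving a buffer offset of
 * 3 * ECC_SECTOR_SIZE + 0x21 = 1569 in handle_ecc() below.
 */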
1360
1361static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
1362 uint8_t *oobbuf, uint32_t irq_status)
1363{
1364 bool check_erased_page = false;
1365
1366 if (irq_status & INTR_STATUS0__ECC_ERR)
1367 {
1368 /* read the ECC errors. we'll ignore them for now */
1369 uint32_t err_address = 0, err_correction_info = 0;
1370 uint32_t err_byte = 0, err_sector = 0, err_device = 0;
1371 uint32_t err_correction_value = 0;
1372
1373 do
1374 {
1375 err_address = ioread32(denali->flash_reg +
1376 ECC_ERROR_ADDRESS);
1377 err_sector = ECC_SECTOR(err_address);
1378 err_byte = ECC_BYTE(err_address);
1379
1380
1381 err_correction_info = ioread32(denali->flash_reg +
1382 ERR_CORRECTION_INFO);
1383 err_correction_value =
1384 ECC_CORRECTION_VALUE(err_correction_info);
1385 err_device = ECC_ERR_DEVICE(err_correction_info);
1386
1387 if (ECC_ERROR_CORRECTABLE(err_correction_info))
1388 {
1389 /* offset in our buffer is computed as:
1390 sector number * sector size + offset in
1391 sector
1392 */
1393 int offset = err_sector * ECC_SECTOR_SIZE +
1394 err_byte;
1395 if (offset < denali->mtd.writesize)
1396 {
1397 /* correct the ECC error */
1398 buf[offset] ^= err_correction_value;
1399 denali->mtd.ecc_stats.corrected++;
1400 }
1401 else
1402 {
1403 /* bummer, couldn't correct the error */
1404 printk(KERN_ERR "ECC offset invalid\n");
1405 denali->mtd.ecc_stats.failed++;
1406 }
1407 }
1408 else
1409 {
1410 /* if the error is not correctable, need to
1411 * look at the page to see if it is an erased page.
1412 * if so, then it's not a real ECC error */
1413 check_erased_page = true;
1414 }
1415
1416#if DEBUG_DENALI
1417 printk("Detected ECC error in page %d: err_addr = 0x%08x,"
1418 " info to fix is 0x%08x\n", denali->page, err_address,
1419 err_correction_info);
1420#endif
1421 } while (!ECC_LAST_ERR(err_correction_info));
1422 }
1423 return check_erased_page;
1424}
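/* handle_ecc() returns true when an uncorrectable error was reported;
 * denali_read_page() then re-reads the OOB area and only counts an ECC
 * failure if the data is not simply an erased (all 0xFF) page.
 */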
1425
1426/* programs the controller to either enable/disable DMA transfers */
1427static void denali_enable_dma(struct denali_nand_info *denali, bool en)
1428{
1429 uint32_t reg_val = 0x0;
1430
1431 if (en) reg_val = DMA_ENABLE__FLAG;
1432
1433 denali_write32(reg_val, denali->flash_reg + DMA_ENABLE);
1434 ioread32(denali->flash_reg + DMA_ENABLE);
1435}
1436
1437/* setups the HW to perform the data DMA */
1438static void denali_setup_dma(struct denali_nand_info *denali, int op)
1439{
1440 uint32_t mode = 0x0;
1441 const int page_count = 1;
1442 dma_addr_t addr = denali->buf.dma_buf;
1443
1444 mode = MODE_10 | BANK(denali->flash_bank);
1445
1446 /* DMA is a four step process */
1447
1448 /* 1. setup transfer type and # of pages */
1449 index_addr(denali, mode | denali->page, 0x2000 | op | page_count);
1450
1451 /* 2. set memory high address bits 23:8 */
1452 index_addr(denali, mode | ((uint16_t)(addr >> 16) << 8), 0x2200);
1453
1454 /* 3. set memory low address bits 23:8 */
1455 index_addr(denali, mode | ((uint16_t)addr << 8), 0x2300);
1456
1457 /* 4. interrupt when complete, burst len = 64 bytes*/
1458 index_addr(denali, mode | 0x14000, 0x2400);
1459}
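/* Example: for a DMA buffer at physical address 0x12345678, step 2 places
 * 0x1234 (address bits 31:16) into bits 23:8 of the index address and
 * step 3 places 0x5678 the same way, matching the high/low address steps
 * commented above.
 */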
1460
1461/* writes a page. user specifies type, and this function handles the
1462 configuration details. */
1463static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
1464 const uint8_t *buf, bool raw_xfer)
1465{
1466 struct denali_nand_info *denali = mtd_to_denali(mtd);
1467 struct pci_dev *pci_dev = denali->dev;
1468
1469 dma_addr_t addr = denali->buf.dma_buf;
1470 size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1471
1472 uint32_t irq_status = 0;
1473 uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP |
1474 INTR_STATUS0__PROGRAM_FAIL;
1475
1476 /* if it is a raw xfer, we want to disable ecc, and send
1477 * the spare area.
1478 * !raw_xfer - enable ecc
1479 * raw_xfer - transfer spare
1480 */
1481 setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);
1482
1483 /* copy buffer into DMA buffer */
1484 memcpy(denali->buf.buf, buf, mtd->writesize);
1485
1486 if (raw_xfer)
1487 {
1488 /* transfer the data to the spare area */
1489 memcpy(denali->buf.buf + mtd->writesize,
1490 chip->oob_poi,
1491 mtd->oobsize);
1492 }
1493
1494 pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_TODEVICE);
1495
1496 clear_interrupts(denali);
1497 denali_enable_dma(denali, true);
1498
1499 denali_setup_dma(denali, DENALI_WRITE);
1500
1501 /* wait for operation to complete */
1502 irq_status = wait_for_irq(denali, irq_mask);
1503
1504 if (irq_status == 0)
1505 {
1506 printk(KERN_ERR "timeout on write_page (type = %d)\n", raw_xfer);
1507 denali->status =
1508 (irq_status & INTR_STATUS0__PROGRAM_FAIL) ? NAND_STATUS_FAIL :
1509 PASS;
1510 }
1511
1512 denali_enable_dma(denali, false);
1513 pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_TODEVICE);
1514}
1515
1516/* NAND core entry points */
1517
1518/* this is the callback that the NAND core calls to write a page. Since
1519 writing a page with ECC or without is similar, all the work is done
1520 by write_page above. */
1521static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1522 const uint8_t *buf)
1523{
1524 /* for regular page writes, we let HW handle all the ECC
1525 * data written to the device. */
1526 write_page(mtd, chip, buf, false);
1527}
1528
1529/* This is the callback that the NAND core calls to write a page without ECC.
1530   raw access is similar to ECC page writes, so all the work is done in the
1531 write_page() function above.
1532 */
1533static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1534 const uint8_t *buf)
1535{
1536 /* for raw page writes, we want to disable ECC and simply write
1537 whatever data is in the buffer. */
1538 write_page(mtd, chip, buf, true);
1539}
1540
1541static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
1542 int page)
1543{
1544 return write_oob_data(mtd, chip->oob_poi, page);
1545}
1546
1547static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1548 int page, int sndcmd)
1549{
1550 read_oob_data(mtd, chip->oob_poi, page);
1551
1552	return 0; /* tell the NAND core that it does not need to
1553		   * resend the read command to the device. */
1554}
1555
1556static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1557 uint8_t *buf, int page)
1558{
1559 struct denali_nand_info *denali = mtd_to_denali(mtd);
1560 struct pci_dev *pci_dev = denali->dev;
1561
1562 dma_addr_t addr = denali->buf.dma_buf;
1563 size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1564
1565 uint32_t irq_status = 0;
1566 uint32_t irq_mask = INTR_STATUS0__ECC_TRANSACTION_DONE |
1567 INTR_STATUS0__ECC_ERR;
1568 bool check_erased_page = false;
1569
1570 setup_ecc_for_xfer(denali, true, false);
1571
1572 denali_enable_dma(denali, true);
1573 pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
1574
1575 clear_interrupts(denali);
1576 denali_setup_dma(denali, DENALI_READ);
1577
1578 /* wait for operation to complete */
1579 irq_status = wait_for_irq(denali, irq_mask);
1580
1581 pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
1582
1583 memcpy(buf, denali->buf.buf, mtd->writesize);
1584
1585 check_erased_page = handle_ecc(denali, buf, chip->oob_poi, irq_status);
1586 denali_enable_dma(denali, false);
1587
1588 if (check_erased_page)
1589 {
1590 read_oob_data(&denali->mtd, chip->oob_poi, denali->page);
1591
1592 /* check ECC failures that may have occurred on erased pages */
1593 if (check_erased_page)
1594 {
1595 if (!is_erased(buf, denali->mtd.writesize))
1596 {
1597 denali->mtd.ecc_stats.failed++;
1598 }
1599			if (!is_erased(chip->oob_poi, denali->mtd.oobsize))
1600 {
1601 denali->mtd.ecc_stats.failed++;
1602 }
1603 }
1604 }
1605 return 0;
1606}
1607
1608static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1609 uint8_t *buf, int page)
1610{
1611 struct denali_nand_info *denali = mtd_to_denali(mtd);
1612 struct pci_dev *pci_dev = denali->dev;
1613
1614 dma_addr_t addr = denali->buf.dma_buf;
1615 size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1616
1617 uint32_t irq_status = 0;
1618 uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP;
1619
1620 setup_ecc_for_xfer(denali, false, true);
1621 denali_enable_dma(denali, true);
1622
1623 pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
1624
1625 clear_interrupts(denali);
1626 denali_setup_dma(denali, DENALI_READ);
1627
1628 /* wait for operation to complete */
1629 irq_status = wait_for_irq(denali, irq_mask);
1630
1631 pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
1632
1633 denali_enable_dma(denali, false);
1634
1635 memcpy(buf, denali->buf.buf, mtd->writesize);
1636 memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);
1637
1638 return 0;
1639}
1640
1641static uint8_t denali_read_byte(struct mtd_info *mtd)
1642{
1643 struct denali_nand_info *denali = mtd_to_denali(mtd);
1644 uint8_t result = 0xff;
1645
1646 if (denali->buf.head < denali->buf.tail)
1647 {
1648 result = denali->buf.buf[denali->buf.head++];
1649 }
1650
1651#if DEBUG_DENALI
1652 printk("read byte -> 0x%02x\n", result);
1653#endif
1654 return result;
1655}
1656
1657static void denali_select_chip(struct mtd_info *mtd, int chip)
1658{
1659 struct denali_nand_info *denali = mtd_to_denali(mtd);
1660#if DEBUG_DENALI
1661 printk("denali select chip %d\n", chip);
1662#endif
1663 spin_lock_irq(&denali->irq_lock);
1664 denali->flash_bank = chip;
1665 spin_unlock_irq(&denali->irq_lock);
1666}
1667
1668static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
1669{
1670 struct denali_nand_info *denali = mtd_to_denali(mtd);
1671 int status = denali->status;
1672 denali->status = 0;
1673
1674#if DEBUG_DENALI
1675 printk("waitfunc %d\n", status);
1676#endif
1677 return status;
1678}
1679
1680static void denali_erase(struct mtd_info *mtd, int page)
1681{
1682 struct denali_nand_info *denali = mtd_to_denali(mtd);
1683
1684 uint32_t cmd = 0x0, irq_status = 0;
1685
1686#if DEBUG_DENALI
1687 printk("erase page: %d\n", page);
1688#endif
1689 /* clear interrupts */
1690 clear_interrupts(denali);
1691
1692	/* setup the erase request for the selected bank/page */
1693 cmd = MODE_10 | BANK(denali->flash_bank) | page;
1694 index_addr(denali, (uint32_t)cmd, 0x1);
1695
1696 /* wait for erase to complete or failure to occur */
1697 irq_status = wait_for_irq(denali, INTR_STATUS0__ERASE_COMP |
1698 INTR_STATUS0__ERASE_FAIL);
1699
1700 denali->status = (irq_status & INTR_STATUS0__ERASE_FAIL) ? NAND_STATUS_FAIL :
1701 PASS;
1702}
1703
1704static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
1705 int page)
1706{
1707 struct denali_nand_info *denali = mtd_to_denali(mtd);
1708
1709#if DEBUG_DENALI
1710 printk("cmdfunc: 0x%x %d %d\n", cmd, col, page);
1711#endif
1712 switch (cmd)
1713 {
1714 case NAND_CMD_PAGEPROG:
1715 break;
1716 case NAND_CMD_STATUS:
1717 read_status(denali);
1718 break;
1719 case NAND_CMD_READID:
1720 reset_buf(denali);
1721 if (denali->flash_bank < denali->total_used_banks)
1722 {
1723 /* write manufacturer information into nand
1724 buffer for NAND subsystem to fetch.
1725 */
1726 write_byte_to_buf(denali, denali->dev_info.wDeviceMaker);
1727 write_byte_to_buf(denali, denali->dev_info.wDeviceID);
1728 write_byte_to_buf(denali, denali->dev_info.bDeviceParam0);
1729 write_byte_to_buf(denali, denali->dev_info.bDeviceParam1);
1730 write_byte_to_buf(denali, denali->dev_info.bDeviceParam2);
1731 }
1732 else
1733 {
1734 int i;
1735 for (i = 0; i < 5; i++)
1736 write_byte_to_buf(denali, 0xff);
1737 }
1738 break;
1739 case NAND_CMD_READ0:
1740 case NAND_CMD_SEQIN:
1741 denali->page = page;
1742 break;
1743 case NAND_CMD_RESET:
1744 reset_bank(denali);
1745 break;
1746 case NAND_CMD_READOOB:
1747 /* TODO: Read OOB data */
1748 break;
1749 default:
1750 printk(KERN_ERR ": unsupported command received 0x%x\n", cmd);
1751 break;
1752 }
1753}
1754
1755/* stubs for ECC functions not used by the NAND core */
1756static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
1757 uint8_t *ecc_code)
1758{
1759 printk(KERN_ERR "denali_ecc_calculate called unexpectedly\n");
1760 BUG();
1761 return -EIO;
1762}
1763
1764static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
1765 uint8_t *read_ecc, uint8_t *calc_ecc)
1766{
1767 printk(KERN_ERR "denali_ecc_correct called unexpectedly\n");
1768 BUG();
1769 return -EIO;
1770}
1771
1772static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
1773{
1774 printk(KERN_ERR "denali_ecc_hwctl called unexpectedly\n");
1775 BUG();
1776}
1777/* end NAND core entry points */
1778
1779/* Initialization code to bring the device up to a known good state */
1780static void denali_hw_init(struct denali_nand_info *denali)
1781{
1782 denali_irq_init(denali);
1783 NAND_Flash_Reset(denali);
1784 denali_write32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
1785 denali_write32(CHIP_EN_DONT_CARE__FLAG, denali->flash_reg + CHIP_ENABLE_DONT_CARE);
1786
1787 denali_write32(0x0, denali->flash_reg + SPARE_AREA_SKIP_BYTES);
1788 denali_write32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);
1789
1790 /* Should set value for these registers when init */
1791 denali_write32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
1792 denali_write32(1, denali->flash_reg + ECC_ENABLE);
1793}
1794
1795/* ECC layout for SLC devices. Denali spec indicates SLC fixed at 4 bytes */
1796#define ECC_BYTES_SLC (4 * (2048 / ECC_SECTOR_SIZE))
1797static struct nand_ecclayout nand_oob_slc = {
1798 .eccbytes = 4,
1799 .eccpos = { 0, 1, 2, 3 }, /* not used */
1800 .oobfree = {{
1801 .offset = ECC_BYTES_SLC,
1802 .length = 64 - ECC_BYTES_SLC
1803 }}
1804};
1805
1806#define ECC_BYTES_MLC (14 * (2048 / ECC_SECTOR_SIZE))
1807static struct nand_ecclayout nand_oob_mlc_14bit = {
1808 .eccbytes = 14,
1809	.eccpos = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, /* not used */
1810 .oobfree = {{
1811 .offset = ECC_BYTES_MLC,
1812 .length = 64 - ECC_BYTES_MLC
1813 }}
1814};
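/* With the 2048 byte page and 512 byte ECC sector assumed above, ECC uses
 * 4 * 4 = 16 OOB bytes on SLC (free area at offset 16, length 48) and
 * 14 * 4 = 56 OOB bytes on MLC (free area at offset 56, length 8).
 */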
1815
1816static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
1817static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
1818
1819static struct nand_bbt_descr bbt_main_descr = {
1820 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
1821 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
1822 .offs = 8,
1823 .len = 4,
1824 .veroffs = 12,
1825 .maxblocks = 4,
1826 .pattern = bbt_pattern,
1827};
1828
1829static struct nand_bbt_descr bbt_mirror_descr = {
1830 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
1831 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
1832 .offs = 8,
1833 .len = 4,
1834 .veroffs = 12,
1835 .maxblocks = 4,
1836 .pattern = mirror_pattern,
1837};
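/* Both descriptors keep a flash-based bad block table in up to the last 4
 * blocks of each chip: the 4 byte "Bbt0"/"1tbB" pattern sits at OOB offset
 * 8 and the table version number at offset 12.
 */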
1838
1839/* initialize driver data structures */
1840void denali_drv_init(struct denali_nand_info *denali)
1841{
1842 denali->idx = 0;
1843
1844 /* setup interrupt handler */
1845 /* the completion object will be used to notify
1846	 * the waiter that the interrupt has occurred */
1847 init_completion(&denali->complete);
1848
1849 /* the spinlock will be used to synchronize the ISR
1850	 * with any code that might be accessing shared
1851 * data (interrupt status) */
1852 spin_lock_init(&denali->irq_lock);
1853
1854 /* indicate that MTD has not selected a valid bank yet */
1855 denali->flash_bank = CHIP_SELECT_INVALID;
1856
1857 /* initialize our irq_status variable to indicate no interrupts */
1858 denali->irq_status = 0;
1859}
1860
1861/* driver entry point */
1862static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1863{
1864 int ret = -ENODEV;
1865 resource_size_t csr_base, mem_base;
1866 unsigned long csr_len, mem_len;
1867 struct denali_nand_info *denali;
1868
1869 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1870 __FILE__, __LINE__, __func__);
1871
1872 denali = kzalloc(sizeof(*denali), GFP_KERNEL);
1873 if (!denali)
1874 return -ENOMEM;
1875
1876 ret = pci_enable_device(dev);
1877 if (ret) {
1878 printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
1879 goto failed_enable;
1880 }
1881
1882 if (id->driver_data == INTEL_CE4100) {
1883 /* Due to a silicon limitation, we can only support
1884 * ONFI timing mode 1 and below.
1885 */
1886 if (onfi_timing_mode < -1 || onfi_timing_mode > 1)
1887 {
1888			printk(KERN_ERR "Intel CE4100 only supports ONFI timing mode 1 "
1889 "or below\n");
1890 ret = -EINVAL;
1891 goto failed_enable;
1892 }
1893 denali->platform = INTEL_CE4100;
1894 mem_base = pci_resource_start(dev, 0);
1895 mem_len = pci_resource_len(dev, 1);
1896 csr_base = pci_resource_start(dev, 1);
1897 csr_len = pci_resource_len(dev, 1);
1898 } else {
1899 denali->platform = INTEL_MRST;
1900 csr_base = pci_resource_start(dev, 0);
1901		csr_len = pci_resource_len(dev, 0);
1902 mem_base = pci_resource_start(dev, 1);
1903 mem_len = pci_resource_len(dev, 1);
1904 if (!mem_len) {
1905 mem_base = csr_base + csr_len;
1906 mem_len = csr_len;
1907 nand_dbg_print(NAND_DBG_WARN,
1908 "Spectra: No second BAR for PCI device; assuming %08Lx\n",
1909 (uint64_t)csr_base);
1910 }
1911 }
1912
1913 /* Is 32-bit DMA supported? */
1914 ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
1915
1916 if (ret)
1917 {
1918 printk(KERN_ERR "Spectra: no usable DMA configuration\n");
1919 goto failed_enable;
1920 }
1921 denali->buf.dma_buf = pci_map_single(dev, denali->buf.buf, DENALI_BUF_SIZE,
1922 PCI_DMA_BIDIRECTIONAL);
1923
1924 if (pci_dma_mapping_error(dev, denali->buf.dma_buf))
1925 {
1926 printk(KERN_ERR "Spectra: failed to map DMA buffer\n");
1927 goto failed_enable;
1928 }
1929
1930 pci_set_master(dev);
1931 denali->dev = dev;
1932
1933 ret = pci_request_regions(dev, DENALI_NAND_NAME);
1934 if (ret) {
1935 printk(KERN_ERR "Spectra: Unable to request memory regions\n");
1936 goto failed_req_csr;
1937 }
1938
1939 denali->flash_reg = ioremap_nocache(csr_base, csr_len);
1940 if (!denali->flash_reg) {
1941 printk(KERN_ERR "Spectra: Unable to remap memory region\n");
1942 ret = -ENOMEM;
1943 goto failed_remap_csr;
1944 }
1945 nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08Lx -> 0x%p (0x%lx)\n",
1946 (uint64_t)csr_base, denali->flash_reg, csr_len);
1947
1948 denali->flash_mem = ioremap_nocache(mem_base, mem_len);
1949 if (!denali->flash_mem) {
1950 printk(KERN_ERR "Spectra: ioremap_nocache failed!");
1951 iounmap(denali->flash_reg);
1952 ret = -ENOMEM;
1953 goto failed_remap_csr;
1954 }
1955
1956 nand_dbg_print(NAND_DBG_WARN,
1957 "Spectra: Remapped flash base address: "
1958 "0x%p, len: %ld\n",
1959			denali->flash_mem, mem_len);
1960
1961 denali_hw_init(denali);
1962 denali_drv_init(denali);
1963
1964 nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
1965 if (request_irq(dev->irq, denali_isr, IRQF_SHARED,
1966 DENALI_NAND_NAME, denali)) {
1967 printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
1968 ret = -ENODEV;
1969 goto failed_request_irq;
1970 }
1971
1972 /* now that our ISR is registered, we can enable interrupts */
1973 NAND_LLD_Enable_Disable_Interrupts(denali, true);
1974
1975 pci_set_drvdata(dev, denali);
1976
1977 NAND_Read_Device_ID(denali);
1978
1979 /* MTD supported page sizes vary by kernel. We validate our
1980 kernel supports the device here.
1981 */
1982 if (denali->dev_info.wPageSize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
1983 {
1984 ret = -ENODEV;
1985 printk(KERN_ERR "Spectra: device size not supported by this "
1986			"version of MTD.\n");
1987 goto failed_nand;
1988 }
1989
1990 nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
1991		"acc_clks: %d, re_2_we: %d, we_2_re: %d, "
1992 "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
1993 "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
1994 ioread32(denali->flash_reg + ACC_CLKS),
1995 ioread32(denali->flash_reg + RE_2_WE),
1996 ioread32(denali->flash_reg + WE_2_RE),
1997 ioread32(denali->flash_reg + ADDR_2_DATA),
1998 ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
1999 ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
2000 ioread32(denali->flash_reg + CS_SETUP_CNT));
2001
2002 denali->mtd.name = "Denali NAND";
2003 denali->mtd.owner = THIS_MODULE;
2004 denali->mtd.priv = &denali->nand;
2005
2006 /* register the driver with the NAND core subsystem */
2007 denali->nand.select_chip = denali_select_chip;
2008 denali->nand.cmdfunc = denali_cmdfunc;
2009 denali->nand.read_byte = denali_read_byte;
2010 denali->nand.waitfunc = denali_waitfunc;
2011
2012 /* scan for NAND devices attached to the controller
2013 * this is the first stage in a two step process to register
2014 * with the nand subsystem */
2015 if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL))
2016 {
2017 ret = -ENXIO;
2018 goto failed_nand;
2019 }
2020
2021 /* second stage of the NAND scan
2022 * this stage requires information regarding ECC and
2023 * bad block management. */
2024
2025 /* Bad block management */
2026 denali->nand.bbt_td = &bbt_main_descr;
2027 denali->nand.bbt_md = &bbt_mirror_descr;
2028
2029 /* skip the scan for now until we have OOB read and write support */
2030 denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
2031 denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
2032
2033 if (denali->dev_info.MLCDevice)
2034 {
2035 denali->nand.ecc.layout = &nand_oob_mlc_14bit;
2036 denali->nand.ecc.bytes = ECC_BYTES_MLC;
2037 }
2038 else /* SLC */
2039 {
2040 denali->nand.ecc.layout = &nand_oob_slc;
2041 denali->nand.ecc.bytes = ECC_BYTES_SLC;
2042 }
2043
2044 /* These functions are required by the NAND core framework, otherwise,
2045 the NAND core will assert. However, we don't need them, so we'll stub
2046 them out. */
2047 denali->nand.ecc.calculate = denali_ecc_calculate;
2048 denali->nand.ecc.correct = denali_ecc_correct;
2049 denali->nand.ecc.hwctl = denali_ecc_hwctl;
2050
2051 /* override the default read operations */
2052 denali->nand.ecc.size = denali->mtd.writesize;
2053 denali->nand.ecc.read_page = denali_read_page;
2054 denali->nand.ecc.read_page_raw = denali_read_page_raw;
2055 denali->nand.ecc.write_page = denali_write_page;
2056 denali->nand.ecc.write_page_raw = denali_write_page_raw;
2057 denali->nand.ecc.read_oob = denali_read_oob;
2058 denali->nand.ecc.write_oob = denali_write_oob;
2059 denali->nand.erase_cmd = denali_erase;
2060
2061 if (nand_scan_tail(&denali->mtd))
2062 {
2063 ret = -ENXIO;
2064 goto failed_nand;
2065 }
2066
2067 ret = add_mtd_device(&denali->mtd);
2068 if (ret) {
2069 printk(KERN_ERR "Spectra: Failed to register MTD device: %d\n", ret);
2070 goto failed_nand;
2071 }
2072 return 0;
2073
2074 failed_nand:
2075 denali_irq_cleanup(dev->irq, denali);
2076 failed_request_irq:
2077 iounmap(denali->flash_reg);
2078 iounmap(denali->flash_mem);
2079 failed_remap_csr:
2080 pci_release_regions(dev);
2081 failed_req_csr:
2082 pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
2083 PCI_DMA_BIDIRECTIONAL);
2084 failed_enable:
2085 kfree(denali);
2086 return ret;
2087}
2088
2089/* driver exit point */
2090static void denali_pci_remove(struct pci_dev *dev)
2091{
2092 struct denali_nand_info *denali = pci_get_drvdata(dev);
2093
2094 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2095 __FILE__, __LINE__, __func__);
2096
2097 nand_release(&denali->mtd);
2098 del_mtd_device(&denali->mtd);
2099
2100 denali_irq_cleanup(dev->irq, denali);
2101
2102 iounmap(denali->flash_reg);
2103 iounmap(denali->flash_mem);
2104 pci_release_regions(dev);
2105 pci_disable_device(dev);
2106 pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
2107 PCI_DMA_BIDIRECTIONAL);
2108 pci_set_drvdata(dev, NULL);
2109 kfree(denali);
2110}
2111
2112MODULE_DEVICE_TABLE(pci, denali_pci_ids);
2113
2114static struct pci_driver denali_pci_driver = {
2115 .name = DENALI_NAND_NAME,
2116 .id_table = denali_pci_ids,
2117 .probe = denali_pci_probe,
2118 .remove = denali_pci_remove,
2119};
2120
2121static int __init denali_init(void)
2122{
2123 printk(KERN_INFO "Spectra MTD driver built on %s @ %s\n", __DATE__, __TIME__);
2124 return pci_register_driver(&denali_pci_driver);
2125}
2126
2127/* driver module exit point */
2128static void __exit denali_exit(void)
2129{
2130 pci_unregister_driver(&denali_pci_driver);
2131}
2132
2133module_init(denali_init);
2134module_exit(denali_exit);
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
new file mode 100644
index 00000000000..422a29ab2f6
--- /dev/null
+++ b/drivers/mtd/nand/denali.h
@@ -0,0 +1,816 @@
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009 - 2010, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20#include <linux/mtd/nand.h>
21
22#define DEVICE_RESET 0x0
23#define DEVICE_RESET__BANK0 0x0001
24#define DEVICE_RESET__BANK1 0x0002
25#define DEVICE_RESET__BANK2 0x0004
26#define DEVICE_RESET__BANK3 0x0008
27
28#define TRANSFER_SPARE_REG 0x10
29#define TRANSFER_SPARE_REG__FLAG 0x0001
30
31#define LOAD_WAIT_CNT 0x20
32#define LOAD_WAIT_CNT__VALUE 0xffff
33
34#define PROGRAM_WAIT_CNT 0x30
35#define PROGRAM_WAIT_CNT__VALUE 0xffff
36
37#define ERASE_WAIT_CNT 0x40
38#define ERASE_WAIT_CNT__VALUE 0xffff
39
40#define INT_MON_CYCCNT 0x50
41#define INT_MON_CYCCNT__VALUE 0xffff
42
43#define RB_PIN_ENABLED 0x60
44#define RB_PIN_ENABLED__BANK0 0x0001
45#define RB_PIN_ENABLED__BANK1 0x0002
46#define RB_PIN_ENABLED__BANK2 0x0004
47#define RB_PIN_ENABLED__BANK3 0x0008
48
49#define MULTIPLANE_OPERATION 0x70
50#define MULTIPLANE_OPERATION__FLAG 0x0001
51
52#define MULTIPLANE_READ_ENABLE 0x80
53#define MULTIPLANE_READ_ENABLE__FLAG 0x0001
54
55#define COPYBACK_DISABLE 0x90
56#define COPYBACK_DISABLE__FLAG 0x0001
57
58#define CACHE_WRITE_ENABLE 0xa0
59#define CACHE_WRITE_ENABLE__FLAG 0x0001
60
61#define CACHE_READ_ENABLE 0xb0
62#define CACHE_READ_ENABLE__FLAG 0x0001
63
64#define PREFETCH_MODE 0xc0
65#define PREFETCH_MODE__PREFETCH_EN 0x0001
66#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0
67
68#define CHIP_ENABLE_DONT_CARE 0xd0
69#define CHIP_EN_DONT_CARE__FLAG 0x01
70
71#define ECC_ENABLE 0xe0
72#define ECC_ENABLE__FLAG 0x0001
73
74#define GLOBAL_INT_ENABLE 0xf0
75#define GLOBAL_INT_EN_FLAG 0x01
76
77#define WE_2_RE 0x100
78#define WE_2_RE__VALUE 0x003f
79
80#define ADDR_2_DATA 0x110
81#define ADDR_2_DATA__VALUE 0x003f
82
83#define RE_2_WE 0x120
84#define RE_2_WE__VALUE 0x003f
85
86#define ACC_CLKS 0x130
87#define ACC_CLKS__VALUE 0x000f
88
89#define NUMBER_OF_PLANES 0x140
90#define NUMBER_OF_PLANES__VALUE 0x0007
91
92#define PAGES_PER_BLOCK 0x150
93#define PAGES_PER_BLOCK__VALUE 0xffff
94
95#define DEVICE_WIDTH 0x160
96#define DEVICE_WIDTH__VALUE 0x0003
97
98#define DEVICE_MAIN_AREA_SIZE 0x170
99#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff
100
101#define DEVICE_SPARE_AREA_SIZE 0x180
102#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff
103
104#define TWO_ROW_ADDR_CYCLES 0x190
105#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001
106
107#define MULTIPLANE_ADDR_RESTRICT 0x1a0
108#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001
109
110#define ECC_CORRECTION 0x1b0
111#define ECC_CORRECTION__VALUE 0x001f
112
113#define READ_MODE 0x1c0
114#define READ_MODE__VALUE 0x000f
115
116#define WRITE_MODE 0x1d0
117#define WRITE_MODE__VALUE 0x000f
118
119#define COPYBACK_MODE 0x1e0
120#define COPYBACK_MODE__VALUE 0x000f
121
122#define RDWR_EN_LO_CNT 0x1f0
123#define RDWR_EN_LO_CNT__VALUE 0x001f
124
125#define RDWR_EN_HI_CNT 0x200
126#define RDWR_EN_HI_CNT__VALUE 0x001f
127
128#define MAX_RD_DELAY 0x210
129#define MAX_RD_DELAY__VALUE 0x000f
130
131#define CS_SETUP_CNT 0x220
132#define CS_SETUP_CNT__VALUE 0x001f
133
134#define SPARE_AREA_SKIP_BYTES 0x230
135#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f
136
137#define SPARE_AREA_MARKER 0x240
138#define SPARE_AREA_MARKER__VALUE 0xffff
139
140#define DEVICES_CONNECTED 0x250
141#define DEVICES_CONNECTED__VALUE 0x0007
142
143#define DIE_MASK 0x260
144#define DIE_MASK__VALUE 0x00ff
145
146#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
147#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff
148
149#define WRITE_PROTECT 0x280
150#define WRITE_PROTECT__FLAG 0x0001
151
152#define RE_2_RE 0x290
153#define RE_2_RE__VALUE 0x003f
154
155#define MANUFACTURER_ID 0x300
156#define MANUFACTURER_ID__VALUE 0x00ff
157
158#define DEVICE_ID 0x310
159#define DEVICE_ID__VALUE 0x00ff
160
161#define DEVICE_PARAM_0 0x320
162#define DEVICE_PARAM_0__VALUE 0x00ff
163
164#define DEVICE_PARAM_1 0x330
165#define DEVICE_PARAM_1__VALUE 0x00ff
166
167#define DEVICE_PARAM_2 0x340
168#define DEVICE_PARAM_2__VALUE 0x00ff
169
170#define LOGICAL_PAGE_DATA_SIZE 0x350
171#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff
172
173#define LOGICAL_PAGE_SPARE_SIZE 0x360
174#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff
175
176#define REVISION 0x370
177#define REVISION__VALUE 0xffff
178
179#define ONFI_DEVICE_FEATURES 0x380
180#define ONFI_DEVICE_FEATURES__VALUE 0x003f
181
182#define ONFI_OPTIONAL_COMMANDS 0x390
183#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f
184
185#define ONFI_TIMING_MODE 0x3a0
186#define ONFI_TIMING_MODE__VALUE 0x003f
187
188#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0
189#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f
190
191#define ONFI_DEVICE_NO_OF_LUNS 0x3c0
192#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff
193#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100
194
195#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0
196#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff
197
198#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0
199#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff
200
201#define FEATURES 0x3f0
202#define FEATURES__N_BANKS 0x0003
203#define FEATURES__ECC_MAX_ERR 0x003c
204#define FEATURES__DMA 0x0040
205#define FEATURES__CMD_DMA 0x0080
206#define FEATURES__PARTITION 0x0100
207#define FEATURES__XDMA_SIDEBAND 0x0200
208#define FEATURES__GPREG 0x0400
209#define FEATURES__INDEX_ADDR 0x0800
210
211#define TRANSFER_MODE 0x400
212#define TRANSFER_MODE__VALUE 0x0003
213
214#define INTR_STATUS0 0x410
215#define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001
216#define INTR_STATUS0__ECC_ERR 0x0002
217#define INTR_STATUS0__DMA_CMD_COMP 0x0004
218#define INTR_STATUS0__TIME_OUT 0x0008
219#define INTR_STATUS0__PROGRAM_FAIL 0x0010
220#define INTR_STATUS0__ERASE_FAIL 0x0020
221#define INTR_STATUS0__LOAD_COMP 0x0040
222#define INTR_STATUS0__PROGRAM_COMP 0x0080
223#define INTR_STATUS0__ERASE_COMP 0x0100
224#define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200
225#define INTR_STATUS0__LOCKED_BLK 0x0400
226#define INTR_STATUS0__UNSUP_CMD 0x0800
227#define INTR_STATUS0__INT_ACT 0x1000
228#define INTR_STATUS0__RST_COMP 0x2000
229#define INTR_STATUS0__PIPE_CMD_ERR 0x4000
230#define INTR_STATUS0__PAGE_XFER_INC 0x8000
231
232#define INTR_EN0 0x420
233#define INTR_EN0__ECC_TRANSACTION_DONE 0x0001
234#define INTR_EN0__ECC_ERR 0x0002
235#define INTR_EN0__DMA_CMD_COMP 0x0004
236#define INTR_EN0__TIME_OUT 0x0008
237#define INTR_EN0__PROGRAM_FAIL 0x0010
238#define INTR_EN0__ERASE_FAIL 0x0020
239#define INTR_EN0__LOAD_COMP 0x0040
240#define INTR_EN0__PROGRAM_COMP 0x0080
241#define INTR_EN0__ERASE_COMP 0x0100
242#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200
243#define INTR_EN0__LOCKED_BLK 0x0400
244#define INTR_EN0__UNSUP_CMD 0x0800
245#define INTR_EN0__INT_ACT 0x1000
246#define INTR_EN0__RST_COMP 0x2000
247#define INTR_EN0__PIPE_CMD_ERR 0x4000
248#define INTR_EN0__PAGE_XFER_INC 0x8000
249
250#define PAGE_CNT0 0x430
251#define PAGE_CNT0__VALUE 0x00ff
252
253#define ERR_PAGE_ADDR0 0x440
254#define ERR_PAGE_ADDR0__VALUE 0xffff
255
256#define ERR_BLOCK_ADDR0 0x450
257#define ERR_BLOCK_ADDR0__VALUE 0xffff
258
259#define INTR_STATUS1 0x460
260#define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001
261#define INTR_STATUS1__ECC_ERR 0x0002
262#define INTR_STATUS1__DMA_CMD_COMP 0x0004
263#define INTR_STATUS1__TIME_OUT 0x0008
264#define INTR_STATUS1__PROGRAM_FAIL 0x0010
265#define INTR_STATUS1__ERASE_FAIL 0x0020
266#define INTR_STATUS1__LOAD_COMP 0x0040
267#define INTR_STATUS1__PROGRAM_COMP 0x0080
268#define INTR_STATUS1__ERASE_COMP 0x0100
269#define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200
270#define INTR_STATUS1__LOCKED_BLK 0x0400
271#define INTR_STATUS1__UNSUP_CMD 0x0800
272#define INTR_STATUS1__INT_ACT 0x1000
273#define INTR_STATUS1__RST_COMP 0x2000
274#define INTR_STATUS1__PIPE_CMD_ERR 0x4000
275#define INTR_STATUS1__PAGE_XFER_INC 0x8000
276
277#define INTR_EN1 0x470
278#define INTR_EN1__ECC_TRANSACTION_DONE 0x0001
279#define INTR_EN1__ECC_ERR 0x0002
280#define INTR_EN1__DMA_CMD_COMP 0x0004
281#define INTR_EN1__TIME_OUT 0x0008
282#define INTR_EN1__PROGRAM_FAIL 0x0010
283#define INTR_EN1__ERASE_FAIL 0x0020
284#define INTR_EN1__LOAD_COMP 0x0040
285#define INTR_EN1__PROGRAM_COMP 0x0080
286#define INTR_EN1__ERASE_COMP 0x0100
287#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200
288#define INTR_EN1__LOCKED_BLK 0x0400
289#define INTR_EN1__UNSUP_CMD 0x0800
290#define INTR_EN1__INT_ACT 0x1000
291#define INTR_EN1__RST_COMP 0x2000
292#define INTR_EN1__PIPE_CMD_ERR 0x4000
293#define INTR_EN1__PAGE_XFER_INC 0x8000
294
295#define PAGE_CNT1 0x480
296#define PAGE_CNT1__VALUE 0x00ff
297
298#define ERR_PAGE_ADDR1 0x490
299#define ERR_PAGE_ADDR1__VALUE 0xffff
300
301#define ERR_BLOCK_ADDR1 0x4a0
302#define ERR_BLOCK_ADDR1__VALUE 0xffff
303
304#define INTR_STATUS2 0x4b0
305#define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001
306#define INTR_STATUS2__ECC_ERR 0x0002
307#define INTR_STATUS2__DMA_CMD_COMP 0x0004
308#define INTR_STATUS2__TIME_OUT 0x0008
309#define INTR_STATUS2__PROGRAM_FAIL 0x0010
310#define INTR_STATUS2__ERASE_FAIL 0x0020
311#define INTR_STATUS2__LOAD_COMP 0x0040
312#define INTR_STATUS2__PROGRAM_COMP 0x0080
313#define INTR_STATUS2__ERASE_COMP 0x0100
314#define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200
315#define INTR_STATUS2__LOCKED_BLK 0x0400
316#define INTR_STATUS2__UNSUP_CMD 0x0800
317#define INTR_STATUS2__INT_ACT 0x1000
318#define INTR_STATUS2__RST_COMP 0x2000
319#define INTR_STATUS2__PIPE_CMD_ERR 0x4000
320#define INTR_STATUS2__PAGE_XFER_INC 0x8000
321
322#define INTR_EN2 0x4c0
323#define INTR_EN2__ECC_TRANSACTION_DONE 0x0001
324#define INTR_EN2__ECC_ERR 0x0002
325#define INTR_EN2__DMA_CMD_COMP 0x0004
326#define INTR_EN2__TIME_OUT 0x0008
327#define INTR_EN2__PROGRAM_FAIL 0x0010
328#define INTR_EN2__ERASE_FAIL 0x0020
329#define INTR_EN2__LOAD_COMP 0x0040
330#define INTR_EN2__PROGRAM_COMP 0x0080
331#define INTR_EN2__ERASE_COMP 0x0100
332#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200
333#define INTR_EN2__LOCKED_BLK 0x0400
334#define INTR_EN2__UNSUP_CMD 0x0800
335#define INTR_EN2__INT_ACT 0x1000
336#define INTR_EN2__RST_COMP 0x2000
337#define INTR_EN2__PIPE_CMD_ERR 0x4000
338#define INTR_EN2__PAGE_XFER_INC 0x8000
339
340#define PAGE_CNT2 0x4d0
341#define PAGE_CNT2__VALUE 0x00ff
342
343#define ERR_PAGE_ADDR2 0x4e0
344#define ERR_PAGE_ADDR2__VALUE 0xffff
345
346#define ERR_BLOCK_ADDR2 0x4f0
347#define ERR_BLOCK_ADDR2__VALUE 0xffff
348
349#define INTR_STATUS3 0x500
350#define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001
351#define INTR_STATUS3__ECC_ERR 0x0002
352#define INTR_STATUS3__DMA_CMD_COMP 0x0004
353#define INTR_STATUS3__TIME_OUT 0x0008
354#define INTR_STATUS3__PROGRAM_FAIL 0x0010
355#define INTR_STATUS3__ERASE_FAIL 0x0020
356#define INTR_STATUS3__LOAD_COMP 0x0040
357#define INTR_STATUS3__PROGRAM_COMP 0x0080
358#define INTR_STATUS3__ERASE_COMP 0x0100
359#define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200
360#define INTR_STATUS3__LOCKED_BLK 0x0400
361#define INTR_STATUS3__UNSUP_CMD 0x0800
362#define INTR_STATUS3__INT_ACT 0x1000
363#define INTR_STATUS3__RST_COMP 0x2000
364#define INTR_STATUS3__PIPE_CMD_ERR 0x4000
365#define INTR_STATUS3__PAGE_XFER_INC 0x8000
366
367#define INTR_EN3 0x510
368#define INTR_EN3__ECC_TRANSACTION_DONE 0x0001
369#define INTR_EN3__ECC_ERR 0x0002
370#define INTR_EN3__DMA_CMD_COMP 0x0004
371#define INTR_EN3__TIME_OUT 0x0008
372#define INTR_EN3__PROGRAM_FAIL 0x0010
373#define INTR_EN3__ERASE_FAIL 0x0020
374#define INTR_EN3__LOAD_COMP 0x0040
375#define INTR_EN3__PROGRAM_COMP 0x0080
376#define INTR_EN3__ERASE_COMP 0x0100
377#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200
378#define INTR_EN3__LOCKED_BLK 0x0400
379#define INTR_EN3__UNSUP_CMD 0x0800
380#define INTR_EN3__INT_ACT 0x1000
381#define INTR_EN3__RST_COMP 0x2000
382#define INTR_EN3__PIPE_CMD_ERR 0x4000
383#define INTR_EN3__PAGE_XFER_INC 0x8000
384
385#define PAGE_CNT3 0x520
386#define PAGE_CNT3__VALUE 0x00ff
387
388#define ERR_PAGE_ADDR3 0x530
389#define ERR_PAGE_ADDR3__VALUE 0xffff
390
391#define ERR_BLOCK_ADDR3 0x540
392#define ERR_BLOCK_ADDR3__VALUE 0xffff
393
394#define DATA_INTR 0x550
395#define DATA_INTR__WRITE_SPACE_AV 0x0001
396#define DATA_INTR__READ_DATA_AV 0x0002
397
398#define DATA_INTR_EN 0x560
399#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001
400#define DATA_INTR_EN__READ_DATA_AV 0x0002
401
402#define GPREG_0 0x570
403#define GPREG_0__VALUE 0xffff
404
405#define GPREG_1 0x580
406#define GPREG_1__VALUE 0xffff
407
408#define GPREG_2 0x590
409#define GPREG_2__VALUE 0xffff
410
411#define GPREG_3 0x5a0
412#define GPREG_3__VALUE 0xffff
413
414#define ECC_THRESHOLD 0x600
415#define ECC_THRESHOLD__VALUE 0x03ff
416
417#define ECC_ERROR_BLOCK_ADDRESS 0x610
418#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff
419
420#define ECC_ERROR_PAGE_ADDRESS 0x620
421#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff
422#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000
423
424#define ECC_ERROR_ADDRESS 0x630
425#define ECC_ERROR_ADDRESS__OFFSET 0x0fff
426#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000
427
428#define ERR_CORRECTION_INFO 0x640
429#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff
430#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00
431#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000
432#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000
433
434#define DMA_ENABLE 0x700
435#define DMA_ENABLE__FLAG 0x0001
436
437#define IGNORE_ECC_DONE 0x710
438#define IGNORE_ECC_DONE__FLAG 0x0001
439
440#define DMA_INTR 0x720
441#define DMA_INTR__TARGET_ERROR 0x0001
442#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002
443#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004
444#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008
445#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010
446#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020
447
448#define DMA_INTR_EN 0x730
449#define DMA_INTR_EN__TARGET_ERROR 0x0001
450#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002
451#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004
452#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008
453#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010
454#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020
455
456#define TARGET_ERR_ADDR_LO 0x740
457#define TARGET_ERR_ADDR_LO__VALUE 0xffff
458
459#define TARGET_ERR_ADDR_HI 0x750
460#define TARGET_ERR_ADDR_HI__VALUE 0xffff
461
462#define CHNL_ACTIVE 0x760
463#define CHNL_ACTIVE__CHANNEL0 0x0001
464#define CHNL_ACTIVE__CHANNEL1 0x0002
465#define CHNL_ACTIVE__CHANNEL2 0x0004
466#define CHNL_ACTIVE__CHANNEL3 0x0008
467
468#define ACTIVE_SRC_ID 0x800
469#define ACTIVE_SRC_ID__VALUE 0x00ff
470
471#define PTN_INTR 0x810
472#define PTN_INTR__CONFIG_ERROR 0x0001
473#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002
474#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004
475#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008
476#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010
477#define PTN_INTR__REG_ACCESS_ERROR 0x0020
478
479#define PTN_INTR_EN 0x820
480#define PTN_INTR_EN__CONFIG_ERROR 0x0001
481#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002
482#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004
483#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008
484#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
485#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
486
487#define PERM_SRC_ID_0 0x830
488#define PERM_SRC_ID_0__SRCID 0x00ff
489#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800
490#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000
491#define PERM_SRC_ID_0__READ_ACTIVE 0x4000
492#define PERM_SRC_ID_0__PARTITION_VALID 0x8000
493
494#define MIN_BLK_ADDR_0 0x840
495#define MIN_BLK_ADDR_0__VALUE 0xffff
496
497#define MAX_BLK_ADDR_0 0x850
498#define MAX_BLK_ADDR_0__VALUE 0xffff
499
500#define MIN_MAX_BANK_0 0x860
501#define MIN_MAX_BANK_0__MIN_VALUE 0x0003
502#define MIN_MAX_BANK_0__MAX_VALUE 0x000c
503
504#define PERM_SRC_ID_1 0x870
505#define PERM_SRC_ID_1__SRCID 0x00ff
506#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800
507#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000
508#define PERM_SRC_ID_1__READ_ACTIVE 0x4000
509#define PERM_SRC_ID_1__PARTITION_VALID 0x8000
510
511#define MIN_BLK_ADDR_1 0x880
512#define MIN_BLK_ADDR_1__VALUE 0xffff
513
514#define MAX_BLK_ADDR_1 0x890
515#define MAX_BLK_ADDR_1__VALUE 0xffff
516
517#define MIN_MAX_BANK_1 0x8a0
518#define MIN_MAX_BANK_1__MIN_VALUE 0x0003
519#define MIN_MAX_BANK_1__MAX_VALUE 0x000c
520
521#define PERM_SRC_ID_2 0x8b0
522#define PERM_SRC_ID_2__SRCID 0x00ff
523#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800
524#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000
525#define PERM_SRC_ID_2__READ_ACTIVE 0x4000
526#define PERM_SRC_ID_2__PARTITION_VALID 0x8000
527
528#define MIN_BLK_ADDR_2 0x8c0
529#define MIN_BLK_ADDR_2__VALUE 0xffff
530
531#define MAX_BLK_ADDR_2 0x8d0
532#define MAX_BLK_ADDR_2__VALUE 0xffff
533
534#define MIN_MAX_BANK_2 0x8e0
535#define MIN_MAX_BANK_2__MIN_VALUE 0x0003
536#define MIN_MAX_BANK_2__MAX_VALUE 0x000c
537
538#define PERM_SRC_ID_3 0x8f0
539#define PERM_SRC_ID_3__SRCID 0x00ff
540#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800
541#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000
542#define PERM_SRC_ID_3__READ_ACTIVE 0x4000
543#define PERM_SRC_ID_3__PARTITION_VALID 0x8000
544
545#define MIN_BLK_ADDR_3 0x900
546#define MIN_BLK_ADDR_3__VALUE 0xffff
547
548#define MAX_BLK_ADDR_3 0x910
549#define MAX_BLK_ADDR_3__VALUE 0xffff
550
551#define MIN_MAX_BANK_3 0x920
552#define MIN_MAX_BANK_3__MIN_VALUE 0x0003
553#define MIN_MAX_BANK_3__MAX_VALUE 0x000c
554
555#define PERM_SRC_ID_4 0x930
556#define PERM_SRC_ID_4__SRCID 0x00ff
557#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800
558#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000
559#define PERM_SRC_ID_4__READ_ACTIVE 0x4000
560#define PERM_SRC_ID_4__PARTITION_VALID 0x8000
561
562#define MIN_BLK_ADDR_4 0x940
563#define MIN_BLK_ADDR_4__VALUE 0xffff
564
565#define MAX_BLK_ADDR_4 0x950
566#define MAX_BLK_ADDR_4__VALUE 0xffff
567
568#define MIN_MAX_BANK_4 0x960
569#define MIN_MAX_BANK_4__MIN_VALUE 0x0003
570#define MIN_MAX_BANK_4__MAX_VALUE 0x000c
571
572#define PERM_SRC_ID_5 0x970
573#define PERM_SRC_ID_5__SRCID 0x00ff
574#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800
575#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000
576#define PERM_SRC_ID_5__READ_ACTIVE 0x4000
577#define PERM_SRC_ID_5__PARTITION_VALID 0x8000
578
579#define MIN_BLK_ADDR_5 0x980
580#define MIN_BLK_ADDR_5__VALUE 0xffff
581
582#define MAX_BLK_ADDR_5 0x990
583#define MAX_BLK_ADDR_5__VALUE 0xffff
584
585#define MIN_MAX_BANK_5 0x9a0
586#define MIN_MAX_BANK_5__MIN_VALUE 0x0003
587#define MIN_MAX_BANK_5__MAX_VALUE 0x000c
588
589#define PERM_SRC_ID_6 0x9b0
590#define PERM_SRC_ID_6__SRCID 0x00ff
591#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800
592#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000
593#define PERM_SRC_ID_6__READ_ACTIVE 0x4000
594#define PERM_SRC_ID_6__PARTITION_VALID 0x8000
595
596#define MIN_BLK_ADDR_6 0x9c0
597#define MIN_BLK_ADDR_6__VALUE 0xffff
598
599#define MAX_BLK_ADDR_6 0x9d0
600#define MAX_BLK_ADDR_6__VALUE 0xffff
601
602#define MIN_MAX_BANK_6 0x9e0
603#define MIN_MAX_BANK_6__MIN_VALUE 0x0003
604#define MIN_MAX_BANK_6__MAX_VALUE 0x000c
605
606#define PERM_SRC_ID_7 0x9f0
607#define PERM_SRC_ID_7__SRCID 0x00ff
608#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800
609#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000
610#define PERM_SRC_ID_7__READ_ACTIVE 0x4000
611#define PERM_SRC_ID_7__PARTITION_VALID 0x8000
612
613#define MIN_BLK_ADDR_7 0xa00
614#define MIN_BLK_ADDR_7__VALUE 0xffff
615
616#define MAX_BLK_ADDR_7 0xa10
617#define MAX_BLK_ADDR_7__VALUE 0xffff
618
619#define MIN_MAX_BANK_7 0xa20
620#define MIN_MAX_BANK_7__MIN_VALUE 0x0003
621#define MIN_MAX_BANK_7__MAX_VALUE 0x000c
622
623/* flash.h */
624struct device_info_tag {
625 uint16_t wDeviceMaker;
626 uint16_t wDeviceID;
627 uint8_t bDeviceParam0;
628 uint8_t bDeviceParam1;
629 uint8_t bDeviceParam2;
630 uint32_t wDeviceType;
631 uint32_t wSpectraStartBlock;
632 uint32_t wSpectraEndBlock;
633 uint32_t wTotalBlocks;
634 uint16_t wPagesPerBlock;
635 uint16_t wPageSize;
636 uint16_t wPageDataSize;
637 uint16_t wPageSpareSize;
638 uint16_t wNumPageSpareFlag;
639 uint16_t wECCBytesPerSector;
640 uint32_t wBlockSize;
641 uint32_t wBlockDataSize;
642 uint32_t wDataBlockNum;
643 uint8_t bPlaneNum;
644 uint16_t wDeviceMainAreaSize;
645 uint16_t wDeviceSpareAreaSize;
646 uint16_t wDevicesConnected;
647 uint16_t wDeviceWidth;
648 uint16_t wHWRevision;
649 uint16_t wHWFeatures;
650
651 uint16_t wONFIDevFeatures;
652 uint16_t wONFIOptCommands;
653 uint16_t wONFITimingMode;
654 uint16_t wONFIPgmCacheTimingMode;
655
656 uint16_t MLCDevice;
657 uint16_t wSpareSkipBytes;
658
659 uint8_t nBitsInPageNumber;
660 uint8_t nBitsInPageDataSize;
661 uint8_t nBitsInBlockDataSize;
662};
663
664/* ffsdefs.h */
665#define CLEAR 0 /*use this to clear a field instead of "fail"*/
666#define SET 1 /*use this to set a field instead of "pass"*/
667#define FAIL 1 /*failed flag*/
668#define PASS 0 /*success flag*/
669#define ERR -1 /*error flag*/
670
671/* lld.h */
672#define GOOD_BLOCK 0
673#define DEFECTIVE_BLOCK 1
674#define READ_ERROR 2
675
676#define CLK_X 5
677#define CLK_MULTI 4
678
679/* ffsport.h */
680#define VERBOSE 1
681
682#define NAND_DBG_WARN 1
683#define NAND_DBG_DEBUG 2
684#define NAND_DBG_TRACE 3
685
686#ifdef VERBOSE
687#define nand_dbg_print(level, args...) \
688 do { \
689 if (level <= nand_debug_level) \
690 printk(KERN_ALERT args); \
691 } while (0)
692#else
693#define nand_dbg_print(level, args...)
694#endif
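The macro above gates output on a global nand_debug_level, which is defined elsewhere in the Spectra/Denali code. A sketch of the intended call pattern only (not code taken from the driver; the message text and the bank variable are made up):

	/* printed only when nand_debug_level >= NAND_DBG_DEBUG */
	nand_dbg_print(NAND_DBG_DEBUG, "%s: bank %d selected\n", __func__, bank);

	/* warnings use the lowest level so they are rarely filtered out */
	nand_dbg_print(NAND_DBG_WARN, "%s: uncorrectable ECC error\n", __func__);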
695
696
697/* spectraswconfig.h */
698#define CMD_DMA 0
699
700#define SPECTRA_PARTITION_ID 0
701/**** Block Table and Reserved Block Parameters *****/
702#define SPECTRA_START_BLOCK 3
703#define NUM_FREE_BLOCKS_GATE 30
704
705/* KBV - Updated to LNW scratch register address */
706#define SCRATCH_REG_ADDR CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR
707#define SCRATCH_REG_SIZE 64
708
709#define GLOB_HWCTL_DEFAULT_BLKS 2048
710
711#define SUPPORT_15BITECC 1
712#define SUPPORT_8BITECC 1
713
714#define CUSTOM_CONF_PARAMS 0
715
716#define ONFI_BLOOM_TIME 1
717#define MODE5_WORKAROUND 0
718
719/* lld_nand.h */
720/*
721 * NAND Flash Controller Device Driver
722 * Copyright (c) 2009, Intel Corporation and its suppliers.
723 *
724 * This program is free software; you can redistribute it and/or modify it
725 * under the terms and conditions of the GNU General Public License,
726 * version 2, as published by the Free Software Foundation.
727 *
728 * This program is distributed in the hope it will be useful, but WITHOUT
729 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
730 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
731 * more details.
732 *
733 * You should have received a copy of the GNU General Public License along with
734 * this program; if not, write to the Free Software Foundation, Inc.,
735 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
736 *
737 */
738
739#ifndef _LLD_NAND_
740#define _LLD_NAND_
741
742#define MODE_00 0x00000000
743#define MODE_01 0x04000000
744#define MODE_10 0x08000000
745#define MODE_11 0x0C000000
746
747
748#define DATA_TRANSFER_MODE 0
749#define PROTECTION_PER_BLOCK 1
750#define LOAD_WAIT_COUNT 2
751#define PROGRAM_WAIT_COUNT 3
752#define ERASE_WAIT_COUNT 4
753#define INT_MONITOR_CYCLE_COUNT 5
754#define READ_BUSY_PIN_ENABLED 6
755#define MULTIPLANE_OPERATION_SUPPORT 7
756#define PRE_FETCH_MODE 8
757#define CE_DONT_CARE_SUPPORT 9
758#define COPYBACK_SUPPORT 10
759#define CACHE_WRITE_SUPPORT 11
760#define CACHE_READ_SUPPORT 12
761#define NUM_PAGES_IN_BLOCK 13
762#define ECC_ENABLE_SELECT 14
763#define WRITE_ENABLE_2_READ_ENABLE 15
764#define ADDRESS_2_DATA 16
765#define READ_ENABLE_2_WRITE_ENABLE 17
766#define TWO_ROW_ADDRESS_CYCLES 18
767#define MULTIPLANE_ADDRESS_RESTRICT 19
768#define ACC_CLOCKS 20
769#define READ_WRITE_ENABLE_LOW_COUNT 21
770#define READ_WRITE_ENABLE_HIGH_COUNT 22
771
772#define ECC_SECTOR_SIZE 512
773#define LLD_MAX_FLASH_BANKS 4
774
775#define DENALI_BUF_SIZE		(NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
776
777struct nand_buf
778{
779 int head;
780 int tail;
781 uint8_t buf[DENALI_BUF_SIZE];
782 dma_addr_t dma_buf;
783};
784
785#define INTEL_CE4100 1
786#define INTEL_MRST 2
787
788struct denali_nand_info {
789 struct mtd_info mtd;
790 struct nand_chip nand;
791 struct device_info_tag dev_info;
792 int flash_bank; /* currently selected chip */
793 int status;
794 int platform;
795 struct nand_buf buf;
796 struct pci_dev *dev;
797 int total_used_banks;
798 uint32_t block; /* stored for future use */
799 uint16_t page;
800 void __iomem *flash_reg; /* Mapped io reg base address */
801 void __iomem *flash_mem; /* Mapped io reg base address */
802
803 /* elements used by ISR */
804 struct completion complete;
805 spinlock_t irq_lock;
806 uint32_t irq_status;
807 int irq_debug_array[32];
808 int idx;
809};
810
811static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali);
812static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali);
813static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali, uint16_t INT_ENABLE);
814
815#endif /*_LLD_NAND_*/
816
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index f45a8d0c150..5084cc51794 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -874,7 +874,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
874 priv->ctrl = ctrl; 874 priv->ctrl = ctrl;
875 priv->dev = ctrl->dev; 875 priv->dev = ctrl->dev;
876 876
877 priv->vbase = ioremap(res.start, res.end - res.start + 1); 877 priv->vbase = ioremap(res.start, resource_size(&res));
878 if (!priv->vbase) { 878 if (!priv->vbase) {
879 dev_err(ctrl->dev, "failed to map chip region\n"); 879 dev_err(ctrl->dev, "failed to map chip region\n");
880 ret = -ENOMEM; 880 ret = -ENOMEM;
@@ -891,7 +891,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
891 if (ret) 891 if (ret)
892 goto err; 892 goto err;
893 893
894 ret = nand_scan_ident(&priv->mtd, 1); 894 ret = nand_scan_ident(&priv->mtd, 1, NULL);
895 if (ret) 895 if (ret)
896 goto err; 896 goto err;
897 897
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index b7ab5a0ec35..00aea6f7d1f 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -49,7 +49,10 @@ struct fsl_upm_nand {
49 uint32_t wait_flags; 49 uint32_t wait_flags;
50}; 50};
51 51
52#define to_fsl_upm_nand(mtd) container_of(mtd, struct fsl_upm_nand, mtd) 52static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo)
53{
54 return container_of(mtdinfo, struct fsl_upm_nand, mtd);
55}
53 56
54static int fun_chip_ready(struct mtd_info *mtd) 57static int fun_chip_ready(struct mtd_info *mtd)
55{ 58{
@@ -303,7 +306,7 @@ static int __devinit fun_probe(struct of_device *ofdev,
303 FSL_UPM_WAIT_WRITE_BYTE; 306 FSL_UPM_WAIT_WRITE_BYTE;
304 307
305 fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start, 308 fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
306 io_res.end - io_res.start + 1); 309 resource_size(&io_res));
307 if (!fun->io_base) { 310 if (!fun->io_base) {
308 ret = -ENOMEM; 311 ret = -ENOMEM;
309 goto err2; 312 goto err2;
@@ -350,7 +353,7 @@ static int __devexit fun_remove(struct of_device *ofdev)
350 return 0; 353 return 0;
351} 354}
352 355
353static struct of_device_id of_fun_match[] = { 356static const struct of_device_id of_fun_match[] = {
354 { .compatible = "fsl,upm-nand" }, 357 { .compatible = "fsl,upm-nand" },
355 {}, 358 {},
356}; 359};
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 8f902e75aa8..0cde618bcc1 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -181,11 +181,11 @@ static int __devexit gpio_nand_remove(struct platform_device *dev)
181 res = platform_get_resource(dev, IORESOURCE_MEM, 1); 181 res = platform_get_resource(dev, IORESOURCE_MEM, 1);
182 iounmap(gpiomtd->io_sync); 182 iounmap(gpiomtd->io_sync);
183 if (res) 183 if (res)
184 release_mem_region(res->start, res->end - res->start + 1); 184 release_mem_region(res->start, resource_size(res));
185 185
186 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 186 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
187 iounmap(gpiomtd->nand_chip.IO_ADDR_R); 187 iounmap(gpiomtd->nand_chip.IO_ADDR_R);
188 release_mem_region(res->start, res->end - res->start + 1); 188 release_mem_region(res->start, resource_size(res));
189 189
190 if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) 190 if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
191 gpio_set_value(gpiomtd->plat.gpio_nwp, 0); 191 gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
@@ -208,14 +208,14 @@ static void __iomem *request_and_remap(struct resource *res, size_t size,
208{ 208{
209 void __iomem *ptr; 209 void __iomem *ptr;
210 210
211 if (!request_mem_region(res->start, res->end - res->start + 1, name)) { 211 if (!request_mem_region(res->start, resource_size(res), name)) {
212 *err = -EBUSY; 212 *err = -EBUSY;
213 return NULL; 213 return NULL;
214 } 214 }
215 215
216 ptr = ioremap(res->start, size); 216 ptr = ioremap(res->start, size);
217 if (!ptr) { 217 if (!ptr) {
218 release_mem_region(res->start, res->end - res->start + 1); 218 release_mem_region(res->start, resource_size(res));
219 *err = -ENOMEM; 219 *err = -ENOMEM;
220 } 220 }
221 return ptr; 221 return ptr;
@@ -338,10 +338,10 @@ err_nwp:
338err_nce: 338err_nce:
339 iounmap(gpiomtd->io_sync); 339 iounmap(gpiomtd->io_sync);
340 if (res1) 340 if (res1)
341 release_mem_region(res1->start, res1->end - res1->start + 1); 341 release_mem_region(res1->start, resource_size(res1));
342err_sync: 342err_sync:
343 iounmap(gpiomtd->nand_chip.IO_ADDR_R); 343 iounmap(gpiomtd->nand_chip.IO_ADDR_R);
344 release_mem_region(res0->start, res0->end - res0->start + 1); 344 release_mem_region(res0->start, resource_size(res0));
345err_map: 345err_map:
346 kfree(gpiomtd); 346 kfree(gpiomtd);
347 return ret; 347 return ret;
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
new file mode 100644
index 00000000000..3d0867d829c
--- /dev/null
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -0,0 +1,917 @@
1/*
2 * Copyright 2004-2008 Freescale Semiconductor, Inc.
3 * Copyright 2009 Semihalf.
4 *
5 * Approved as OSADL project by a majority of OSADL members and funded
6 * by OSADL membership fees in 2009; for details see www.osadl.org.
7 *
8 * Based on original driver from Freescale Semiconductor
9 * written by John Rigby <jrigby@freescale.com> on the basis
10 * of drivers/mtd/nand/mxc_nand.c. Reworked and extended by
11 * Piotr Ziecik <kosmo@semihalf.com>.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version 2
16 * of the License, or (at your option) any later version.
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
25 * MA 02110-1301, USA.
26 */
27
28#include <linux/module.h>
29#include <linux/clk.h>
30#include <linux/gfp.h>
31#include <linux/delay.h>
32#include <linux/init.h>
33#include <linux/interrupt.h>
34#include <linux/io.h>
35#include <linux/mtd/mtd.h>
36#include <linux/mtd/nand.h>
37#include <linux/mtd/partitions.h>
38#include <linux/of_device.h>
39#include <linux/of_platform.h>
40
41#include <asm/mpc5121.h>
42
43/* Addresses for NFC MAIN RAM BUFFER areas */
44#define NFC_MAIN_AREA(n) ((n) * 0x200)
45
46/* Addresses for NFC SPARE BUFFER areas */
47#define NFC_SPARE_BUFFERS 8
48#define NFC_SPARE_LEN 0x40
49#define NFC_SPARE_AREA(n) (0x1000 + ((n) * NFC_SPARE_LEN))
50
51/* MPC5121 NFC registers */
52#define NFC_BUF_ADDR 0x1E04
53#define NFC_FLASH_ADDR 0x1E06
54#define NFC_FLASH_CMD 0x1E08
55#define NFC_CONFIG 0x1E0A
56#define NFC_ECC_STATUS1 0x1E0C
57#define NFC_ECC_STATUS2 0x1E0E
58#define NFC_SPAS 0x1E10
59#define NFC_WRPROT 0x1E12
60#define NFC_NF_WRPRST 0x1E18
61#define NFC_CONFIG1 0x1E1A
62#define NFC_CONFIG2 0x1E1C
63#define NFC_UNLOCKSTART_BLK0 0x1E20
64#define NFC_UNLOCKEND_BLK0 0x1E22
65#define NFC_UNLOCKSTART_BLK1 0x1E24
66#define NFC_UNLOCKEND_BLK1 0x1E26
67#define NFC_UNLOCKSTART_BLK2 0x1E28
68#define NFC_UNLOCKEND_BLK2 0x1E2A
69#define NFC_UNLOCKSTART_BLK3 0x1E2C
70#define NFC_UNLOCKEND_BLK3 0x1E2E
71
72/* Bit Definitions: NFC_BUF_ADDR */
73#define NFC_RBA_MASK (7 << 0)
74#define NFC_ACTIVE_CS_SHIFT 5
75#define NFC_ACTIVE_CS_MASK (3 << NFC_ACTIVE_CS_SHIFT)
76
77/* Bit Definitions: NFC_CONFIG */
78#define NFC_BLS_UNLOCKED (1 << 1)
79
80/* Bit Definitions: NFC_CONFIG1 */
81#define NFC_ECC_4BIT (1 << 0)
82#define NFC_FULL_PAGE_DMA (1 << 1)
83#define NFC_SPARE_ONLY (1 << 2)
84#define NFC_ECC_ENABLE (1 << 3)
85#define NFC_INT_MASK (1 << 4)
86#define NFC_BIG_ENDIAN (1 << 5)
87#define NFC_RESET (1 << 6)
88#define NFC_CE (1 << 7)
89#define NFC_ONE_CYCLE (1 << 8)
90#define NFC_PPB_32 (0 << 9)
91#define NFC_PPB_64 (1 << 9)
92#define NFC_PPB_128 (2 << 9)
93#define NFC_PPB_256 (3 << 9)
94#define NFC_PPB_MASK (3 << 9)
95#define NFC_FULL_PAGE_INT (1 << 11)
96
97/* Bit Definitions: NFC_CONFIG2 */
98#define NFC_COMMAND (1 << 0)
99#define NFC_ADDRESS (1 << 1)
100#define NFC_INPUT (1 << 2)
101#define NFC_OUTPUT (1 << 3)
102#define NFC_ID (1 << 4)
103#define NFC_STATUS (1 << 5)
104#define NFC_CMD_FAIL (1 << 15)
105#define NFC_INT (1 << 15)
106
107/* Bit Definitions: NFC_WRPROT */
108#define NFC_WPC_LOCK_TIGHT (1 << 0)
109#define NFC_WPC_LOCK (1 << 1)
110#define NFC_WPC_UNLOCK (1 << 2)
111
112#define DRV_NAME "mpc5121_nfc"
113
114/* Timeouts */
115#define NFC_RESET_TIMEOUT 1000 /* 1 ms */
116#define NFC_TIMEOUT (HZ / 10) /* 1/10 s */
117
118struct mpc5121_nfc_prv {
119 struct mtd_info mtd;
120 struct nand_chip chip;
121 int irq;
122 void __iomem *regs;
123 struct clk *clk;
124 wait_queue_head_t irq_waitq;
125 uint column;
126 int spareonly;
127 void __iomem *csreg;
128 struct device *dev;
129};
130
131static void mpc5121_nfc_done(struct mtd_info *mtd);
132
133#ifdef CONFIG_MTD_PARTITIONS
134static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL };
135#endif
136
137/* Read NFC register */
138static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
139{
140 struct nand_chip *chip = mtd->priv;
141 struct mpc5121_nfc_prv *prv = chip->priv;
142
143 return in_be16(prv->regs + reg);
144}
145
146/* Write NFC register */
147static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val)
148{
149 struct nand_chip *chip = mtd->priv;
150 struct mpc5121_nfc_prv *prv = chip->priv;
151
152 out_be16(prv->regs + reg, val);
153}
154
155/* Set bits in NFC register */
156static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits)
157{
158 nfc_write(mtd, reg, nfc_read(mtd, reg) | bits);
159}
160
161/* Clear bits in NFC register */
162static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits)
163{
164 nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits);
165}
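nfc_set() and nfc_clear() are plain read-modify-write helpers layered on in_be16()/out_be16(). The same pattern reduced to a standalone userspace demo against an ordinary variable instead of a memory-mapped register (helper names are ad hoc):

	#include <stdio.h>
	#include <stdint.h>

	static uint16_t fake_reg;	/* stands in for an NFC register */

	static void reg_set(uint16_t bits)   { fake_reg |= bits;  }
	static void reg_clear(uint16_t bits) { fake_reg &= ~bits; }

	int main(void)
	{
		reg_set((1 << 3) | (1 << 4));	/* e.g. NFC_ECC_ENABLE | NFC_INT_MASK */
		reg_clear(1 << 4);		/* drop the interrupt mask again */
		printf("reg = 0x%04x\n", (unsigned)fake_reg);	/* reg = 0x0008 */
		return 0;
	}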
166
167/* Invoke address cycle */
168static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr)
169{
170 nfc_write(mtd, NFC_FLASH_ADDR, addr);
171 nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS);
172 mpc5121_nfc_done(mtd);
173}
174
175/* Invoke command cycle */
176static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd)
177{
178 nfc_write(mtd, NFC_FLASH_CMD, cmd);
179 nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND);
180 mpc5121_nfc_done(mtd);
181}
182
183/* Send data from NFC buffers to NAND flash */
184static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd)
185{
186 nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
187 nfc_write(mtd, NFC_CONFIG2, NFC_INPUT);
188 mpc5121_nfc_done(mtd);
189}
190
191/* Receive data from NAND flash */
192static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd)
193{
194 nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
195 nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT);
196 mpc5121_nfc_done(mtd);
197}
198
199/* Receive ID from NAND flash */
200static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd)
201{
202 nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
203 nfc_write(mtd, NFC_CONFIG2, NFC_ID);
204 mpc5121_nfc_done(mtd);
205}
206
207/* Receive status from NAND flash */
208static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd)
209{
210 nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
211 nfc_write(mtd, NFC_CONFIG2, NFC_STATUS);
212 mpc5121_nfc_done(mtd);
213}
214
215/* NFC interrupt handler */
216static irqreturn_t mpc5121_nfc_irq(int irq, void *data)
217{
218 struct mtd_info *mtd = data;
219 struct nand_chip *chip = mtd->priv;
220 struct mpc5121_nfc_prv *prv = chip->priv;
221
222 nfc_set(mtd, NFC_CONFIG1, NFC_INT_MASK);
223 wake_up(&prv->irq_waitq);
224
225 return IRQ_HANDLED;
226}
227
228/* Wait for operation complete */
229static void mpc5121_nfc_done(struct mtd_info *mtd)
230{
231 struct nand_chip *chip = mtd->priv;
232 struct mpc5121_nfc_prv *prv = chip->priv;
233 int rv;
234
235 if ((nfc_read(mtd, NFC_CONFIG2) & NFC_INT) == 0) {
236 nfc_clear(mtd, NFC_CONFIG1, NFC_INT_MASK);
237 rv = wait_event_timeout(prv->irq_waitq,
238 (nfc_read(mtd, NFC_CONFIG2) & NFC_INT), NFC_TIMEOUT);
239
240 if (!rv)
241 dev_warn(prv->dev,
242 "Timeout while waiting for interrupt.\n");
243 }
244
245 nfc_clear(mtd, NFC_CONFIG2, NFC_INT);
246}
247
248/* Do address cycle(s) */
249static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page)
250{
251 struct nand_chip *chip = mtd->priv;
252 u32 pagemask = chip->pagemask;
253
254 if (column != -1) {
255 mpc5121_nfc_send_addr(mtd, column);
256 if (mtd->writesize > 512)
257 mpc5121_nfc_send_addr(mtd, column >> 8);
258 }
259
260 if (page != -1) {
261 do {
262 mpc5121_nfc_send_addr(mtd, page & 0xFF);
263 page >>= 8;
264 pagemask >>= 8;
265 } while (pagemask);
266 }
267}
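mpc5121_nfc_addr_cycle() sends the column in one or two cycles (large-page devices need a second column byte) and then shifts the page number out one byte at a time until the page mask is exhausted. A standalone sketch of the same byte sequence, printing instead of writing NFC_FLASH_ADDR (the geometry and addresses are illustrative):

	#include <stdio.h>
	#include <stdint.h>

	static void send_addr(unsigned a)
	{
		printf("address cycle: 0x%02x\n", a & 0xFF);
	}

	int main(void)
	{
		uint32_t pagemask = 0x3ffff;	/* e.g. 18-bit page addresses */
		int column = 0x156, page = 0x1234;
		int writesize = 2048;

		send_addr(column);
		if (writesize > 512)
			send_addr(column >> 8);

		do {
			send_addr(page & 0xFF);
			page >>= 8;
			pagemask >>= 8;
		} while (pagemask);
		return 0;
	}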
268
269/* Control chip select signals */
270static void mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip)
271{
272 if (chip < 0) {
273 nfc_clear(mtd, NFC_CONFIG1, NFC_CE);
274 return;
275 }
276
277 nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK);
278 nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) &
279 NFC_ACTIVE_CS_MASK);
280 nfc_set(mtd, NFC_CONFIG1, NFC_CE);
281}
282
283/* Init external chip select logic on ADS5121 board */
284static int ads5121_chipselect_init(struct mtd_info *mtd)
285{
286 struct nand_chip *chip = mtd->priv;
287 struct mpc5121_nfc_prv *prv = chip->priv;
288 struct device_node *dn;
289
290 dn = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld");
291 if (dn) {
292 prv->csreg = of_iomap(dn, 0);
293 of_node_put(dn);
294 if (!prv->csreg)
295 return -ENOMEM;
296
297 /* CPLD Register 9 controls NAND /CE Lines */
298 prv->csreg += 9;
299 return 0;
300 }
301
302 return -EINVAL;
303}
304
305/* Control chip select signals on ADS5121 board */
306static void ads5121_select_chip(struct mtd_info *mtd, int chip)
307{
308 struct nand_chip *nand = mtd->priv;
309 struct mpc5121_nfc_prv *prv = nand->priv;
310 u8 v;
311
312 v = in_8(prv->csreg);
313 v |= 0x0F;
314
315 if (chip >= 0) {
316 mpc5121_nfc_select_chip(mtd, 0);
317 v &= ~(1 << chip);
318 } else
319 mpc5121_nfc_select_chip(mtd, -1);
320
321 out_8(prv->csreg, v);
322}
323
324/* Read NAND Ready/Busy signal */
325static int mpc5121_nfc_dev_ready(struct mtd_info *mtd)
326{
327 /*
328	 * The NFC handles the ready/busy signal internally, so this function
329	 * always reports the device as ready.
330 */
331 return 1;
332}
333
334/* Write command to NAND flash */
335static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command,
336 int column, int page)
337{
338 struct nand_chip *chip = mtd->priv;
339 struct mpc5121_nfc_prv *prv = chip->priv;
340
341 prv->column = (column >= 0) ? column : 0;
342 prv->spareonly = 0;
343
344 switch (command) {
345 case NAND_CMD_PAGEPROG:
346 mpc5121_nfc_send_prog_page(mtd);
347 break;
348 /*
349 * NFC does not support sub-page reads and writes,
350 * so emulate them using full page transfers.
351 */
352 case NAND_CMD_READ0:
353 column = 0;
354 break;
355
356 case NAND_CMD_READ1:
357 prv->column += 256;
358 command = NAND_CMD_READ0;
359 column = 0;
360 break;
361
362 case NAND_CMD_READOOB:
363 prv->spareonly = 1;
364 command = NAND_CMD_READ0;
365 column = 0;
366 break;
367
368 case NAND_CMD_SEQIN:
369 mpc5121_nfc_command(mtd, NAND_CMD_READ0, column, page);
370 column = 0;
371 break;
372
373 case NAND_CMD_ERASE1:
374 case NAND_CMD_ERASE2:
375 case NAND_CMD_READID:
376 case NAND_CMD_STATUS:
377 break;
378
379 default:
380 return;
381 }
382
383 mpc5121_nfc_send_cmd(mtd, command);
384 mpc5121_nfc_addr_cycle(mtd, column, page);
385
386 switch (command) {
387 case NAND_CMD_READ0:
388 if (mtd->writesize > 512)
389 mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART);
390 mpc5121_nfc_send_read_page(mtd);
391 break;
392
393 case NAND_CMD_READID:
394 mpc5121_nfc_send_read_id(mtd);
395 break;
396
397 case NAND_CMD_STATUS:
398 mpc5121_nfc_send_read_status(mtd);
399 if (chip->options & NAND_BUSWIDTH_16)
400 prv->column = 1;
401 else
402 prv->column = 0;
403 break;
404 }
405}
406
407/* Copy data from/to NFC spare buffers. */
408static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset,
409 u8 *buffer, uint size, int wr)
410{
411 struct nand_chip *nand = mtd->priv;
412 struct mpc5121_nfc_prv *prv = nand->priv;
413 uint o, s, sbsize, blksize;
414
415 /*
416 * NAND spare area is available through NFC spare buffers.
417 * The NFC divides spare area into (page_size / 512) chunks.
418 * Each chunk is placed into separate spare memory area, using
419 * first (spare_size / num_of_chunks) bytes of the buffer.
420 *
421	 * For NAND devices whose spare area is not evenly divisible
422	 * by the number of chunks, the number of used bytes in each spare
423	 * buffer is rounded down to the nearest even number of bytes,
424	 * and all remaining bytes are added to the last used spare area.
425 *
426 * For more information read section 26.6.10 of MPC5121e
427 * Microcontroller Reference Manual, Rev. 3.
428 */
429
430 /* Calculate number of valid bytes in each spare buffer */
431 sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1;
432
433 while (size) {
434 /* Calculate spare buffer number */
435 s = offset / sbsize;
436 if (s > NFC_SPARE_BUFFERS - 1)
437 s = NFC_SPARE_BUFFERS - 1;
438
439 /*
440 * Calculate offset to requested data block in selected spare
441 * buffer and its size.
442 */
443 o = offset - (s * sbsize);
444 blksize = min(sbsize - o, size);
445
446 if (wr)
447 memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o,
448 buffer, blksize);
449 else
450 memcpy_fromio(buffer,
451 prv->regs + NFC_SPARE_AREA(s) + o, blksize);
452
453 buffer += blksize;
454 offset += blksize;
455 size -= blksize;
456	}
457}
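The spare-area math in mpc5121_nfc_copy_spare() is easier to see with numbers: for a 2048+64 page the controller spreads the OOB across four spare buffers of 16 usable bytes each, and an arbitrary OOB offset maps to a (buffer, offset-in-buffer) pair. A standalone sketch of just that mapping (the geometry is assumed, not read from hardware):

	#include <stdio.h>

	#define NFC_SPARE_BUFFERS 8

	int main(void)
	{
		unsigned writesize = 2048, oobsize = 64;
		/* usable bytes per spare buffer, rounded down to an even count */
		unsigned sbsize = (oobsize / (writesize / 512)) & ~1u;
		unsigned offset;

		for (offset = 0; offset < oobsize; offset += 10) {
			unsigned s = offset / sbsize;
			if (s > NFC_SPARE_BUFFERS - 1)
				s = NFC_SPARE_BUFFERS - 1;
			printf("oob offset %2u -> spare buffer %u, offset %u\n",
			       offset, s, offset - s * sbsize);
		}
		return 0;
	}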
458
459/* Copy data from/to NFC main and spare buffers */
460static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len,
461 int wr)
462{
463 struct nand_chip *chip = mtd->priv;
464 struct mpc5121_nfc_prv *prv = chip->priv;
465 uint c = prv->column;
466 uint l;
467
468 /* Handle spare area access */
469 if (prv->spareonly || c >= mtd->writesize) {
470 /* Calculate offset from beginning of spare area */
471 if (c >= mtd->writesize)
472 c -= mtd->writesize;
473
474 prv->column += len;
475 mpc5121_nfc_copy_spare(mtd, c, buf, len, wr);
476 return;
477 }
478
479 /*
480 * Handle main area access - limit copy length to prevent
481 * crossing main/spare boundary.
482 */
483 l = min((uint)len, mtd->writesize - c);
484 prv->column += l;
485
486 if (wr)
487 memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l);
488 else
489 memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l);
490
491 /* Handle crossing main/spare boundary */
492 if (l != len) {
493 buf += l;
494 len -= l;
495 mpc5121_nfc_buf_copy(mtd, buf, len, wr);
496 }
497}
498
499/* Read data from NFC buffers */
500static void mpc5121_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len)
501{
502 mpc5121_nfc_buf_copy(mtd, buf, len, 0);
503}
504
505/* Write data to NFC buffers */
506static void mpc5121_nfc_write_buf(struct mtd_info *mtd,
507 const u_char *buf, int len)
508{
509 mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1);
510}
511
512/* Compare buffer with NAND flash */
513static int mpc5121_nfc_verify_buf(struct mtd_info *mtd,
514 const u_char *buf, int len)
515{
516 u_char tmp[256];
517 uint bsize;
518
519 while (len) {
520 bsize = min(len, 256);
521 mpc5121_nfc_read_buf(mtd, tmp, bsize);
522
523 if (memcmp(buf, tmp, bsize))
524 return 1;
525
526 buf += bsize;
527 len -= bsize;
528 }
529
530 return 0;
531}
532
533/* Read byte from NFC buffers */
534static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd)
535{
536 u8 tmp;
537
538 mpc5121_nfc_read_buf(mtd, &tmp, sizeof(tmp));
539
540 return tmp;
541}
542
543/* Read word from NFC buffers */
544static u16 mpc5121_nfc_read_word(struct mtd_info *mtd)
545{
546 u16 tmp;
547
548 mpc5121_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp));
549
550 return tmp;
551}
552
553/*
554 * Read NFC configuration from Reset Config Word
555 *
556 * NFC is configured during reset on the basis of information stored
557 * in Reset Config Word. There is no other way to set NAND block
558 * size, spare size and bus width.
559 */
560static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
561{
562 struct nand_chip *chip = mtd->priv;
563 struct mpc5121_nfc_prv *prv = chip->priv;
564 struct mpc512x_reset_module *rm;
565 struct device_node *rmnode;
566 uint rcw_pagesize = 0;
567 uint rcw_sparesize = 0;
568 uint rcw_width;
569 uint rcwh;
570 uint romloc, ps;
571
572 rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset");
573 if (!rmnode) {
574 dev_err(prv->dev, "Missing 'fsl,mpc5121-reset' "
575 "node in device tree!\n");
576 return -ENODEV;
577 }
578
579 rm = of_iomap(rmnode, 0);
580 if (!rm) {
581 dev_err(prv->dev, "Error mapping reset module node!\n");
582 return -EBUSY;
583 }
584
585 rcwh = in_be32(&rm->rcwhr);
586
587 /* Bit 6: NFC bus width */
588 rcw_width = ((rcwh >> 6) & 0x1) ? 2 : 1;
589
590 /* Bit 7: NFC Page/Spare size */
591 ps = (rcwh >> 7) & 0x1;
592
593 /* Bits [22:21]: ROM Location */
594 romloc = (rcwh >> 21) & 0x3;
595
596 /* Decode RCW bits */
597 switch ((ps << 2) | romloc) {
598 case 0x00:
599 case 0x01:
600 rcw_pagesize = 512;
601 rcw_sparesize = 16;
602 break;
603 case 0x02:
604 case 0x03:
605 rcw_pagesize = 4096;
606 rcw_sparesize = 128;
607 break;
608 case 0x04:
609 case 0x05:
610 rcw_pagesize = 2048;
611 rcw_sparesize = 64;
612 break;
613 case 0x06:
614 case 0x07:
615 rcw_pagesize = 4096;
616 rcw_sparesize = 218;
617 break;
618 }
619
620 mtd->writesize = rcw_pagesize;
621 mtd->oobsize = rcw_sparesize;
622 if (rcw_width == 2)
623 chip->options |= NAND_BUSWIDTH_16;
624
625 dev_notice(prv->dev, "Configured for "
626 "%u-bit NAND, page size %u "
627 "with %u spare.\n",
628 rcw_width * 8, rcw_pagesize,
629 rcw_sparesize);
630 iounmap(rm);
631 of_node_put(rmnode);
632 return 0;
633}
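The Reset Config Word decode above looks at only three fields of RCWH: bit 6 (bus width), bit 7 (page/spare size) and bits 22:21 (ROM location), combining the last two into a small lookup. A standalone sketch of that decode for an arbitrary rcwh value (the sample value is invented):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t rcwh = 0x00200080;	/* invented RCWH snapshot */
		unsigned width = ((rcwh >> 6) & 0x1) ? 2 : 1;
		unsigned ps = (rcwh >> 7) & 0x1;
		unsigned romloc = (rcwh >> 21) & 0x3;
		unsigned pagesize = 0, sparesize = 0;

		switch ((ps << 2) | romloc) {
		case 0x00: case 0x01: pagesize = 512;  sparesize = 16;  break;
		case 0x02: case 0x03: pagesize = 4096; sparesize = 128; break;
		case 0x04: case 0x05: pagesize = 2048; sparesize = 64;  break;
		case 0x06: case 0x07: pagesize = 4096; sparesize = 218; break;
		}

		printf("%u-bit bus, page %u bytes, spare %u bytes\n",
		       width * 8, pagesize, sparesize);
		return 0;
	}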
634
635/* Free driver resources */
636static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
637{
638 struct nand_chip *chip = mtd->priv;
639 struct mpc5121_nfc_prv *prv = chip->priv;
640
641 if (prv->clk) {
642 clk_disable(prv->clk);
643 clk_put(prv->clk);
644 }
645
646 if (prv->csreg)
647 iounmap(prv->csreg);
648}
649
650static int __devinit mpc5121_nfc_probe(struct of_device *op,
651 const struct of_device_id *match)
652{
653 struct device_node *rootnode, *dn = op->node;
654 struct device *dev = &op->dev;
655 struct mpc5121_nfc_prv *prv;
656 struct resource res;
657 struct mtd_info *mtd;
658#ifdef CONFIG_MTD_PARTITIONS
659 struct mtd_partition *parts;
660#endif
661 struct nand_chip *chip;
662 unsigned long regs_paddr, regs_size;
663 const uint *chips_no;
664 int resettime = 0;
665 int retval = 0;
666 int rev, len;
667
668 /*
669 * Check SoC revision. This driver supports only NFC
670 * in MPC5121 revision 2 and MPC5123 revision 3.
671 */
672 rev = (mfspr(SPRN_SVR) >> 4) & 0xF;
673 if ((rev != 2) && (rev != 3)) {
674 dev_err(dev, "SoC revision %u is not supported!\n", rev);
675 return -ENXIO;
676 }
677
678 prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL);
679 if (!prv) {
680 dev_err(dev, "Memory exhausted!\n");
681 return -ENOMEM;
682 }
683
684 mtd = &prv->mtd;
685 chip = &prv->chip;
686
687 mtd->priv = chip;
688 chip->priv = prv;
689 prv->dev = dev;
690
691 /* Read NFC configuration from Reset Config Word */
692 retval = mpc5121_nfc_read_hw_config(mtd);
693 if (retval) {
694 dev_err(dev, "Unable to read NFC config!\n");
695 return retval;
696 }
697
698 prv->irq = irq_of_parse_and_map(dn, 0);
699 if (prv->irq == NO_IRQ) {
700 dev_err(dev, "Error mapping IRQ!\n");
701 return -EINVAL;
702 }
703
704 retval = of_address_to_resource(dn, 0, &res);
705 if (retval) {
706 dev_err(dev, "Error parsing memory region!\n");
707 return retval;
708 }
709
710 chips_no = of_get_property(dn, "chips", &len);
711 if (!chips_no || len != sizeof(*chips_no)) {
712 dev_err(dev, "Invalid/missing 'chips' property!\n");
713 return -EINVAL;
714 }
715
716 regs_paddr = res.start;
717 regs_size = res.end - res.start + 1;
718
719 if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) {
720 dev_err(dev, "Error requesting memory region!\n");
721 return -EBUSY;
722 }
723
724 prv->regs = devm_ioremap(dev, regs_paddr, regs_size);
725 if (!prv->regs) {
726 dev_err(dev, "Error mapping memory region!\n");
727 return -ENOMEM;
728 }
729
730 mtd->name = "MPC5121 NAND";
731 chip->dev_ready = mpc5121_nfc_dev_ready;
732 chip->cmdfunc = mpc5121_nfc_command;
733 chip->read_byte = mpc5121_nfc_read_byte;
734 chip->read_word = mpc5121_nfc_read_word;
735 chip->read_buf = mpc5121_nfc_read_buf;
736 chip->write_buf = mpc5121_nfc_write_buf;
737 chip->verify_buf = mpc5121_nfc_verify_buf;
738 chip->select_chip = mpc5121_nfc_select_chip;
739 chip->options = NAND_NO_AUTOINCR | NAND_USE_FLASH_BBT;
740 chip->ecc.mode = NAND_ECC_SOFT;
741
742 /* Support external chip-select logic on ADS5121 board */
743 rootnode = of_find_node_by_path("/");
744 if (of_device_is_compatible(rootnode, "fsl,mpc5121ads")) {
745 retval = ads5121_chipselect_init(mtd);
746 if (retval) {
747 dev_err(dev, "Chipselect init error!\n");
748 of_node_put(rootnode);
749 return retval;
750 }
751
752 chip->select_chip = ads5121_select_chip;
753 }
754 of_node_put(rootnode);
755
756 /* Enable NFC clock */
757 prv->clk = clk_get(dev, "nfc_clk");
758 if (!prv->clk) {
759 dev_err(dev, "Unable to acquire NFC clock!\n");
760 retval = -ENODEV;
761 goto error;
762 }
763
764 clk_enable(prv->clk);
765
766 /* Reset NAND Flash controller */
767 nfc_set(mtd, NFC_CONFIG1, NFC_RESET);
768 while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) {
769 if (resettime++ >= NFC_RESET_TIMEOUT) {
770 dev_err(dev, "Timeout while resetting NFC!\n");
771 retval = -EINVAL;
772 goto error;
773 }
774
775 udelay(1);
776 }
777
778 /* Enable write to NFC memory */
779 nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED);
780
781 /* Enable write to all NAND pages */
782 nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000);
783 nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF);
784 nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK);
785
786 /*
787 * Setup NFC:
788 * - Big Endian transfers,
789 * - Interrupt after full page read/write.
790 */
791 nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK |
792 NFC_FULL_PAGE_INT);
793
794 /* Set spare area size */
795 nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1);
796
797 init_waitqueue_head(&prv->irq_waitq);
798 retval = devm_request_irq(dev, prv->irq, &mpc5121_nfc_irq, 0, DRV_NAME,
799 mtd);
800 if (retval) {
801 dev_err(dev, "Error requesting IRQ!\n");
802 goto error;
803 }
804
805 /* Detect NAND chips */
806 if (nand_scan(mtd, *chips_no)) {
807 dev_err(dev, "NAND Flash not found !\n");
808 devm_free_irq(dev, prv->irq, mtd);
809 retval = -ENXIO;
810 goto error;
811 }
812
813 /* Set erase block size */
814 switch (mtd->erasesize / mtd->writesize) {
815 case 32:
816 nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32);
817 break;
818
819 case 64:
820 nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64);
821 break;
822
823 case 128:
824 nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128);
825 break;
826
827 case 256:
828 nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256);
829 break;
830
831 default:
832 dev_err(dev, "Unsupported NAND flash!\n");
833 devm_free_irq(dev, prv->irq, mtd);
834 retval = -ENXIO;
835 goto error;
836 }
837
838 dev_set_drvdata(dev, mtd);
839
840 /* Register device in MTD */
841#ifdef CONFIG_MTD_PARTITIONS
842 retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0);
843#ifdef CONFIG_MTD_OF_PARTS
844 if (retval == 0)
845 retval = of_mtd_parse_partitions(dev, dn, &parts);
846#endif
847 if (retval < 0) {
848 dev_err(dev, "Error parsing MTD partitions!\n");
849 devm_free_irq(dev, prv->irq, mtd);
850 retval = -EINVAL;
851 goto error;
852 }
853
854 if (retval > 0)
855 retval = add_mtd_partitions(mtd, parts, retval);
856 else
857#endif
858 retval = add_mtd_device(mtd);
859
860 if (retval) {
861 dev_err(dev, "Error adding MTD device!\n");
862 devm_free_irq(dev, prv->irq, mtd);
863 goto error;
864 }
865
866 return 0;
867error:
868 mpc5121_nfc_free(dev, mtd);
869 return retval;
870}
871
872static int __devexit mpc5121_nfc_remove(struct of_device *op)
873{
874 struct device *dev = &op->dev;
875 struct mtd_info *mtd = dev_get_drvdata(dev);
876 struct nand_chip *chip = mtd->priv;
877 struct mpc5121_nfc_prv *prv = chip->priv;
878
879 nand_release(mtd);
880 devm_free_irq(dev, prv->irq, mtd);
881 mpc5121_nfc_free(dev, mtd);
882
883 return 0;
884}
885
886static struct of_device_id mpc5121_nfc_match[] __devinitdata = {
887 { .compatible = "fsl,mpc5121-nfc", },
888 {},
889};
890
891static struct of_platform_driver mpc5121_nfc_driver = {
892 .match_table = mpc5121_nfc_match,
893 .probe = mpc5121_nfc_probe,
894 .remove = __devexit_p(mpc5121_nfc_remove),
895 .driver = {
896 .name = DRV_NAME,
897 .owner = THIS_MODULE,
898 },
899};
900
901static int __init mpc5121_nfc_init(void)
902{
903 return of_register_platform_driver(&mpc5121_nfc_driver);
904}
905
906module_init(mpc5121_nfc_init);
907
908static void __exit mpc5121_nfc_cleanup(void)
909{
910 of_unregister_platform_driver(&mpc5121_nfc_driver);
911}
912
913module_exit(mpc5121_nfc_cleanup);
914
915MODULE_AUTHOR("Freescale Semiconductor, Inc.");
916MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
917MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index b2900d8406d..82e94389824 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -38,7 +38,7 @@
38#define DRIVER_NAME "mxc_nand" 38#define DRIVER_NAME "mxc_nand"
39 39
40#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35()) 40#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35())
41#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27()) 41#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21())
42 42
43/* Addresses for NFC registers */ 43/* Addresses for NFC registers */
44#define NFC_BUF_SIZE 0xE00 44#define NFC_BUF_SIZE 0xE00
@@ -168,11 +168,7 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
168{ 168{
169 struct mxc_nand_host *host = dev_id; 169 struct mxc_nand_host *host = dev_id;
170 170
171 uint16_t tmp; 171 disable_irq_nosync(irq);
172
173 tmp = readw(host->regs + NFC_CONFIG1);
174 tmp |= NFC_INT_MSK; /* Disable interrupt */
175 writew(tmp, host->regs + NFC_CONFIG1);
176 172
177 wake_up(&host->irq_waitq); 173 wake_up(&host->irq_waitq);
178 174
@@ -184,15 +180,13 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
184 */ 180 */
185static void wait_op_done(struct mxc_nand_host *host, int useirq) 181static void wait_op_done(struct mxc_nand_host *host, int useirq)
186{ 182{
187 uint32_t tmp; 183 uint16_t tmp;
188 int max_retries = 2000; 184 int max_retries = 8000;
189 185
190 if (useirq) { 186 if (useirq) {
191 if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) { 187 if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) {
192 188
193 tmp = readw(host->regs + NFC_CONFIG1); 189 enable_irq(host->irq);
194 tmp &= ~NFC_INT_MSK; /* Enable interrupt */
195 writew(tmp, host->regs + NFC_CONFIG1);
196 190
197 wait_event(host->irq_waitq, 191 wait_event(host->irq_waitq,
198 readw(host->regs + NFC_CONFIG2) & NFC_INT); 192 readw(host->regs + NFC_CONFIG2) & NFC_INT);
@@ -226,8 +220,23 @@ static void send_cmd(struct mxc_nand_host *host, uint16_t cmd, int useirq)
226 writew(cmd, host->regs + NFC_FLASH_CMD); 220 writew(cmd, host->regs + NFC_FLASH_CMD);
227 writew(NFC_CMD, host->regs + NFC_CONFIG2); 221 writew(NFC_CMD, host->regs + NFC_CONFIG2);
228 222
229 /* Wait for operation to complete */ 223 if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) {
230 wait_op_done(host, useirq); 224 int max_retries = 100;
225 /* Reset completion is indicated by NFC_CONFIG2 */
226 /* being set to 0 */
227 while (max_retries-- > 0) {
228 if (readw(host->regs + NFC_CONFIG2) == 0) {
229 break;
230 }
231 udelay(1);
232 }
233 if (max_retries < 0)
234 DEBUG(MTD_DEBUG_LEVEL0, "%s: RESET failed\n",
235 __func__);
236 } else {
237 /* Wait for operation to complete */
238 wait_op_done(host, useirq);
239 }
231} 240}
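The new i.MX21 branch in send_cmd() is a bounded poll: the reset command cannot signal completion through the interrupt path on that part, so the driver spins on NFC_CONFIG2 with a retry counter and a 1 us delay between reads. A standalone sketch of the same poll-with-timeout shape against a fake register (the names and the completion condition are stand-ins):

	#include <stdio.h>

	static int fake_config2 = 1;	/* pretends to be NFC_CONFIG2 */

	static int read_config2(void)
	{
		return fake_config2--;	/* reads 0 after a couple of polls */
	}

	int main(void)
	{
		int max_retries = 100;

		while (max_retries-- > 0) {
			if (read_config2() == 0)
				break;
			/* the driver calls udelay(1) here */
		}

		if (max_retries < 0)
			printf("RESET failed (timed out)\n");
		else
			printf("RESET done, %d retries left\n", max_retries);
		return 0;
	}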
232 241
233/* This function sends an address (or partial address) to the 242/* This function sends an address (or partial address) to the
@@ -542,6 +551,41 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
542 } 551 }
543} 552}
544 553
554static void preset(struct mtd_info *mtd)
555{
556 struct nand_chip *nand_chip = mtd->priv;
557 struct mxc_nand_host *host = nand_chip->priv;
558 uint16_t tmp;
559
560 /* enable interrupt, disable spare enable */
561 tmp = readw(host->regs + NFC_CONFIG1);
562 tmp &= ~NFC_INT_MSK;
563 tmp &= ~NFC_SP_EN;
564 if (nand_chip->ecc.mode == NAND_ECC_HW) {
565 tmp |= NFC_ECC_EN;
566 } else {
567 tmp &= ~NFC_ECC_EN;
568 }
569 writew(tmp, host->regs + NFC_CONFIG1);
570 /* preset operation */
571
572 /* Unlock the internal RAM Buffer */
573 writew(0x2, host->regs + NFC_CONFIG);
574
575 /* Blocks to be unlocked */
576 if (nfc_is_v21()) {
577 writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR);
578 writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR);
579 } else if (nfc_is_v1()) {
580 writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR);
581 writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR);
582 } else
583 BUG();
584
585 /* Unlock Block Command for given address range */
586 writew(0x4, host->regs + NFC_WRPROT);
587}
588
545/* Used by the upper layer to write command to NAND Flash for 589/* Used by the upper layer to write command to NAND Flash for
546 * different operations to be carried out on NAND Flash */ 590 * different operations to be carried out on NAND Flash */
547static void mxc_nand_command(struct mtd_info *mtd, unsigned command, 591static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
@@ -559,6 +603,10 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
559 603
560 /* Command pre-processing step */ 604 /* Command pre-processing step */
561 switch (command) { 605 switch (command) {
606 case NAND_CMD_RESET:
607 send_cmd(host, command, false);
608 preset(mtd);
609 break;
562 610
563 case NAND_CMD_STATUS: 611 case NAND_CMD_STATUS:
564 host->buf_start = 0; 612 host->buf_start = 0;
@@ -679,7 +727,6 @@ static int __init mxcnd_probe(struct platform_device *pdev)
679 struct mxc_nand_platform_data *pdata = pdev->dev.platform_data; 727 struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
680 struct mxc_nand_host *host; 728 struct mxc_nand_host *host;
681 struct resource *res; 729 struct resource *res;
682 uint16_t tmp;
683 int err = 0, nr_parts = 0; 730 int err = 0, nr_parts = 0;
684 struct nand_ecclayout *oob_smallpage, *oob_largepage; 731 struct nand_ecclayout *oob_smallpage, *oob_largepage;
685 732
@@ -743,51 +790,17 @@ static int __init mxcnd_probe(struct platform_device *pdev)
743 host->spare_len = 64; 790 host->spare_len = 64;
744 oob_smallpage = &nandv2_hw_eccoob_smallpage; 791 oob_smallpage = &nandv2_hw_eccoob_smallpage;
745 oob_largepage = &nandv2_hw_eccoob_largepage; 792 oob_largepage = &nandv2_hw_eccoob_largepage;
793 this->ecc.bytes = 9;
746 } else if (nfc_is_v1()) { 794 } else if (nfc_is_v1()) {
747 host->regs = host->base; 795 host->regs = host->base;
748 host->spare0 = host->base + 0x800; 796 host->spare0 = host->base + 0x800;
749 host->spare_len = 16; 797 host->spare_len = 16;
750 oob_smallpage = &nandv1_hw_eccoob_smallpage; 798 oob_smallpage = &nandv1_hw_eccoob_smallpage;
751 oob_largepage = &nandv1_hw_eccoob_largepage; 799 oob_largepage = &nandv1_hw_eccoob_largepage;
752 } else
753 BUG();
754
755 /* disable interrupt and spare enable */
756 tmp = readw(host->regs + NFC_CONFIG1);
757 tmp |= NFC_INT_MSK;
758 tmp &= ~NFC_SP_EN;
759 writew(tmp, host->regs + NFC_CONFIG1);
760
761 init_waitqueue_head(&host->irq_waitq);
762
763 host->irq = platform_get_irq(pdev, 0);
764
765 err = request_irq(host->irq, mxc_nfc_irq, 0, DRIVER_NAME, host);
766 if (err)
767 goto eirq;
768
769 /* Reset NAND */
770 this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
771
772 /* preset operation */
773 /* Unlock the internal RAM Buffer */
774 writew(0x2, host->regs + NFC_CONFIG);
775
776 /* Blocks to be unlocked */
777 if (nfc_is_v21()) {
778 writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR);
779 writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR);
780 this->ecc.bytes = 9;
781 } else if (nfc_is_v1()) {
782 writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR);
783 writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR);
784 this->ecc.bytes = 3; 800 this->ecc.bytes = 3;
785 } else 801 } else
786 BUG(); 802 BUG();
787 803
788 /* Unlock Block Command for given address range */
789 writew(0x4, host->regs + NFC_WRPROT);
790
791 this->ecc.size = 512; 804 this->ecc.size = 512;
792 this->ecc.layout = oob_smallpage; 805 this->ecc.layout = oob_smallpage;
793 806
@@ -796,14 +809,8 @@ static int __init mxcnd_probe(struct platform_device *pdev)
796 this->ecc.hwctl = mxc_nand_enable_hwecc; 809 this->ecc.hwctl = mxc_nand_enable_hwecc;
797 this->ecc.correct = mxc_nand_correct_data; 810 this->ecc.correct = mxc_nand_correct_data;
798 this->ecc.mode = NAND_ECC_HW; 811 this->ecc.mode = NAND_ECC_HW;
799 tmp = readw(host->regs + NFC_CONFIG1);
800 tmp |= NFC_ECC_EN;
801 writew(tmp, host->regs + NFC_CONFIG1);
802 } else { 812 } else {
803 this->ecc.mode = NAND_ECC_SOFT; 813 this->ecc.mode = NAND_ECC_SOFT;
804 tmp = readw(host->regs + NFC_CONFIG1);
805 tmp &= ~NFC_ECC_EN;
806 writew(tmp, host->regs + NFC_CONFIG1);
807 } 814 }
808 815
809 /* NAND bus width determines access functions used by upper layer */ 816
@@ -817,8 +824,16 @@ static int __init mxcnd_probe(struct platform_device *pdev)
817 this->options |= NAND_USE_FLASH_BBT; 824 this->options |= NAND_USE_FLASH_BBT;
818 } 825 }
819 826
827 init_waitqueue_head(&host->irq_waitq);
828
829 host->irq = platform_get_irq(pdev, 0);
830
831 err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
832 if (err)
833 goto eirq;
834
820 /* first scan to find the device and get the page size */ 835 /* first scan to find the device and get the page size */
821 if (nand_scan_ident(mtd, 1)) { 836 if (nand_scan_ident(mtd, 1, NULL)) {
822 err = -ENXIO; 837 err = -ENXIO;
823 goto escan; 838 goto escan;
824 } 839 }
@@ -886,11 +901,14 @@ static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state)
886 int ret = 0; 901 int ret = 0;
887 902
888 DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n"); 903 DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n");
889 if (mtd) { 904
890 ret = mtd->suspend(mtd); 905 ret = mtd->suspend(mtd);
891 /* Disable the NFC clock */ 906
892 clk_disable(host->clk); 907 /*
893 } 908 * nand_suspend locks the device for exclusive access, so
909 * the clock must already be off.
910 */
911 BUG_ON(!ret && host->clk_act);
894 912
895 return ret; 913 return ret;
896} 914}
@@ -904,11 +922,7 @@ static int mxcnd_resume(struct platform_device *pdev)
904 922
905 DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n"); 923 DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n");
906 924
907 if (mtd) { 925 mtd->resume(mtd);
908 /* Enable the NFC clock */
909 clk_enable(host->clk);
910 mtd->resume(mtd);
911 }
912 926
913 return ret; 927 return ret;
914} 928}
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 8f2958fe214..4a7b86423ee 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -108,6 +108,35 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
108 */ 108 */
109DEFINE_LED_TRIGGER(nand_led_trigger); 109DEFINE_LED_TRIGGER(nand_led_trigger);
110 110
111static int check_offs_len(struct mtd_info *mtd,
112 loff_t ofs, uint64_t len)
113{
114 struct nand_chip *chip = mtd->priv;
115 int ret = 0;
116
117 /* Start address must align on block boundary */
118 if (ofs & ((1 << chip->phys_erase_shift) - 1)) {
119 DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__);
120 ret = -EINVAL;
121 }
122
123 /* Length must align on block boundary */
124 if (len & ((1 << chip->phys_erase_shift) - 1)) {
125 DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n",
126 __func__);
127 ret = -EINVAL;
128 }
129
130 /* Do not allow past end of device */
131 if (ofs + len > mtd->size) {
132 DEBUG(MTD_DEBUG_LEVEL0, "%s: Past end of device\n",
133 __func__);
134 ret = -EINVAL;
135 }
136
137 return ret;
138}
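check_offs_len() folds the three erase/lock sanity checks into one helper. The alignment tests rely on the erase size being a power of two, so `x & ((1 << shift) - 1)` is zero exactly when x sits on a block boundary. A standalone sketch of that test (the shift value is just an example):

	#include <stdio.h>
	#include <stdint.h>

	static int block_aligned(uint64_t x, int phys_erase_shift)
	{
		return (x & ((1ULL << phys_erase_shift) - 1)) == 0;
	}

	int main(void)
	{
		int shift = 17;		/* 128 KiB erase block */

		printf("0x20000 aligned: %d\n", block_aligned(0x20000, shift)); /* 1 */
		printf("0x21000 aligned: %d\n", block_aligned(0x21000, shift)); /* 0 */
		return 0;
	}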
139
111/** 140/**
112 * nand_release_device - [GENERIC] release chip 141 * nand_release_device - [GENERIC] release chip
113 * @mtd: MTD device structure 142 * @mtd: MTD device structure
@@ -318,6 +347,9 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
318 struct nand_chip *chip = mtd->priv; 347 struct nand_chip *chip = mtd->priv;
319 u16 bad; 348 u16 bad;
320 349
350 if (chip->options & NAND_BB_LAST_PAGE)
351 ofs += mtd->erasesize - mtd->writesize;
352
321 page = (int)(ofs >> chip->page_shift) & chip->pagemask; 353 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
322 354
323 if (getchip) { 355 if (getchip) {
@@ -335,14 +367,18 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
335 bad = cpu_to_le16(chip->read_word(mtd)); 367 bad = cpu_to_le16(chip->read_word(mtd));
336 if (chip->badblockpos & 0x1) 368 if (chip->badblockpos & 0x1)
337 bad >>= 8; 369 bad >>= 8;
338 if ((bad & 0xFF) != 0xff) 370 else
339 res = 1; 371 bad &= 0xFF;
340 } else { 372 } else {
341 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page); 373 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page);
342 if (chip->read_byte(mtd) != 0xff) 374 bad = chip->read_byte(mtd);
343 res = 1;
344 } 375 }
345 376
377 if (likely(chip->badblockbits == 8))
378 res = bad != 0xFF;
379 else
380 res = hweight8(bad) < chip->badblockbits;
381
346 if (getchip) 382 if (getchip)
347 nand_release_device(mtd); 383 nand_release_device(mtd);
348 384
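The reworked bad-block test no longer insists on the marker byte being exactly 0xFF: when badblockbits is below 8 it counts the set bits instead, so a marker with a couple of flipped bits is still treated as good. A standalone sketch of that decision with a portable popcount (the thresholds and sample bytes are illustrative):

	#include <stdio.h>
	#include <stdint.h>

	static int hweight8(uint8_t b)
	{
		int n = 0;
		while (b) { n += b & 1; b >>= 1; }
		return n;
	}

	static int block_is_bad(uint8_t marker, int badblockbits)
	{
		if (badblockbits == 8)
			return marker != 0xFF;
		return hweight8(marker) < badblockbits;
	}

	int main(void)
	{
		printf("0xFF, 8 bits -> bad=%d\n", block_is_bad(0xFF, 8)); /* 0 */
		printf("0xEF, 8 bits -> bad=%d\n", block_is_bad(0xEF, 8)); /* 1 */
		printf("0xEF, 7 bits -> bad=%d\n", block_is_bad(0xEF, 7)); /* 0 */
		return 0;
	}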
@@ -363,6 +399,9 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
363 uint8_t buf[2] = { 0, 0 }; 399 uint8_t buf[2] = { 0, 0 };
364 int block, ret; 400 int block, ret;
365 401
402 if (chip->options & NAND_BB_LAST_PAGE)
403 ofs += mtd->erasesize - mtd->writesize;
404
366 /* Get block number */ 405 /* Get block number */
367 block = (int)(ofs >> chip->bbt_erase_shift); 406 block = (int)(ofs >> chip->bbt_erase_shift);
368 if (chip->bbt) 407 if (chip->bbt)
@@ -401,6 +440,11 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
401static int nand_check_wp(struct mtd_info *mtd) 440static int nand_check_wp(struct mtd_info *mtd)
402{ 441{
403 struct nand_chip *chip = mtd->priv; 442 struct nand_chip *chip = mtd->priv;
443
444 /* broken xD cards report WP despite being writable */
445 if (chip->options & NAND_BROKEN_XD)
446 return 0;
447
404 /* Check the WP bit */ 448 /* Check the WP bit */
405 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); 449 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
406 return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1; 450 return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
@@ -744,9 +788,6 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
744 chip->state = FL_PM_SUSPENDED; 788 chip->state = FL_PM_SUSPENDED;
745 spin_unlock(lock); 789 spin_unlock(lock);
746 return 0; 790 return 0;
747 } else {
748 spin_unlock(lock);
749 return -EAGAIN;
750 } 791 }
751 } 792 }
752 set_current_state(TASK_UNINTERRUPTIBLE); 793 set_current_state(TASK_UNINTERRUPTIBLE);
@@ -835,6 +876,168 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
835} 876}
836 877
837/** 878/**
879 * __nand_unlock - [REPLACEABLE] unlocks the specified locked blocks
880 *
881 * @param mtd - mtd info
882 * @param ofs - offset to start unlock from
883 * @param len - length to unlock
884 * @param invert - when 0, unlock the range of blocks within the lower and
885 * upper boundary addresses
886 * when 1, unlock the range of blocks outside the
887 * lower and upper boundary addresses
888 *
889 * @return - unlock status
890 */
891static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
892 uint64_t len, int invert)
893{
894 int ret = 0;
895 int status, page;
896 struct nand_chip *chip = mtd->priv;
897
898 /* Submit address of first page to unlock */
899 page = ofs >> chip->page_shift;
900 chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
901
902 /* Submit address of last page to unlock */
903 page = (ofs + len) >> chip->page_shift;
904 chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
905 (page | invert) & chip->pagemask);
906
907 /* Call wait ready function */
908 status = chip->waitfunc(mtd, chip);
909 udelay(1000);
910 /* See if device thinks it succeeded */
911 if (status & 0x01) {
912 DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
913 __func__, status);
914 ret = -EIO;
915 }
916
917 return ret;
918}
919
920/**
921 * nand_unlock - [REPLACEABLE] unlocks the specified locked blocks
922 *
923 * @param mtd - mtd info
924 * @param ofs - offset to start unlock from
925 * @param len - length to unlock
926 *
927 * @return - unlock status
928 */
929int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
930{
931 int ret = 0;
932 int chipnr;
933 struct nand_chip *chip = mtd->priv;
934
935 DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
936 __func__, (unsigned long long)ofs, len);
937
938 if (check_offs_len(mtd, ofs, len))
939 ret = -EINVAL;
940
941 /* Align to last block address if size addresses end of the device */
942 if (ofs + len == mtd->size)
943 len -= mtd->erasesize;
944
945 nand_get_device(chip, mtd, FL_UNLOCKING);
946
947 /* Shift to get chip number */
948 chipnr = ofs >> chip->chip_shift;
949
950 chip->select_chip(mtd, chipnr);
951
952 /* Check, if it is write protected */
953 if (nand_check_wp(mtd)) {
954 DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
955 __func__);
956 ret = -EIO;
957 goto out;
958 }
959
960 ret = __nand_unlock(mtd, ofs, len, 0);
961
962out:
963 /* de-select the NAND device */
964 chip->select_chip(mtd, -1);
965
966 nand_release_device(mtd);
967
968 return ret;
969}
970
971/**
972 * nand_lock - [REPLACEABLE] locks all blocks present in the device
973 *
974 * @param mtd - mtd info
975 * @param ofs - offset to start lock from
976 * @param len - length to lock
977 *
978 * @return - lock status
979 *
980 * This feature is not supported by many NAND parts. 'Micron' NAND parts
981 * do have this feature, but it only allows locking all blocks, not a
982 * specified range of blocks.
983 *
984 * Implementing 'lock' feature by making use of 'unlock', for now.
985 */
986int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
987{
988 int ret = 0;
989 int chipnr, status, page;
990 struct nand_chip *chip = mtd->priv;
991
992 DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
993 __func__, (unsigned long long)ofs, len);
994
995 if (check_offs_len(mtd, ofs, len))
996 ret = -EINVAL;
997
998 nand_get_device(chip, mtd, FL_LOCKING);
999
1000 /* Shift to get chip number */
1001 chipnr = ofs >> chip->chip_shift;
1002
1003 chip->select_chip(mtd, chipnr);
1004
1005 /* Check, if it is write protected */
1006 if (nand_check_wp(mtd)) {
1007 DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
1008 __func__);
1009 status = MTD_ERASE_FAILED;
1010 ret = -EIO;
1011 goto out;
1012 }
1013
1014 /* Submit address of first page to lock */
1015 page = ofs >> chip->page_shift;
1016 chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
1017
1018 /* Call wait ready function */
1019 status = chip->waitfunc(mtd, chip);
1020 udelay(1000);
1021 /* See if device thinks it succeeded */
1022 if (status & 0x01) {
1023 DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
1024 __func__, status);
1025 ret = -EIO;
1026 goto out;
1027 }
1028
1029 ret = __nand_unlock(mtd, ofs, len, 0x1);
1030
1031out:
1032 /* de-select the NAND device */
1033 chip->select_chip(mtd, -1);
1034
1035 nand_release_device(mtd);
1036
1037 return ret;
1038}
1039
1040/**
838 * nand_read_page_raw - [Intern] read raw page data without ecc 1041 * nand_read_page_raw - [Intern] read raw page data without ecc
839 * @mtd: mtd info structure 1042 * @mtd: mtd info structure
840 * @chip: nand chip info structure 1043 * @chip: nand chip info structure
@@ -1232,6 +1435,9 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1232 int ret = 0; 1435 int ret = 0;
1233 uint32_t readlen = ops->len; 1436 uint32_t readlen = ops->len;
1234 uint32_t oobreadlen = ops->ooblen; 1437 uint32_t oobreadlen = ops->ooblen;
1438 uint32_t max_oobsize = ops->mode == MTD_OOB_AUTO ?
1439 mtd->oobavail : mtd->oobsize;
1440
1235 uint8_t *bufpoi, *oob, *buf; 1441 uint8_t *bufpoi, *oob, *buf;
1236 1442
1237 stats = mtd->ecc_stats; 1443 stats = mtd->ecc_stats;
@@ -1282,18 +1488,14 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1282 buf += bytes; 1488 buf += bytes;
1283 1489
1284 if (unlikely(oob)) { 1490 if (unlikely(oob)) {
1285 /* Raw mode does data:oob:data:oob */ 1491
1286 if (ops->mode != MTD_OOB_RAW) { 1492 int toread = min(oobreadlen, max_oobsize);
1287 int toread = min(oobreadlen, 1493
1288 chip->ecc.layout->oobavail); 1494 if (toread) {
1289 if (toread) { 1495 oob = nand_transfer_oob(chip,
1290 oob = nand_transfer_oob(chip, 1496 oob, ops, toread);
1291 oob, ops, toread); 1497 oobreadlen -= toread;
1292 oobreadlen -= toread; 1498 }
1293 }
1294 } else
1295 buf = nand_transfer_oob(chip,
1296 buf, ops, mtd->oobsize);
1297 } 1499 }
1298 1500
1299 if (!(chip->options & NAND_NO_READRDY)) { 1501 if (!(chip->options & NAND_NO_READRDY)) {
@@ -1880,11 +2082,9 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1880 * @oob: oob data buffer 2082 * @oob: oob data buffer
1881 * @ops: oob ops structure 2083 * @ops: oob ops structure
1882 */ 2084 */
1883static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, 2085static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
1884 struct mtd_oob_ops *ops) 2086 struct mtd_oob_ops *ops)
1885{ 2087{
1886 size_t len = ops->ooblen;
1887
1888 switch(ops->mode) { 2088 switch(ops->mode) {
1889 2089
1890 case MTD_OOB_PLACE: 2090 case MTD_OOB_PLACE:
@@ -1939,6 +2139,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
1939 int chipnr, realpage, page, blockmask, column; 2139 int chipnr, realpage, page, blockmask, column;
1940 struct nand_chip *chip = mtd->priv; 2140 struct nand_chip *chip = mtd->priv;
1941 uint32_t writelen = ops->len; 2141 uint32_t writelen = ops->len;
2142
2143 uint32_t oobwritelen = ops->ooblen;
2144 uint32_t oobmaxlen = ops->mode == MTD_OOB_AUTO ?
2145 mtd->oobavail : mtd->oobsize;
2146
1942 uint8_t *oob = ops->oobbuf; 2147 uint8_t *oob = ops->oobbuf;
1943 uint8_t *buf = ops->datbuf; 2148 uint8_t *buf = ops->datbuf;
1944 int ret, subpage; 2149 int ret, subpage;
@@ -1980,6 +2185,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
1980 if (likely(!oob)) 2185 if (likely(!oob))
1981 memset(chip->oob_poi, 0xff, mtd->oobsize); 2186 memset(chip->oob_poi, 0xff, mtd->oobsize);
1982 2187
2188 /* Don't allow multipage oob writes with offset */
2189 if (ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
2190 return -EINVAL;
2191
1983 while(1) { 2192 while(1) {
1984 int bytes = mtd->writesize; 2193 int bytes = mtd->writesize;
1985 int cached = writelen > bytes && page != blockmask; 2194 int cached = writelen > bytes && page != blockmask;
@@ -1995,8 +2204,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
1995 wbuf = chip->buffers->databuf; 2204 wbuf = chip->buffers->databuf;
1996 } 2205 }
1997 2206
1998 if (unlikely(oob)) 2207 if (unlikely(oob)) {
1999 oob = nand_fill_oob(chip, oob, ops); 2208 size_t len = min(oobwritelen, oobmaxlen);
2209 oob = nand_fill_oob(chip, oob, len, ops);
2210 oobwritelen -= len;
2211 }
2000 2212
2001 ret = chip->write_page(mtd, chip, wbuf, page, cached, 2213 ret = chip->write_page(mtd, chip, wbuf, page, cached,
2002 (ops->mode == MTD_OOB_RAW)); 2214 (ops->mode == MTD_OOB_RAW));
@@ -2170,7 +2382,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2170 chip->pagebuf = -1; 2382 chip->pagebuf = -1;
2171 2383
2172 memset(chip->oob_poi, 0xff, mtd->oobsize); 2384 memset(chip->oob_poi, 0xff, mtd->oobsize);
2173 nand_fill_oob(chip, ops->oobbuf, ops); 2385 nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
2174 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask); 2386 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
2175 memset(chip->oob_poi, 0xff, mtd->oobsize); 2387 memset(chip->oob_poi, 0xff, mtd->oobsize);
2176 2388
@@ -2293,25 +2505,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2293 __func__, (unsigned long long)instr->addr, 2505 __func__, (unsigned long long)instr->addr,
2294 (unsigned long long)instr->len); 2506 (unsigned long long)instr->len);
2295 2507
2296 /* Start address must align on block boundary */ 2508 if (check_offs_len(mtd, instr->addr, instr->len))
2297 if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) {
2298 DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__);
2299 return -EINVAL; 2509 return -EINVAL;
2300 }
2301
2302 /* Length must align on block boundary */
2303 if (instr->len & ((1 << chip->phys_erase_shift) - 1)) {
2304 DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n",
2305 __func__);
2306 return -EINVAL;
2307 }
2308
2309 /* Do not allow erase past end of device */
2310 if ((instr->len + instr->addr) > mtd->size) {
2311 DEBUG(MTD_DEBUG_LEVEL0, "%s: Erase past end of device\n",
2312 __func__);
2313 return -EINVAL;
2314 }
2315 2510
2316 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; 2511 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
2317 2512
@@ -2582,11 +2777,11 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
2582 */ 2777 */
2583static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, 2778static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2584 struct nand_chip *chip, 2779 struct nand_chip *chip,
2585 int busw, int *maf_id) 2780 int busw, int *maf_id,
2781 struct nand_flash_dev *type)
2586{ 2782{
2587 struct nand_flash_dev *type = NULL;
2588 int i, dev_id, maf_idx; 2783 int i, dev_id, maf_idx;
2589 int tmp_id, tmp_manf; 2784 u8 id_data[8];
2590 2785
2591 /* Select the device */ 2786 /* Select the device */
2592 chip->select_chip(mtd, 0); 2787 chip->select_chip(mtd, 0);
@@ -2612,27 +2807,26 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2612 2807
2613 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 2808 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
2614 2809
2615 /* Read manufacturer and device IDs */ 2810 /* Read entire ID string */
2616 2811
2617 tmp_manf = chip->read_byte(mtd); 2812 for (i = 0; i < 8; i++)
2618 tmp_id = chip->read_byte(mtd); 2813 id_data[i] = chip->read_byte(mtd);
2619 2814
2620 if (tmp_manf != *maf_id || tmp_id != dev_id) { 2815 if (id_data[0] != *maf_id || id_data[1] != dev_id) {
2621 printk(KERN_INFO "%s: second ID read did not match " 2816 printk(KERN_INFO "%s: second ID read did not match "
2622 "%02x,%02x against %02x,%02x\n", __func__, 2817 "%02x,%02x against %02x,%02x\n", __func__,
2623 *maf_id, dev_id, tmp_manf, tmp_id); 2818 *maf_id, dev_id, id_data[0], id_data[1]);
2624 return ERR_PTR(-ENODEV); 2819 return ERR_PTR(-ENODEV);
2625 } 2820 }
2626 2821
2627 /* Lookup the flash id */
2628 for (i = 0; nand_flash_ids[i].name != NULL; i++) {
2629 if (dev_id == nand_flash_ids[i].id) {
2630 type = &nand_flash_ids[i];
2631 break;
2632 }
2633 }
2634
2635 if (!type) 2822 if (!type)
2823 type = nand_flash_ids;
2824
2825 for (; type->name != NULL; type++)
2826 if (dev_id == type->id)
2827 break;
2828
2829 if (!type->name)
2636 return ERR_PTR(-ENODEV); 2830 return ERR_PTR(-ENODEV);
2637 2831
2638 if (!mtd->name) 2832 if (!mtd->name)
@@ -2644,21 +2838,45 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2644 if (!type->pagesize) { 2838 if (!type->pagesize) {
2645 int extid; 2839 int extid;
2646 /* The 3rd id byte holds MLC / multichip data */ 2840 /* The 3rd id byte holds MLC / multichip data */
2647 chip->cellinfo = chip->read_byte(mtd); 2841 chip->cellinfo = id_data[2];
2648 /* The 4th id byte is the important one */ 2842 /* The 4th id byte is the important one */
2649 extid = chip->read_byte(mtd); 2843 extid = id_data[3];
2650 /* Calc pagesize */
2651 mtd->writesize = 1024 << (extid & 0x3);
2652 extid >>= 2;
2653 /* Calc oobsize */
2654 mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
2655 extid >>= 2;
2656 /* Calc blocksize. Blocksize is multiples of 64KiB */
2657 mtd->erasesize = (64 * 1024) << (extid & 0x03);
2658 extid >>= 2;
2659 /* Get buswidth information */
2660 busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
2661 2844
2845 /*
2846 * Field definitions are in the following datasheets:
2847 * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
2848 * New style (6 byte ID): Samsung K9GAG08U0D (p.40)
2849 *
2850 * Check for wraparound + Samsung ID + nonzero 6th byte
2851 * to decide what to do.
2852 */
2853 if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
2854 id_data[0] == NAND_MFR_SAMSUNG &&
2855 id_data[5] != 0x00) {
2856 /* Calc pagesize */
2857 mtd->writesize = 2048 << (extid & 0x03);
2858 extid >>= 2;
2859 /* Calc oobsize */
2860 mtd->oobsize = (extid & 0x03) == 0x01 ? 128 : 218;
2861 extid >>= 2;
2862 /* Calc blocksize */
2863 mtd->erasesize = (128 * 1024) <<
2864 (((extid >> 1) & 0x04) | (extid & 0x03));
2865 busw = 0;
2866 } else {
2867 /* Calc pagesize */
2868 mtd->writesize = 1024 << (extid & 0x03);
2869 extid >>= 2;
2870 /* Calc oobsize */
2871 mtd->oobsize = (8 << (extid & 0x01)) *
2872 (mtd->writesize >> 9);
2873 extid >>= 2;
2874 /* Calc blocksize. Blocksize is multiples of 64KiB */
2875 mtd->erasesize = (64 * 1024) << (extid & 0x03);
2876 extid >>= 2;
2877 /* Get buswidth information */
2878 busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
2879 }
2662 } else { 2880 } else {
2663 /* 2881 /*
2664 * Old devices have chip data hardcoded in the device id table 2882 * Old devices have chip data hardcoded in the device id table
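
For reference, the new-style decode in the hunk above can be checked in isolation. The following user-space sketch is illustrative only: the ID bytes are made up, chosen purely to take the 6-byte Samsung branch, and NAND_MFR_SAMSUNG is redefined locally; it applies the same bit fields and prints the geometry the kernel code would derive.

#include <stdint.h>
#include <stdio.h>

#define NAND_MFR_SAMSUNG 0xec	/* Samsung manufacturer code, as in the kernel */

int main(void)
{
	/* Hypothetical ID bytes, constructed to hit the new 6-byte branch */
	uint8_t id[8] = { 0xec, 0xd5, 0x14, 0x29, 0x34, 0x41, 0xec, 0xd5 };
	int extid = id[3];
	unsigned writesize, oobsize, erasesize;

	/* Wraparound + Samsung + nonzero 6th byte => new-style decode */
	if (id[0] == id[6] && id[1] == id[7] &&
	    id[0] == NAND_MFR_SAMSUNG && id[5] != 0x00) {
		writesize = 2048 << (extid & 0x03);
		extid >>= 2;
		oobsize = (extid & 0x03) == 0x01 ? 128 : 218;
		extid >>= 2;
		erasesize = (128 * 1024) <<
			(((extid >> 1) & 0x04) | (extid & 0x03));
		printf("page %u, oob %u, block %u\n",
		       writesize, oobsize, erasesize);
	}
	return 0;
}

With id[3] = 0x29 this prints page 4096, oob 218, block 524288, i.e. a 4KiB-page part with a 218-byte spare area and 512KiB blocks.
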
@@ -2704,6 +2922,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2704 /* Set the bad block position */ 2922 /* Set the bad block position */
2705 chip->badblockpos = mtd->writesize > 512 ? 2923 chip->badblockpos = mtd->writesize > 512 ?
2706 NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS; 2924 NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
2925 chip->badblockbits = 8;
2707 2926
2708 /* Get chip options, preserve non chip based options */ 2927 /* Get chip options, preserve non chip based options */
2709 chip->options &= ~NAND_CHIPOPTIONS_MSK; 2928 chip->options &= ~NAND_CHIPOPTIONS_MSK;
@@ -2720,6 +2939,15 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2720 if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize) 2939 if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
2721 chip->options &= ~NAND_SAMSUNG_LP_OPTIONS; 2940 chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
2722 2941
2942 /*
2943 * Bad block marker is stored in the last page of each block
2944 * on Samsung and Hynix MLC devices
2945 */
2946 if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
2947 (*maf_id == NAND_MFR_SAMSUNG ||
2948 *maf_id == NAND_MFR_HYNIX))
2949 chip->options |= NAND_BB_LAST_PAGE;
2950
2723 /* Check for AND chips with 4 page planes */ 2951 /* Check for AND chips with 4 page planes */
2724 if (chip->options & NAND_4PAGE_ARRAY) 2952 if (chip->options & NAND_4PAGE_ARRAY)
2725 chip->erase_cmd = multi_erase_cmd; 2953 chip->erase_cmd = multi_erase_cmd;
@@ -2741,13 +2969,15 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2741 * nand_scan_ident - [NAND Interface] Scan for the NAND device 2969 * nand_scan_ident - [NAND Interface] Scan for the NAND device
2742 * @mtd: MTD device structure 2970 * @mtd: MTD device structure
2743 * @maxchips: Number of chips to scan for 2971 * @maxchips: Number of chips to scan for
2972 * @table: Alternative NAND ID table
2744 * 2973 *
2745 * This is the first phase of the normal nand_scan() function. It 2974 * This is the first phase of the normal nand_scan() function. It
2746 * reads the flash ID and sets up MTD fields accordingly. 2975 * reads the flash ID and sets up MTD fields accordingly.
2747 * 2976 *
2748 * The mtd->owner field must be set to the module of the caller. 2977 * The mtd->owner field must be set to the module of the caller.
2749 */ 2978 */
2750int nand_scan_ident(struct mtd_info *mtd, int maxchips) 2979int nand_scan_ident(struct mtd_info *mtd, int maxchips,
2980 struct nand_flash_dev *table)
2751{ 2981{
2752 int i, busw, nand_maf_id; 2982 int i, busw, nand_maf_id;
2753 struct nand_chip *chip = mtd->priv; 2983 struct nand_chip *chip = mtd->priv;
@@ -2759,7 +2989,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips)
2759 nand_set_defaults(chip, busw); 2989 nand_set_defaults(chip, busw);
2760 2990
2761 /* Read the flash type */ 2991 /* Read the flash type */
2762 type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id); 2992 type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id, table);
2763 2993
2764 if (IS_ERR(type)) { 2994 if (IS_ERR(type)) {
2765 if (!(chip->options & NAND_SCAN_SILENT_NODEV)) 2995 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
@@ -2989,7 +3219,8 @@ int nand_scan_tail(struct mtd_info *mtd)
2989 3219
2990 /* Fill in remaining MTD driver data */ 3220 /* Fill in remaining MTD driver data */
2991 mtd->type = MTD_NANDFLASH; 3221 mtd->type = MTD_NANDFLASH;
2992 mtd->flags = MTD_CAP_NANDFLASH; 3222 mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
3223 MTD_CAP_NANDFLASH;
2993 mtd->erase = nand_erase; 3224 mtd->erase = nand_erase;
2994 mtd->point = NULL; 3225 mtd->point = NULL;
2995 mtd->unpoint = NULL; 3226 mtd->unpoint = NULL;
@@ -3050,7 +3281,7 @@ int nand_scan(struct mtd_info *mtd, int maxchips)
3050 BUG(); 3281 BUG();
3051 } 3282 }
3052 3283
3053 ret = nand_scan_ident(mtd, maxchips); 3284 ret = nand_scan_ident(mtd, maxchips, NULL);
3054 if (!ret) 3285 if (!ret)
3055 ret = nand_scan_tail(mtd); 3286 ret = nand_scan_tail(mtd);
3056 return ret; 3287 return ret;
@@ -3077,6 +3308,8 @@ void nand_release(struct mtd_info *mtd)
3077 kfree(chip->buffers); 3308 kfree(chip->buffers);
3078} 3309}
3079 3310
3311EXPORT_SYMBOL_GPL(nand_lock);
3312EXPORT_SYMBOL_GPL(nand_unlock);
3080EXPORT_SYMBOL_GPL(nand_scan); 3313EXPORT_SYMBOL_GPL(nand_scan);
3081EXPORT_SYMBOL_GPL(nand_scan_ident); 3314EXPORT_SYMBOL_GPL(nand_scan_ident);
3082EXPORT_SYMBOL_GPL(nand_scan_tail); 3315EXPORT_SYMBOL_GPL(nand_scan_tail);
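
With the extra 'table' parameter added above, a controller driver can hand nand_scan_ident() its own ID list and still fall back to the built-in nand_flash_ids when it passes NULL. The sketch below is hypothetical usage, not part of the patch: my_nand_ids and my_scan are invented names, and the sample entry just mirrors the small-page layout used in nand_ids.c.

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

/* Hypothetical driver-local table; same entry layout as nand_flash_ids[] */
static struct nand_flash_dev my_nand_ids[] = {
	{"Example 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0},
	{NULL,}
};

static int my_scan(struct mtd_info *mtd)
{
	int ret;

	/* Try the private table first; NULL would mean "use nand_flash_ids" */
	ret = nand_scan_ident(mtd, 1, my_nand_ids);
	if (!ret)
		ret = nand_scan_tail(mtd);
	return ret;
}

Passing NULL keeps the old behaviour, which is why the nand_scan() call site above only gains a NULL argument.
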
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 55c23e5cd21..ad97c0ce73b 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -237,15 +237,33 @@ static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
237 size_t len) 237 size_t len)
238{ 238{
239 struct mtd_oob_ops ops; 239 struct mtd_oob_ops ops;
240 int res;
240 241
241 ops.mode = MTD_OOB_RAW; 242 ops.mode = MTD_OOB_RAW;
242 ops.ooboffs = 0; 243 ops.ooboffs = 0;
243 ops.ooblen = mtd->oobsize; 244 ops.ooblen = mtd->oobsize;
244 ops.oobbuf = buf;
245 ops.datbuf = buf;
246 ops.len = len;
247 245
248 return mtd->read_oob(mtd, offs, &ops); 246
247 while (len > 0) {
248 if (len <= mtd->writesize) {
249 ops.oobbuf = buf + len;
250 ops.datbuf = buf;
251 ops.len = len;
252 return mtd->read_oob(mtd, offs, &ops);
253 } else {
254 ops.oobbuf = buf + mtd->writesize;
255 ops.datbuf = buf;
256 ops.len = mtd->writesize;
257 res = mtd->read_oob(mtd, offs, &ops);
258
259 if (res)
260 return res;
261 }
262
263 buf += mtd->oobsize + mtd->writesize;
264 len -= mtd->writesize;
265 }
266 return 0;
249} 267}
250 268
251/* 269/*
@@ -414,6 +432,9 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
414 from = (loff_t)startblock << (this->bbt_erase_shift - 1); 432 from = (loff_t)startblock << (this->bbt_erase_shift - 1);
415 } 433 }
416 434
435 if (this->options & NAND_BB_LAST_PAGE)
436 from += mtd->erasesize - (mtd->writesize * len);
437
417 for (i = startblock; i < numblocks;) { 438 for (i = startblock; i < numblocks;) {
418 int ret; 439 int ret;
419 440
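
The chunked scan_read_raw() above reads one page of data plus its OOB per iteration and assumes the caller's buffer interleaves them page by page. A small user-space sketch of that offset bookkeeping; the geometry values are illustrative only.

#include <stdio.h>

int main(void)
{
	/* Illustrative geometry: 2048-byte pages with 64 bytes of OOB each */
	unsigned writesize = 2048, oobsize = 64;
	unsigned len = 3 * 2048;		/* total data bytes requested */
	unsigned buf_off = 0, offs = 0;

	while (len > 0) {
		unsigned chunk = len < writesize ? len : writesize;

		/* mirrors the ops.datbuf / ops.oobbuf placement in the new loop */
		printf("read %4u data bytes -> buf+%u, oob -> buf+%u, flash offs %u\n",
		       chunk, buf_off, buf_off + chunk, offs);

		buf_off += writesize + oobsize;	/* each page is followed by its OOB */
		offs += writesize;
		len -= chunk;
	}
	return 0;
}

On the final, short chunk the OOB lands directly after the remaining data bytes, which is what the ops.oobbuf = buf + len branch above encodes.
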
diff --git a/drivers/mtd/nand/nand_bcm_umi.h b/drivers/mtd/nand/nand_bcm_umi.h
index 7cec2cd9785..198b304d6f7 100644
--- a/drivers/mtd/nand/nand_bcm_umi.h
+++ b/drivers/mtd/nand/nand_bcm_umi.h
@@ -167,18 +167,27 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
167 int numToRead = 16; /* There are 16 bytes per sector in the OOB */ 167 int numToRead = 16; /* There are 16 bytes per sector in the OOB */
168 168
169 /* ECC is already paused when this function is called */ 169 /* ECC is already paused when this function is called */
170 if (pageSize != NAND_DATA_ACCESS_SIZE) {
171 /* skip BI */
172#if defined(__KERNEL__) && !defined(STANDALONE)
173 *oobp++ = REG_NAND_DATA8;
174#else
175 REG_NAND_DATA8;
176#endif
177 numToRead--;
178 }
170 179
171 if (pageSize == NAND_DATA_ACCESS_SIZE) { 180 while (numToRead > numEccBytes) {
172 while (numToRead > numEccBytes) { 181 /* skip free oob region */
173 /* skip free oob region */
174#if defined(__KERNEL__) && !defined(STANDALONE) 182#if defined(__KERNEL__) && !defined(STANDALONE)
175 *oobp++ = REG_NAND_DATA8; 183 *oobp++ = REG_NAND_DATA8;
176#else 184#else
177 REG_NAND_DATA8; 185 REG_NAND_DATA8;
178#endif 186#endif
179 numToRead--; 187 numToRead--;
180 } 188 }
181 189
190 if (pageSize == NAND_DATA_ACCESS_SIZE) {
182 /* read ECC bytes before BI */ 191 /* read ECC bytes before BI */
183 nand_bcm_umi_bch_resume_read_ecc_calc(); 192 nand_bcm_umi_bch_resume_read_ecc_calc();
184 193
@@ -190,6 +199,7 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
190#else 199#else
191 eccCalc[eccPos++] = REG_NAND_DATA8; 200 eccCalc[eccPos++] = REG_NAND_DATA8;
192#endif 201#endif
202 numToRead--;
193 } 203 }
194 204
195 nand_bcm_umi_bch_pause_read_ecc_calc(); 205 nand_bcm_umi_bch_pause_read_ecc_calc();
@@ -204,49 +214,18 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
204 numToRead--; 214 numToRead--;
205 } 215 }
206 216
207 /* read ECC bytes */ 217 }
208 nand_bcm_umi_bch_resume_read_ecc_calc(); 218 /* read ECC bytes */
209 while (numToRead) { 219 nand_bcm_umi_bch_resume_read_ecc_calc();
210#if defined(__KERNEL__) && !defined(STANDALONE) 220 while (numToRead) {
211 *oobp = REG_NAND_DATA8;
212 eccCalc[eccPos++] = *oobp;
213 oobp++;
214#else
215 eccCalc[eccPos++] = REG_NAND_DATA8;
216#endif
217 numToRead--;
218 }
219 } else {
220 /* skip BI */
221#if defined(__KERNEL__) && !defined(STANDALONE) 221#if defined(__KERNEL__) && !defined(STANDALONE)
222 *oobp++ = REG_NAND_DATA8; 222 *oobp = REG_NAND_DATA8;
223 eccCalc[eccPos++] = *oobp;
224 oobp++;
223#else 225#else
224 REG_NAND_DATA8; 226 eccCalc[eccPos++] = REG_NAND_DATA8;
225#endif 227#endif
226 numToRead--; 228 numToRead--;
227
228 while (numToRead > numEccBytes) {
229 /* skip free oob region */
230#if defined(__KERNEL__) && !defined(STANDALONE)
231 *oobp++ = REG_NAND_DATA8;
232#else
233 REG_NAND_DATA8;
234#endif
235 numToRead--;
236 }
237
238 /* read ECC bytes */
239 nand_bcm_umi_bch_resume_read_ecc_calc();
240 while (numToRead) {
241#if defined(__KERNEL__) && !defined(STANDALONE)
242 *oobp = REG_NAND_DATA8;
243 eccCalc[eccPos++] = *oobp;
244 oobp++;
245#else
246 eccCalc[eccPos++] = REG_NAND_DATA8;
247#endif
248 numToRead--;
249 }
250 } 229 }
251} 230}
252 231
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 69ee2c90eb0..89907ed9900 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -82,6 +82,7 @@ struct nand_flash_dev nand_flash_ids[] = {
82 /* 1 Gigabit */ 82 /* 1 Gigabit */
83 {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS}, 83 {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS},
84 {"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, LP_OPTIONS}, 84 {"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, LP_OPTIONS},
85 {"NAND 128MiB 3,3V 8-bit", 0xD1, 0, 128, 0, LP_OPTIONS},
85 {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16}, 86 {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16},
86 {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16}, 87 {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16},
87 88
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 7281000fef2..261337efe0e 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -80,6 +80,9 @@
80#ifndef CONFIG_NANDSIM_DBG 80#ifndef CONFIG_NANDSIM_DBG
81#define CONFIG_NANDSIM_DBG 0 81#define CONFIG_NANDSIM_DBG 0
82#endif 82#endif
83#ifndef CONFIG_NANDSIM_MAX_PARTS
84#define CONFIG_NANDSIM_MAX_PARTS 32
85#endif
83 86
84static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE; 87static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE;
85static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE; 88static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE;
@@ -94,7 +97,7 @@ static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
94static uint do_delays = CONFIG_NANDSIM_DO_DELAYS; 97static uint do_delays = CONFIG_NANDSIM_DO_DELAYS;
95static uint log = CONFIG_NANDSIM_LOG; 98static uint log = CONFIG_NANDSIM_LOG;
96static uint dbg = CONFIG_NANDSIM_DBG; 99static uint dbg = CONFIG_NANDSIM_DBG;
97static unsigned long parts[MAX_MTD_DEVICES]; 100static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS];
98static unsigned int parts_num; 101static unsigned int parts_num;
99static char *badblocks = NULL; 102static char *badblocks = NULL;
100static char *weakblocks = NULL; 103static char *weakblocks = NULL;
@@ -135,8 +138,8 @@ MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read I
135MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)"); 138MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
136MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds"); 139MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds");
137MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)"); 140MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
138MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanodeconds)"); 141MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)");
139MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanodeconds)"); 142MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)");
140MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)"); 143MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
141MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero"); 144MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
142MODULE_PARM_DESC(log, "Perform logging if not zero"); 145MODULE_PARM_DESC(log, "Perform logging if not zero");
@@ -288,7 +291,7 @@ union ns_mem {
288 * The structure which describes all the internal simulator data. 291 * The structure which describes all the internal simulator data.
289 */ 292 */
290struct nandsim { 293struct nandsim {
291 struct mtd_partition partitions[MAX_MTD_DEVICES]; 294 struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
292 unsigned int nbparts; 295 unsigned int nbparts;
293 296
294 uint busw; /* flash chip bus width (8 or 16) */ 297 uint busw; /* flash chip bus width (8 or 16) */
@@ -312,7 +315,7 @@ struct nandsim {
312 union ns_mem buf; 315 union ns_mem buf;
313 316
314 /* NAND flash "geometry" */ 317 /* NAND flash "geometry" */
315 struct nandsin_geometry { 318 struct {
316 uint64_t totsz; /* total flash size, bytes */ 319 uint64_t totsz; /* total flash size, bytes */
317 uint32_t secsz; /* flash sector (erase block) size, bytes */ 320 uint32_t secsz; /* flash sector (erase block) size, bytes */
318 uint pgsz; /* NAND flash page size, bytes */ 321 uint pgsz; /* NAND flash page size, bytes */
@@ -331,7 +334,7 @@ struct nandsim {
331 } geom; 334 } geom;
332 335
333 /* NAND flash internal registers */ 336 /* NAND flash internal registers */
334 struct nandsim_regs { 337 struct {
335 unsigned command; /* the command register */ 338 unsigned command; /* the command register */
336 u_char status; /* the status register */ 339 u_char status; /* the status register */
337 uint row; /* the page number */ 340 uint row; /* the page number */
@@ -342,7 +345,7 @@ struct nandsim {
342 } regs; 345 } regs;
343 346
344 /* NAND flash lines state */ 347 /* NAND flash lines state */
345 struct ns_lines_status { 348 struct {
346 int ce; /* chip Enable */ 349 int ce; /* chip Enable */
347 int cle; /* command Latch Enable */ 350 int cle; /* command Latch Enable */
348 int ale; /* address Latch Enable */ 351 int ale; /* address Latch Enable */
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index 1f6f741af5d..8c0b6937522 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -105,21 +105,21 @@ static int nomadik_nand_probe(struct platform_device *pdev)
105 ret = -EIO; 105 ret = -EIO;
106 goto err_unmap; 106 goto err_unmap;
107 } 107 }
108 host->addr_va = ioremap(res->start, res->end - res->start + 1); 108 host->addr_va = ioremap(res->start, resource_size(res));
109 109
110 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data"); 110 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
111 if (!res) { 111 if (!res) {
112 ret = -EIO; 112 ret = -EIO;
113 goto err_unmap; 113 goto err_unmap;
114 } 114 }
115 host->data_va = ioremap(res->start, res->end - res->start + 1); 115 host->data_va = ioremap(res->start, resource_size(res));
116 116
117 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd"); 117 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
118 if (!res) { 118 if (!res) {
119 ret = -EIO; 119 ret = -EIO;
120 goto err_unmap; 120 goto err_unmap;
121 } 121 }
122 host->cmd_va = ioremap(res->start, res->end - res->start + 1); 122 host->cmd_va = ioremap(res->start, resource_size(res));
123 123
124 if (!host->addr_va || !host->data_va || !host->cmd_va) { 124 if (!host->addr_va || !host->data_va || !host->cmd_va) {
125 ret = -ENOMEM; 125 ret = -ENOMEM;
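
The three nomadik hunks are mechanical: resource_size() yields the same inclusive length as the open-coded expression it replaces. A tiny sketch of the equivalence; the helper name is ours, the arithmetic is the point.

#include <stdio.h>

/* resource ranges are inclusive of 'end', hence the +1 */
static unsigned long span(unsigned long start, unsigned long end)
{
	return end - start + 1;		/* what resource_size(res) computes */
}

int main(void)
{
	printf("%lu\n", span(0x80000000UL, 0x80000fffUL));	/* prints 4096 */
	return 0;
}
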
diff --git a/drivers/mtd/nand/w90p910_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 7680e731348..6eddf7361ed 100644
--- a/drivers/mtd/nand/w90p910_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2009 Nuvoton technology corporation. 2 * Copyright © 2009 Nuvoton technology corporation.
3 * 3 *
4 * Wan ZongShun <mcuos.com@gmail.com> 4 * Wan ZongShun <mcuos.com@gmail.com>
5 * 5 *
@@ -55,7 +55,7 @@
55#define write_addr_reg(dev, val) \ 55#define write_addr_reg(dev, val) \
56 __raw_writel((val), (dev)->reg + REG_SMADDR) 56 __raw_writel((val), (dev)->reg + REG_SMADDR)
57 57
58struct w90p910_nand { 58struct nuc900_nand {
59 struct mtd_info mtd; 59 struct mtd_info mtd;
60 struct nand_chip chip; 60 struct nand_chip chip;
61 void __iomem *reg; 61 void __iomem *reg;
@@ -76,49 +76,49 @@ static const struct mtd_partition partitions[] = {
76 } 76 }
77}; 77};
78 78
79static unsigned char w90p910_nand_read_byte(struct mtd_info *mtd) 79static unsigned char nuc900_nand_read_byte(struct mtd_info *mtd)
80{ 80{
81 unsigned char ret; 81 unsigned char ret;
82 struct w90p910_nand *nand; 82 struct nuc900_nand *nand;
83 83
84 nand = container_of(mtd, struct w90p910_nand, mtd); 84 nand = container_of(mtd, struct nuc900_nand, mtd);
85 85
86 ret = (unsigned char)read_data_reg(nand); 86 ret = (unsigned char)read_data_reg(nand);
87 87
88 return ret; 88 return ret;
89} 89}
90 90
91static void w90p910_nand_read_buf(struct mtd_info *mtd, 91static void nuc900_nand_read_buf(struct mtd_info *mtd,
92 unsigned char *buf, int len) 92 unsigned char *buf, int len)
93{ 93{
94 int i; 94 int i;
95 struct w90p910_nand *nand; 95 struct nuc900_nand *nand;
96 96
97 nand = container_of(mtd, struct w90p910_nand, mtd); 97 nand = container_of(mtd, struct nuc900_nand, mtd);
98 98
99 for (i = 0; i < len; i++) 99 for (i = 0; i < len; i++)
100 buf[i] = (unsigned char)read_data_reg(nand); 100 buf[i] = (unsigned char)read_data_reg(nand);
101} 101}
102 102
103static void w90p910_nand_write_buf(struct mtd_info *mtd, 103static void nuc900_nand_write_buf(struct mtd_info *mtd,
104 const unsigned char *buf, int len) 104 const unsigned char *buf, int len)
105{ 105{
106 int i; 106 int i;
107 struct w90p910_nand *nand; 107 struct nuc900_nand *nand;
108 108
109 nand = container_of(mtd, struct w90p910_nand, mtd); 109 nand = container_of(mtd, struct nuc900_nand, mtd);
110 110
111 for (i = 0; i < len; i++) 111 for (i = 0; i < len; i++)
112 write_data_reg(nand, buf[i]); 112 write_data_reg(nand, buf[i]);
113} 113}
114 114
115static int w90p910_verify_buf(struct mtd_info *mtd, 115static int nuc900_verify_buf(struct mtd_info *mtd,
116 const unsigned char *buf, int len) 116 const unsigned char *buf, int len)
117{ 117{
118 int i; 118 int i;
119 struct w90p910_nand *nand; 119 struct nuc900_nand *nand;
120 120
121 nand = container_of(mtd, struct w90p910_nand, mtd); 121 nand = container_of(mtd, struct nuc900_nand, mtd);
122 122
123 for (i = 0; i < len; i++) { 123 for (i = 0; i < len; i++) {
124 if (buf[i] != (unsigned char)read_data_reg(nand)) 124 if (buf[i] != (unsigned char)read_data_reg(nand))
@@ -128,7 +128,7 @@ static int w90p910_verify_buf(struct mtd_info *mtd,
128 return 0; 128 return 0;
129} 129}
130 130
131static int w90p910_check_rb(struct w90p910_nand *nand) 131static int nuc900_check_rb(struct nuc900_nand *nand)
132{ 132{
133 unsigned int val; 133 unsigned int val;
134 spin_lock(&nand->lock); 134 spin_lock(&nand->lock);
@@ -139,24 +139,24 @@ static int w90p910_check_rb(struct w90p910_nand *nand)
139 return val; 139 return val;
140} 140}
141 141
142static int w90p910_nand_devready(struct mtd_info *mtd) 142static int nuc900_nand_devready(struct mtd_info *mtd)
143{ 143{
144 struct w90p910_nand *nand; 144 struct nuc900_nand *nand;
145 int ready; 145 int ready;
146 146
147 nand = container_of(mtd, struct w90p910_nand, mtd); 147 nand = container_of(mtd, struct nuc900_nand, mtd);
148 148
149 ready = (w90p910_check_rb(nand)) ? 1 : 0; 149 ready = (nuc900_check_rb(nand)) ? 1 : 0;
150 return ready; 150 return ready;
151} 151}
152 152
153static void w90p910_nand_command_lp(struct mtd_info *mtd, 153static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
154 unsigned int command, int column, int page_addr) 154 int column, int page_addr)
155{ 155{
156 register struct nand_chip *chip = mtd->priv; 156 register struct nand_chip *chip = mtd->priv;
157 struct w90p910_nand *nand; 157 struct nuc900_nand *nand;
158 158
159 nand = container_of(mtd, struct w90p910_nand, mtd); 159 nand = container_of(mtd, struct nuc900_nand, mtd);
160 160
161 if (command == NAND_CMD_READOOB) { 161 if (command == NAND_CMD_READOOB) {
162 column += mtd->writesize; 162 column += mtd->writesize;
@@ -212,7 +212,7 @@ static void w90p910_nand_command_lp(struct mtd_info *mtd,
212 write_cmd_reg(nand, NAND_CMD_STATUS); 212 write_cmd_reg(nand, NAND_CMD_STATUS);
213 write_cmd_reg(nand, command); 213 write_cmd_reg(nand, command);
214 214
215 while (!w90p910_check_rb(nand)) 215 while (!nuc900_check_rb(nand))
216 ; 216 ;
217 217
218 return; 218 return;
@@ -241,7 +241,7 @@ static void w90p910_nand_command_lp(struct mtd_info *mtd,
241} 241}
242 242
243 243
244static void w90p910_nand_enable(struct w90p910_nand *nand) 244static void nuc900_nand_enable(struct nuc900_nand *nand)
245{ 245{
246 unsigned int val; 246 unsigned int val;
247 spin_lock(&nand->lock); 247 spin_lock(&nand->lock);
@@ -262,37 +262,37 @@ static void w90p910_nand_enable(struct w90p910_nand *nand)
262 spin_unlock(&nand->lock); 262 spin_unlock(&nand->lock);
263} 263}
264 264
265static int __devinit w90p910_nand_probe(struct platform_device *pdev) 265static int __devinit nuc900_nand_probe(struct platform_device *pdev)
266{ 266{
267 struct w90p910_nand *w90p910_nand; 267 struct nuc900_nand *nuc900_nand;
268 struct nand_chip *chip; 268 struct nand_chip *chip;
269 int retval; 269 int retval;
270 struct resource *res; 270 struct resource *res;
271 271
272 retval = 0; 272 retval = 0;
273 273
274 w90p910_nand = kzalloc(sizeof(struct w90p910_nand), GFP_KERNEL); 274 nuc900_nand = kzalloc(sizeof(struct nuc900_nand), GFP_KERNEL);
275 if (!w90p910_nand) 275 if (!nuc900_nand)
276 return -ENOMEM; 276 return -ENOMEM;
277 chip = &(w90p910_nand->chip); 277 chip = &(nuc900_nand->chip);
278 278
279 w90p910_nand->mtd.priv = chip; 279 nuc900_nand->mtd.priv = chip;
280 w90p910_nand->mtd.owner = THIS_MODULE; 280 nuc900_nand->mtd.owner = THIS_MODULE;
281 spin_lock_init(&w90p910_nand->lock); 281 spin_lock_init(&nuc900_nand->lock);
282 282
283 w90p910_nand->clk = clk_get(&pdev->dev, NULL); 283 nuc900_nand->clk = clk_get(&pdev->dev, NULL);
284 if (IS_ERR(w90p910_nand->clk)) { 284 if (IS_ERR(nuc900_nand->clk)) {
285 retval = -ENOENT; 285 retval = -ENOENT;
286 goto fail1; 286 goto fail1;
287 } 287 }
288 clk_enable(w90p910_nand->clk); 288 clk_enable(nuc900_nand->clk);
289 289
290 chip->cmdfunc = w90p910_nand_command_lp; 290 chip->cmdfunc = nuc900_nand_command_lp;
291 chip->dev_ready = w90p910_nand_devready; 291 chip->dev_ready = nuc900_nand_devready;
292 chip->read_byte = w90p910_nand_read_byte; 292 chip->read_byte = nuc900_nand_read_byte;
293 chip->write_buf = w90p910_nand_write_buf; 293 chip->write_buf = nuc900_nand_write_buf;
294 chip->read_buf = w90p910_nand_read_buf; 294 chip->read_buf = nuc900_nand_read_buf;
295 chip->verify_buf = w90p910_verify_buf; 295 chip->verify_buf = nuc900_verify_buf;
296 chip->chip_delay = 50; 296 chip->chip_delay = 50;
297 chip->options = 0; 297 chip->options = 0;
298 chip->ecc.mode = NAND_ECC_SOFT; 298 chip->ecc.mode = NAND_ECC_SOFT;
@@ -308,75 +308,75 @@ static int __devinit w90p910_nand_probe(struct platform_device *pdev)
308 goto fail1; 308 goto fail1;
309 } 309 }
310 310
311 w90p910_nand->reg = ioremap(res->start, resource_size(res)); 311 nuc900_nand->reg = ioremap(res->start, resource_size(res));
312 if (!w90p910_nand->reg) { 312 if (!nuc900_nand->reg) {
313 retval = -ENOMEM; 313 retval = -ENOMEM;
314 goto fail2; 314 goto fail2;
315 } 315 }
316 316
317 w90p910_nand_enable(w90p910_nand); 317 nuc900_nand_enable(nuc900_nand);
318 318
319 if (nand_scan(&(w90p910_nand->mtd), 1)) { 319 if (nand_scan(&(nuc900_nand->mtd), 1)) {
320 retval = -ENXIO; 320 retval = -ENXIO;
321 goto fail3; 321 goto fail3;
322 } 322 }
323 323
324 add_mtd_partitions(&(w90p910_nand->mtd), partitions, 324 add_mtd_partitions(&(nuc900_nand->mtd), partitions,
325 ARRAY_SIZE(partitions)); 325 ARRAY_SIZE(partitions));
326 326
327 platform_set_drvdata(pdev, w90p910_nand); 327 platform_set_drvdata(pdev, nuc900_nand);
328 328
329 return retval; 329 return retval;
330 330
331fail3: iounmap(w90p910_nand->reg); 331fail3: iounmap(nuc900_nand->reg);
332fail2: release_mem_region(res->start, resource_size(res)); 332fail2: release_mem_region(res->start, resource_size(res));
333fail1: kfree(w90p910_nand); 333fail1: kfree(nuc900_nand);
334 return retval; 334 return retval;
335} 335}
336 336
337static int __devexit w90p910_nand_remove(struct platform_device *pdev) 337static int __devexit nuc900_nand_remove(struct platform_device *pdev)
338{ 338{
339 struct w90p910_nand *w90p910_nand = platform_get_drvdata(pdev); 339 struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
340 struct resource *res; 340 struct resource *res;
341 341
342 iounmap(w90p910_nand->reg); 342 iounmap(nuc900_nand->reg);
343 343
344 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 344 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
345 release_mem_region(res->start, resource_size(res)); 345 release_mem_region(res->start, resource_size(res));
346 346
347 clk_disable(w90p910_nand->clk); 347 clk_disable(nuc900_nand->clk);
348 clk_put(w90p910_nand->clk); 348 clk_put(nuc900_nand->clk);
349 349
350 kfree(w90p910_nand); 350 kfree(nuc900_nand);
351 351
352 platform_set_drvdata(pdev, NULL); 352 platform_set_drvdata(pdev, NULL);
353 353
354 return 0; 354 return 0;
355} 355}
356 356
357static struct platform_driver w90p910_nand_driver = { 357static struct platform_driver nuc900_nand_driver = {
358 .probe = w90p910_nand_probe, 358 .probe = nuc900_nand_probe,
359 .remove = __devexit_p(w90p910_nand_remove), 359 .remove = __devexit_p(nuc900_nand_remove),
360 .driver = { 360 .driver = {
361 .name = "w90p910-fmi", 361 .name = "nuc900-fmi",
362 .owner = THIS_MODULE, 362 .owner = THIS_MODULE,
363 }, 363 },
364}; 364};
365 365
366static int __init w90p910_nand_init(void) 366static int __init nuc900_nand_init(void)
367{ 367{
368 return platform_driver_register(&w90p910_nand_driver); 368 return platform_driver_register(&nuc900_nand_driver);
369} 369}
370 370
371static void __exit w90p910_nand_exit(void) 371static void __exit nuc900_nand_exit(void)
372{ 372{
373 platform_driver_unregister(&w90p910_nand_driver); 373 platform_driver_unregister(&nuc900_nand_driver);
374} 374}
375 375
376module_init(w90p910_nand_init); 376module_init(nuc900_nand_init);
377module_exit(w90p910_nand_exit); 377module_exit(nuc900_nand_exit);
378 378
379MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); 379MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
380MODULE_DESCRIPTION("w90p910 nand driver!"); 380MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!");
381MODULE_LICENSE("GPL"); 381MODULE_LICENSE("GPL");
382MODULE_ALIAS("platform:w90p910-fmi"); 382MODULE_ALIAS("platform:nuc900-fmi");
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 7545568fce4..ee87325c771 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -292,11 +292,14 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
292 u32 *p = (u32 *)buf; 292 u32 *p = (u32 *)buf;
293 293
294 /* take care of subpage reads */ 294 /* take care of subpage reads */
295 for (; len % 4 != 0; ) { 295 if (len % 4) {
296 *buf++ = __raw_readb(info->nand.IO_ADDR_R); 296 if (info->nand.options & NAND_BUSWIDTH_16)
297 len--; 297 omap_read_buf16(mtd, buf, len % 4);
298 else
299 omap_read_buf8(mtd, buf, len % 4);
300 p = (u32 *) (buf + len % 4);
301 len -= len % 4;
298 } 302 }
299 p = (u32 *) buf;
300 303
301 /* configure and start prefetch transfer */ 304 /* configure and start prefetch transfer */
302 ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0); 305 ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
@@ -502,7 +505,7 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
502 omap_write_buf_pref(mtd, buf, len); 505 omap_write_buf_pref(mtd, buf, len);
503 else 506 else
504 /* start transfer in DMA mode */ 507 /* start transfer in DMA mode */
505 omap_nand_dma_transfer(mtd, buf, len, 0x1); 508 omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
506} 509}
507 510
508/** 511/**
@@ -1028,7 +1031,8 @@ out_free_info:
1028static int omap_nand_remove(struct platform_device *pdev) 1031static int omap_nand_remove(struct platform_device *pdev)
1029{ 1032{
1030 struct mtd_info *mtd = platform_get_drvdata(pdev); 1033 struct mtd_info *mtd = platform_get_drvdata(pdev);
1031 struct omap_nand_info *info = mtd->priv; 1034 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1035 mtd);
1032 1036
1033 platform_set_drvdata(pdev, NULL); 1037 platform_set_drvdata(pdev, NULL);
1034 if (use_dma) 1038 if (use_dma)
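
The omap_read_buf_pref() rework above drains any sub-word head of the buffer through the byte/halfword helpers before handing the 32-bit-aligned remainder to the prefetch engine. A compileable sketch of just that split; no hardware access, and the buffer size is illustrative.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t buf[2051];			/* deliberately not a multiple of 4 */
	int len = sizeof(buf);
	int head = len % 4;			/* read via omap_read_buf8/16 */
	uint32_t *p = (uint32_t *)(buf + head);	/* word reads start here */
	int words = (len - head) / 4;

	printf("head %d bytes, then %d word reads starting at offset %d\n",
	       head, words, (int)((uint8_t *)p - buf));
	return 0;
}
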
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index f59c07427af..da6e7534305 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -60,7 +60,13 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
60 } 60 }
61 buf64 = (uint64_t *)buf; 61 buf64 = (uint64_t *)buf;
62 while (i < len/8) { 62 while (i < len/8) {
63 uint64_t x; 63 /*
64 * Since GCC has no proper constraint (PR 43518)
65 * force x variable to r2/r3 registers as ldrd instruction
66 * requires first register to be even.
67 */
68 register uint64_t x asm ("r2");
69
64 asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base)); 70 asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base));
65 buf64[i++] = x; 71 buf64[i++] = x;
66 } 72 }
@@ -74,6 +80,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
74 struct mtd_info *mtd; 80 struct mtd_info *mtd;
75 struct nand_chip *nc; 81 struct nand_chip *nc;
76 struct orion_nand_data *board; 82 struct orion_nand_data *board;
83 struct resource *res;
77 void __iomem *io_base; 84 void __iomem *io_base;
78 int ret = 0; 85 int ret = 0;
79#ifdef CONFIG_MTD_PARTITIONS 86#ifdef CONFIG_MTD_PARTITIONS
@@ -89,8 +96,13 @@ static int __init orion_nand_probe(struct platform_device *pdev)
89 } 96 }
90 mtd = (struct mtd_info *)(nc + 1); 97 mtd = (struct mtd_info *)(nc + 1);
91 98
92 io_base = ioremap(pdev->resource[0].start, 99 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
93 pdev->resource[0].end - pdev->resource[0].start + 1); 100 if (!res) {
101 ret = -ENODEV;
102 goto no_res;
103 }
104
105 io_base = ioremap(res->start, resource_size(res));
94 if (!io_base) { 106 if (!io_base) {
95 printk(KERN_ERR "orion_nand: ioremap failed\n"); 107 printk(KERN_ERR "orion_nand: ioremap failed\n");
96 ret = -EIO; 108 ret = -EIO;
@@ -114,6 +126,9 @@ static int __init orion_nand_probe(struct platform_device *pdev)
114 if (board->width == 16) 126 if (board->width == 16)
115 nc->options |= NAND_BUSWIDTH_16; 127 nc->options |= NAND_BUSWIDTH_16;
116 128
129 if (board->dev_ready)
130 nc->dev_ready = board->dev_ready;
131
117 platform_set_drvdata(pdev, mtd); 132 platform_set_drvdata(pdev, mtd);
118 133
119 if (nand_scan(mtd, 1)) { 134 if (nand_scan(mtd, 1)) {
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index a97e9c95ab6..f02af24d033 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -209,7 +209,7 @@ static int __devexit pasemi_nand_remove(struct of_device *ofdev)
209 return 0; 209 return 0;
210} 210}
211 211
212static struct of_device_id pasemi_nand_match[] = 212static const struct of_device_id pasemi_nand_match[] =
213{ 213{
214 { 214 {
215 .compatible = "pasemi,localbus-nand", 215 .compatible = "pasemi,localbus-nand",
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 5d55152162c..e02fa4f0e3c 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1320,6 +1320,17 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1320 goto fail_free_irq; 1320 goto fail_free_irq;
1321 } 1321 }
1322 1322
1323 if (mtd_has_cmdlinepart()) {
1324 static const char *probes[] = { "cmdlinepart", NULL };
1325 struct mtd_partition *parts;
1326 int nr_parts;
1327
1328 nr_parts = parse_mtd_partitions(mtd, probes, &parts, 0);
1329
1330 if (nr_parts)
1331 return add_mtd_partitions(mtd, parts, nr_parts);
1332 }
1333
1323 return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); 1334 return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
1324 1335
1325fail_free_irq: 1336fail_free_irq:
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
new file mode 100644
index 00000000000..78a42329547
--- /dev/null
+++ b/drivers/mtd/nand/r852.c
@@ -0,0 +1,1140 @@
1/*
2 * Copyright © 2009 - Maxim Levitsky
3 * driver for Ricoh xD readers
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/jiffies.h>
13#include <linux/workqueue.h>
14#include <linux/interrupt.h>
15#include <linux/pci.h>
16#include <linux/pci_ids.h>
17#include <linux/delay.h>
18#include <linux/slab.h>
19#include <asm/byteorder.h>
20#include <linux/sched.h>
21#include "sm_common.h"
22#include "r852.h"
23
24
25static int r852_enable_dma = 1;
26module_param(r852_enable_dma, bool, S_IRUGO);
27MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");
28
29static int debug;
30module_param(debug, int, S_IRUGO | S_IWUSR);
31MODULE_PARM_DESC(debug, "Debug level (0-2)");
32
33/* read register */
34static inline uint8_t r852_read_reg(struct r852_device *dev, int address)
35{
36 uint8_t reg = readb(dev->mmio + address);
37 return reg;
38}
39
40/* write register */
41static inline void r852_write_reg(struct r852_device *dev,
42 int address, uint8_t value)
43{
44 writeb(value, dev->mmio + address);
45 mmiowb();
46}
47
48
49/* read dword sized register */
50static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address)
51{
52 uint32_t reg = le32_to_cpu(readl(dev->mmio + address));
53 return reg;
54}
55
56/* write dword sized register */
57static inline void r852_write_reg_dword(struct r852_device *dev,
58 int address, uint32_t value)
59{
60 writel(cpu_to_le32(value), dev->mmio + address);
61 mmiowb();
62}
63
64/* returns pointer to our private structure */
65static inline struct r852_device *r852_get_dev(struct mtd_info *mtd)
66{
67 struct nand_chip *chip = (struct nand_chip *)mtd->priv;
68 return (struct r852_device *)chip->priv;
69}
70
71
72/* check if controller supports dma */
73static void r852_dma_test(struct r852_device *dev)
74{
75 dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) &
76 (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2);
77
78 if (!dev->dma_usable)
79 message("Non dma capable device detected, dma disabled");
80
81 if (!r852_enable_dma) {
82 message("disabling dma on user request");
83 dev->dma_usable = 0;
84 }
85}
86
87/*
88 * Enable dma. Enables either the first or the second stage of the DMA,
89 * Expects dev->dma_dir and dev->dma_state to be set
90 */
91static void r852_dma_enable(struct r852_device *dev)
92{
93 uint8_t dma_reg, dma_irq_reg;
94
95 /* Set up dma settings */
96 dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS);
97 dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY);
98
99 if (dev->dma_dir)
100 dma_reg |= R852_DMA_READ;
101
102 if (dev->dma_state == DMA_INTERNAL) {
103 dma_reg |= R852_DMA_INTERNAL;
104 /* Precaution to make sure HW doesn't write */
105 /* to random kernel memory */
106 r852_write_reg_dword(dev, R852_DMA_ADDR,
107 cpu_to_le32(dev->phys_bounce_buffer));
108 } else {
109 dma_reg |= R852_DMA_MEMORY;
110 r852_write_reg_dword(dev, R852_DMA_ADDR,
111 cpu_to_le32(dev->phys_dma_addr));
112 }
113
114 /* Precaution: make sure write reached the device */
115 r852_read_reg_dword(dev, R852_DMA_ADDR);
116
117 r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg);
118
119 /* Set dma irq */
120 dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
121 r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
122 dma_irq_reg |
123 R852_DMA_IRQ_INTERNAL |
124 R852_DMA_IRQ_ERROR |
125 R852_DMA_IRQ_MEMORY);
126}
127
128/*
129 * Disable dma, called from the interrupt handler, which specifies
130 * success of the operation via 'error' argument
131 */
132static void r852_dma_done(struct r852_device *dev, int error)
133{
134 WARN_ON(dev->dma_stage == 0);
135
136 r852_write_reg_dword(dev, R852_DMA_IRQ_STA,
137 r852_read_reg_dword(dev, R852_DMA_IRQ_STA));
138
139 r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0);
140 r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0);
141
142 /* Precaution to make sure HW doesn't write to random kernel memory */
143 r852_write_reg_dword(dev, R852_DMA_ADDR,
144 cpu_to_le32(dev->phys_bounce_buffer));
145 r852_read_reg_dword(dev, R852_DMA_ADDR);
146
147 dev->dma_error = error;
148 dev->dma_stage = 0;
149
150 if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
151 pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN,
152 dev->dma_dir ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
153 complete(&dev->dma_done);
154}
155
156/*
157 * Wait till dma is done, which includes both of its phases
158 */
159static int r852_dma_wait(struct r852_device *dev)
160{
161 long timeout = wait_for_completion_timeout(&dev->dma_done,
162 msecs_to_jiffies(1000));
163 if (!timeout) {
164 dbg("timeout waiting for DMA interrupt");
165 return -ETIMEDOUT;
166 }
167
168 return 0;
169}
170
171/*
172 * Read/Write one page using dma. Only pages can be read (512 bytes)
173*/
174static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
175{
176 int bounce = 0;
177 unsigned long flags;
178 int error;
179
180 dev->dma_error = 0;
181
182 /* Set dma direction */
183 dev->dma_dir = do_read;
184 dev->dma_stage = 1;
185
186 dbg_verbose("doing dma %s ", do_read ? "read" : "write");
187
188 /* Set initial dma state: for reads, first fill the on-board buffer
189 from the device; for writes, first fill the buffer from memory */
190 dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY;
191
192 /* if incoming buffer is not page aligned, we should do bounce */
193 if ((unsigned long)buf & (R852_DMA_LEN-1))
194 bounce = 1;
195
196 if (!bounce) {
197 dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf,
198 R852_DMA_LEN,
199 (do_read ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
200
201 if (pci_dma_mapping_error(dev->pci_dev, dev->phys_dma_addr))
202 bounce = 1;
203 }
204
205 if (bounce) {
206 dbg_verbose("dma: using bounce buffer");
207 dev->phys_dma_addr = dev->phys_bounce_buffer;
208 if (!do_read)
209 memcpy(dev->bounce_buffer, buf, R852_DMA_LEN);
210 }
211
212 /* Enable DMA */
213 spin_lock_irqsave(&dev->irqlock, flags);
214 r852_dma_enable(dev);
215 spin_unlock_irqrestore(&dev->irqlock, flags);
216
217 /* Wait till complete */
218 error = r852_dma_wait(dev);
219
220 if (error) {
221 r852_dma_done(dev, error);
222 return;
223 }
224
225 if (do_read && bounce)
226 memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN);
227}
228
229/*
230 * Program data lines of the nand chip to send data to it
231 */
232void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
233{
234 struct r852_device *dev = r852_get_dev(mtd);
235 uint32_t reg;
236
237 /* Don't allow any access to hardware if we suspect card removal */
238 if (dev->card_unstable)
239 return;
240
241 /* Special case for a whole-sector write */
242 if (len == R852_DMA_LEN && dev->dma_usable) {
243 r852_do_dma(dev, (uint8_t *)buf, 0);
244 return;
245 }
246
247 /* write DWORD chunks - faster */
248 while (len) {
249 reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
250 r852_write_reg_dword(dev, R852_DATALINE, reg);
251 buf += 4;
252 len -= 4;
253
254 }
255
256 /* write rest */
257 while (len)
258 r852_write_reg(dev, R852_DATALINE, *buf++);
259}
260
261/*
262 * Read data lines of the nand chip to retrieve data
263 */
264void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
265{
266 struct r852_device *dev = r852_get_dev(mtd);
267 uint32_t reg;
268
269 if (dev->card_unstable) {
270 /* since we can't signal an error here, at least return a
271 predictable buffer */
272 memset(buf, 0, len);
273 return;
274 }
275
276 /* special case for whole sector read */
277 if (len == R852_DMA_LEN && dev->dma_usable) {
278 r852_do_dma(dev, buf, 1);
279 return;
280 }
281
282 /* read in dword sized chunks */
283 while (len >= 4) {
284
285 reg = r852_read_reg_dword(dev, R852_DATALINE);
286 *buf++ = reg & 0xFF;
287 *buf++ = (reg >> 8) & 0xFF;
288 *buf++ = (reg >> 16) & 0xFF;
289 *buf++ = (reg >> 24) & 0xFF;
290 len -= 4;
291 }
292
293 /* read the rest byte by byte */
294 while (len--)
295 *buf++ = r852_read_reg(dev, R852_DATALINE);
296}
297
298/*
299 * Read one byte from nand chip
300 */
301static uint8_t r852_read_byte(struct mtd_info *mtd)
302{
303 struct r852_device *dev = r852_get_dev(mtd);
304
305 /* Same problem as in r852_read_buf.... */
306 if (dev->card_unstable)
307 return 0;
308
309 return r852_read_reg(dev, R852_DATALINE);
310}
311
312
313/*
314 * Readback the buffer to verify it
315 */
316int r852_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
317{
318 struct r852_device *dev = r852_get_dev(mtd);
319
320 /* We can't be sure about anything here... */
321 if (dev->card_unstable)
322 return -1;
323
324 /* This will never happen, unless you wired up a nand chip
325 with > 512 bytes page size to the reader */
326 if (len > SM_SECTOR_SIZE)
327 return 0;
328
329 r852_read_buf(mtd, dev->tmp_buffer, len);
330 return memcmp(buf, dev->tmp_buffer, len);
331}
332
333/*
334 * Control several chip lines & send commands
335 */
336void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
337{
338 struct r852_device *dev = r852_get_dev(mtd);
339
340 if (dev->card_unstable)
341 return;
342
343 if (ctrl & NAND_CTRL_CHANGE) {
344
345 dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND |
346 R852_CTL_ON | R852_CTL_CARDENABLE);
347
348 if (ctrl & NAND_ALE)
349 dev->ctlreg |= R852_CTL_DATA;
350
351 if (ctrl & NAND_CLE)
352 dev->ctlreg |= R852_CTL_COMMAND;
353
354 if (ctrl & NAND_NCE)
355 dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON);
356 else
357 dev->ctlreg &= ~R852_CTL_WRITE;
358
359 /* when a write is started, enable write access */
360 if (dat == NAND_CMD_ERASE1)
361 dev->ctlreg |= R852_CTL_WRITE;
362
363 r852_write_reg(dev, R852_CTL, dev->ctlreg);
364 }
365
366 /* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need
367 to set write mode */
368 if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) {
369 dev->ctlreg |= R852_CTL_WRITE;
370 r852_write_reg(dev, R852_CTL, dev->ctlreg);
371 }
372
373 if (dat != NAND_CMD_NONE)
374 r852_write_reg(dev, R852_DATALINE, dat);
375}
376
377/*
378 * Wait till card is ready.
379 * Based on nand_wait, but returns an error on DMA failure
380 */
381int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
382{
383 struct r852_device *dev = (struct r852_device *)chip->priv;
384
385 unsigned long timeout;
386 int status;
387
388 timeout = jiffies + (chip->state == FL_ERASING ?
389 msecs_to_jiffies(400) : msecs_to_jiffies(20));
390
391 while (time_before(jiffies, timeout))
392 if (chip->dev_ready(mtd))
393 break;
394
395 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
396 status = (int)chip->read_byte(mtd);
397
398 /* Unfortunately, there is no way to send a detailed error status... */
399 if (dev->dma_error) {
400 status |= NAND_STATUS_FAIL;
401 dev->dma_error = 0;
402 }
403 return status;
404}
405
406/*
407 * Check if card is ready
408 */
409
410int r852_ready(struct mtd_info *mtd)
411{
412 struct r852_device *dev = r852_get_dev(mtd);
413 return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
414}
415
416
417/*
418 * Set ECC engine mode
419*/
420
421void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
422{
423 struct r852_device *dev = r852_get_dev(mtd);
424
425 if (dev->card_unstable)
426 return;
427
428 switch (mode) {
429 case NAND_ECC_READ:
430 case NAND_ECC_WRITE:
431 /* enable ecc generation/check*/
432 dev->ctlreg |= R852_CTL_ECC_ENABLE;
433
434 /* flush ecc buffer */
435 r852_write_reg(dev, R852_CTL,
436 dev->ctlreg | R852_CTL_ECC_ACCESS);
437
438 r852_read_reg_dword(dev, R852_DATALINE);
439 r852_write_reg(dev, R852_CTL, dev->ctlreg);
440 return;
441
442 case NAND_ECC_READSYN:
443 /* disable ecc generation */
444 dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
445 r852_write_reg(dev, R852_CTL, dev->ctlreg);
446 }
447}
448
449/*
450 * Calculate ECC, only used for writes
451 */
452
453int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
454 uint8_t *ecc_code)
455{
456 struct r852_device *dev = r852_get_dev(mtd);
457 struct sm_oob *oob = (struct sm_oob *)ecc_code;
458 uint32_t ecc1, ecc2;
459
460 if (dev->card_unstable)
461 return 0;
462
463 dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
464 r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
465
466 ecc1 = r852_read_reg_dword(dev, R852_DATALINE);
467 ecc2 = r852_read_reg_dword(dev, R852_DATALINE);
468
469 oob->ecc1[0] = (ecc1) & 0xFF;
470 oob->ecc1[1] = (ecc1 >> 8) & 0xFF;
471 oob->ecc1[2] = (ecc1 >> 16) & 0xFF;
472
473 oob->ecc2[0] = (ecc2) & 0xFF;
474 oob->ecc2[1] = (ecc2 >> 8) & 0xFF;
475 oob->ecc2[2] = (ecc2 >> 16) & 0xFF;
476
477 r852_write_reg(dev, R852_CTL, dev->ctlreg);
478 return 0;
479}
480
481/*
482 * Correct the data using ECC, hw did almost everything for us
483 */
484
485int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
486 uint8_t *read_ecc, uint8_t *calc_ecc)
487{
488 uint16_t ecc_reg;
489 uint8_t ecc_status, err_byte;
490 int i, error = 0;
491
492 struct r852_device *dev = r852_get_dev(mtd);
493
494 if (dev->card_unstable)
495 return 0;
496
497 r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
498 ecc_reg = r852_read_reg_dword(dev, R852_DATALINE);
499 r852_write_reg(dev, R852_CTL, dev->ctlreg);
500
501 for (i = 0 ; i <= 1 ; i++) {
502
503 ecc_status = (ecc_reg >> 8) & 0xFF;
504
505 /* ecc uncorrectable error */
506 if (ecc_status & R852_ECC_FAIL) {
507 dbg("ecc: unrecoverable error, in half %d", i);
508 error = -1;
509 goto exit;
510 }
511
512 /* correctable error */
513 if (ecc_status & R852_ECC_CORRECTABLE) {
514
515 err_byte = ecc_reg & 0xFF;
516 dbg("ecc: recoverable error, "
517 "in half %d, byte %d, bit %d", i,
518 err_byte, ecc_status & R852_ECC_ERR_BIT_MSK);
519
520 dat[err_byte] ^=
521 1 << (ecc_status & R852_ECC_ERR_BIT_MSK);
522 error++;
523 }
524
525 dat += 256;
526 ecc_reg >>= 16;
527 }
528exit:
529 return error;
530}
531
532/*
533 * This is copy of nand_read_oob_std
534 * nand_read_oob_syndrome assumes we can send column address - we can't
535 */
536static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
537 int page, int sndcmd)
538{
539 if (sndcmd) {
540 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
541 sndcmd = 0;
542 }
543 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
544 return sndcmd;
545}
546
547/*
548 * Start the nand engine
549 */
550
551void r852_engine_enable(struct r852_device *dev)
552{
553 if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
554 r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
555 r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
556 } else {
557 r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
558 r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
559 }
560 msleep(300);
561 r852_write_reg(dev, R852_CTL, 0);
562}
563
564
565/*
566 * Stop the nand engine
567 */
568
569void r852_engine_disable(struct r852_device *dev)
570{
571 r852_write_reg_dword(dev, R852_HW, 0);
572 r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
573}
574
575/*
576 * Test if card is present
577 */
578
579void r852_card_update_present(struct r852_device *dev)
580{
581 unsigned long flags;
582 uint8_t reg;
583
584 spin_lock_irqsave(&dev->irqlock, flags);
585 reg = r852_read_reg(dev, R852_CARD_STA);
586 dev->card_detected = !!(reg & R852_CARD_STA_PRESENT);
587 spin_unlock_irqrestore(&dev->irqlock, flags);
588}
589
590/*
591 * Update card detection IRQ state according to current card state
592 * which is read in r852_card_update_present
593 */
594void r852_update_card_detect(struct r852_device *dev)
595{
596 int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
597 dev->card_unstable = 0;
598
599 card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT);
600 card_detect_reg |= R852_CARD_IRQ_GENABLE;
601
602 card_detect_reg |= dev->card_detected ?
603 R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT;
604
605 r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
606}
607
608ssize_t r852_media_type_show(struct device *sys_dev,
609 struct device_attribute *attr, char *buf)
610{
611 struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
612 struct r852_device *dev = r852_get_dev(mtd);
613 char *data = dev->sm ? "smartmedia" : "xd";
614
615 strcpy(buf, data);
616 return strlen(data);
617}
618
619DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);
620
621
622/* Detect properties of card in slot */
623void r852_update_media_status(struct r852_device *dev)
624{
625 uint8_t reg;
626 unsigned long flags;
627 int readonly;
628
629 spin_lock_irqsave(&dev->irqlock, flags);
630 if (!dev->card_detected) {
631 message("card removed");
632 spin_unlock_irqrestore(&dev->irqlock, flags);
633 return ;
634 }
635
636 readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO;
637 reg = r852_read_reg(dev, R852_DMA_CAP);
638 dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT);
639
640 message("detected %s %s card in slot",
641 dev->sm ? "SmartMedia" : "xD",
642 readonly ? "readonly" : "writeable");
643
644 dev->readonly = readonly;
645 spin_unlock_irqrestore(&dev->irqlock, flags);
646}
647
648/*
649 * Register the nand device
650 * Called when the card is detected
651 */
652int r852_register_nand_device(struct r852_device *dev)
653{
654 dev->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
655
656 if (!dev->mtd)
657 goto error1;
658
659 WARN_ON(dev->card_registred);
660
661 dev->mtd->owner = THIS_MODULE;
662 dev->mtd->priv = dev->chip;
663 dev->mtd->dev.parent = &dev->pci_dev->dev;
664
665 if (dev->readonly)
666 dev->chip->options |= NAND_ROM;
667
668 r852_engine_enable(dev);
669
670 if (sm_register_device(dev->mtd, dev->sm))
671 goto error2;
672
673 if (device_create_file(&dev->mtd->dev, &dev_attr_media_type))
674 message("can't create media type sysfs attribute");
675
676 dev->card_registred = 1;
677 return 0;
678error2:
679 kfree(dev->mtd);
680error1:
681 /* Force card redetect */
682 dev->card_detected = 0;
683 return -1;
684}
685
686/*
687 * Unregister the card
688 */
689
690void r852_unregister_nand_device(struct r852_device *dev)
691{
692 if (!dev->card_registred)
693 return;
694
695 device_remove_file(&dev->mtd->dev, &dev_attr_media_type);
696 nand_release(dev->mtd);
697 r852_engine_disable(dev);
698 dev->card_registred = 0;
699 kfree(dev->mtd);
700 dev->mtd = NULL;
701}
702
703/* Card state updater */
704void r852_card_detect_work(struct work_struct *work)
705{
706 struct r852_device *dev =
707 container_of(work, struct r852_device, card_detect_work.work);
708
709 r852_card_update_present(dev);
710 dev->card_unstable = 0;
711
712 /* False alarm */
713 if (dev->card_detected == dev->card_registred)
714 goto exit;
715
716 /* Read media properties */
717 r852_update_media_status(dev);
718
719 /* Register the card */
720 if (dev->card_detected)
721 r852_register_nand_device(dev);
722 else
723 r852_unregister_nand_device(dev);
724exit:
725 /* Update detection logic */
726 r852_update_card_detect(dev);
727}
728
729/* Ack + disable IRQ generation */
730static void r852_disable_irqs(struct r852_device *dev)
731{
732 uint8_t reg;
733 reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
734 r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK);
735
736 reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
737 r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
738 reg & ~R852_DMA_IRQ_MASK);
739
740 r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK);
741 r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK);
742}
743
744/* Interrupt handler */
745static irqreturn_t r852_irq(int irq, void *data)
746{
747 struct r852_device *dev = (struct r852_device *)data;
748
749 uint8_t card_status, dma_status;
750 unsigned long flags;
751 irqreturn_t ret = IRQ_NONE;
752
753 spin_lock_irqsave(&dev->irqlock, flags);
754
755	/* We can receive a shared interrupt while the PCI device is suspended;
756	   in that case reads will return 0xFFFFFFFF... */
757 if (dev->insuspend)
758 goto out;
759
760 /* handle card detection interrupts first */
761 card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
762 r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);
763
764 if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {
765
766 ret = IRQ_HANDLED;
767 dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);
768
769		/* we shouldn't receive any interrupts while we wait for card
770		   to settle */
771 WARN_ON(dev->card_unstable);
772
773 /* disable irqs while card is unstable */
774		/* this will time out DMA if active, but that's better than garbage */
775 r852_disable_irqs(dev);
776
777 if (dev->card_unstable)
778 goto out;
779
780		/* let the card state settle a bit, then do the work */
781 dev->card_unstable = 1;
782 queue_delayed_work(dev->card_workqueue,
783 &dev->card_detect_work, msecs_to_jiffies(100));
784 goto out;
785 }
786
787
788 /* Handle dma interrupts */
789 dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
790 r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);
791
792 if (dma_status & R852_DMA_IRQ_MASK) {
793
794 ret = IRQ_HANDLED;
795
796 if (dma_status & R852_DMA_IRQ_ERROR) {
797			dbg("received dma error IRQ");
798 r852_dma_done(dev, -EIO);
799 goto out;
800 }
801
802		/* received a DMA interrupt out of nowhere? */
803 WARN_ON_ONCE(dev->dma_stage == 0);
804
805 if (dev->dma_stage == 0)
806 goto out;
807
808 /* done device access */
809 if (dev->dma_state == DMA_INTERNAL &&
810 (dma_status & R852_DMA_IRQ_INTERNAL)) {
811
812 dev->dma_state = DMA_MEMORY;
813 dev->dma_stage++;
814 }
815
816 /* done memory DMA */
817 if (dev->dma_state == DMA_MEMORY &&
818 (dma_status & R852_DMA_IRQ_MEMORY)) {
819 dev->dma_state = DMA_INTERNAL;
820 dev->dma_stage++;
821 }
822
823 /* Enable 2nd half of dma dance */
824 if (dev->dma_stage == 2)
825 r852_dma_enable(dev);
826
827 /* Operation done */
828 if (dev->dma_stage == 3)
829 r852_dma_done(dev, 0);
830 goto out;
831 }
832
833 /* Handle unknown interrupts */
834 if (dma_status)
835 dbg("bad dma IRQ status = %x", dma_status);
836
837 if (card_status & ~R852_CARD_STA_CD)
838 dbg("strange card status = %x", card_status);
839
840out:
841 spin_unlock_irqrestore(&dev->irqlock, flags);
842 return ret;
843}
844
845int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
846{
847 int error;
848 struct nand_chip *chip;
849 struct r852_device *dev;
850
851 /* pci initialization */
852 error = pci_enable_device(pci_dev);
853
854 if (error)
855 goto error1;
856
857 pci_set_master(pci_dev);
858
859 error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
860 if (error)
861 goto error2;
862
863 error = pci_request_regions(pci_dev, DRV_NAME);
864
865 if (error)
866 goto error3;
867
868 error = -ENOMEM;
869
870 /* init nand chip, but register it only on card insert */
871 chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
872
873 if (!chip)
874 goto error4;
875
876 /* commands */
877 chip->cmd_ctrl = r852_cmdctl;
878 chip->waitfunc = r852_wait;
879 chip->dev_ready = r852_ready;
880
881 /* I/O */
882 chip->read_byte = r852_read_byte;
883 chip->read_buf = r852_read_buf;
884 chip->write_buf = r852_write_buf;
885 chip->verify_buf = r852_verify_buf;
886
887 /* ecc */
888 chip->ecc.mode = NAND_ECC_HW_SYNDROME;
889 chip->ecc.size = R852_DMA_LEN;
890 chip->ecc.bytes = SM_OOB_SIZE;
891 chip->ecc.hwctl = r852_ecc_hwctl;
892 chip->ecc.calculate = r852_ecc_calculate;
893 chip->ecc.correct = r852_ecc_correct;
894
895 /* TODO: hack */
896 chip->ecc.read_oob = r852_read_oob;
897
898 /* init our device structure */
899 dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);
900
901 if (!dev)
902 goto error5;
903
904 chip->priv = dev;
905 dev->chip = chip;
906 dev->pci_dev = pci_dev;
907 pci_set_drvdata(pci_dev, dev);
908
909 dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN,
910 &dev->phys_bounce_buffer);
911
912 if (!dev->bounce_buffer)
913 goto error6;
914
915
916 error = -ENODEV;
917 dev->mmio = pci_ioremap_bar(pci_dev, 0);
918
919 if (!dev->mmio)
920 goto error7;
921
922 error = -ENOMEM;
923 dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
924
925 if (!dev->tmp_buffer)
926 goto error8;
927
928 init_completion(&dev->dma_done);
929
930 dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
931
932 if (!dev->card_workqueue)
933 goto error9;
934
935 INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work);
936
937	/* shutdown everything - precaution */
938 r852_engine_disable(dev);
939 r852_disable_irqs(dev);
940
941 r852_dma_test(dev);
942
943 /*register irq handler*/
944 error = -ENODEV;
945 if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
946 DRV_NAME, dev))
947 goto error10;
948
949 dev->irq = pci_dev->irq;
950 spin_lock_init(&dev->irqlock);
951
952 /* kick initial present test */
953 dev->card_detected = 0;
954 r852_card_update_present(dev);
955 queue_delayed_work(dev->card_workqueue,
956 &dev->card_detect_work, 0);
957
958
959	printk(KERN_NOTICE DRV_NAME ": driver loaded successfully\n");
960 return 0;
961
962error10:
963 destroy_workqueue(dev->card_workqueue);
964error9:
965 kfree(dev->tmp_buffer);
966error8:
967 pci_iounmap(pci_dev, dev->mmio);
968error7:
969 pci_free_consistent(pci_dev, R852_DMA_LEN,
970 dev->bounce_buffer, dev->phys_bounce_buffer);
971error6:
972 kfree(dev);
973error5:
974 kfree(chip);
975error4:
976 pci_release_regions(pci_dev);
977error3:
978error2:
979 pci_disable_device(pci_dev);
980error1:
981 return error;
982}
983
984void r852_remove(struct pci_dev *pci_dev)
985{
986 struct r852_device *dev = pci_get_drvdata(pci_dev);
987
988 /* Stop detect workqueue -
989 we are going to unregister the device anyway*/
990 cancel_delayed_work_sync(&dev->card_detect_work);
991 destroy_workqueue(dev->card_workqueue);
992
993	/* Unregister the device; this may cause more IO */
994 r852_unregister_nand_device(dev);
995
996 /* Stop interrupts */
997 r852_disable_irqs(dev);
998 synchronize_irq(dev->irq);
999 free_irq(dev->irq, dev);
1000
1001 /* Cleanup */
1002 kfree(dev->tmp_buffer);
1003 pci_iounmap(pci_dev, dev->mmio);
1004 pci_free_consistent(pci_dev, R852_DMA_LEN,
1005 dev->bounce_buffer, dev->phys_bounce_buffer);
1006
1007 kfree(dev->chip);
1008 kfree(dev);
1009
1010 /* Shutdown the PCI device */
1011 pci_release_regions(pci_dev);
1012 pci_disable_device(pci_dev);
1013}
1014
1015void r852_shutdown(struct pci_dev *pci_dev)
1016{
1017 struct r852_device *dev = pci_get_drvdata(pci_dev);
1018
1019 cancel_delayed_work_sync(&dev->card_detect_work);
1020 r852_disable_irqs(dev);
1021 synchronize_irq(dev->irq);
1022 pci_disable_device(pci_dev);
1023}
1024
1025#ifdef CONFIG_PM
1026int r852_suspend(struct device *device)
1027{
1028 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
1029 unsigned long flags;
1030
1031 if (dev->ctlreg & R852_CTL_CARDENABLE)
1032 return -EBUSY;
1033
1034 /* First make sure the detect work is gone */
1035 cancel_delayed_work_sync(&dev->card_detect_work);
1036
1037 /* Turn off the interrupts and stop the device */
1038 r852_disable_irqs(dev);
1039 r852_engine_disable(dev);
1040
1041 spin_lock_irqsave(&dev->irqlock, flags);
1042 dev->insuspend = 1;
1043 spin_unlock_irqrestore(&dev->irqlock, flags);
1044
1045	/* At this point, even if the interrupt handler is running, it will quit */
1046	/* so wait for that to happen explicitly */
1047 synchronize_irq(dev->irq);
1048
1049	/* If the card was pulled out during suspend, which is very
1050	   unlikely, we will remove it on resume; it is too late now
1051	   anyway... */
1052 dev->card_unstable = 0;
1053
1054 pci_save_state(to_pci_dev(device));
1055 return pci_prepare_to_sleep(to_pci_dev(device));
1056}
1057
1058int r852_resume(struct device *device)
1059{
1060 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
1061 unsigned long flags;
1062
1063 /* Turn on the hardware */
1064 pci_back_from_sleep(to_pci_dev(device));
1065 pci_restore_state(to_pci_dev(device));
1066
1067 r852_disable_irqs(dev);
1068 r852_card_update_present(dev);
1069 r852_engine_disable(dev);
1070
1071
1072	/* Now it's safe for the IRQ to run */
1073 spin_lock_irqsave(&dev->irqlock, flags);
1074 dev->insuspend = 0;
1075 spin_unlock_irqrestore(&dev->irqlock, flags);
1076
1077
1078 /* If card status changed, just do the work */
1079 if (dev->card_detected != dev->card_registred) {
1080 dbg("card was %s during low power state",
1081 dev->card_detected ? "added" : "removed");
1082
1083 queue_delayed_work(dev->card_workqueue,
1084 &dev->card_detect_work, 1000);
1085 return 0;
1086 }
1087
1088 /* Otherwise, initialize the card */
1089 if (dev->card_registred) {
1090 r852_engine_enable(dev);
1091 dev->chip->select_chip(dev->mtd, 0);
1092 dev->chip->cmdfunc(dev->mtd, NAND_CMD_RESET, -1, -1);
1093 dev->chip->select_chip(dev->mtd, -1);
1094 }
1095
1096 /* Program card detection IRQ */
1097 r852_update_card_detect(dev);
1098 return 0;
1099}
1100#else
1101#define r852_suspend NULL
1102#define r852_resume NULL
1103#endif
1104
1105static const struct pci_device_id r852_pci_id_tbl[] = {
1106
1107 { PCI_VDEVICE(RICOH, 0x0852), },
1108 { },
1109};
1110
1111MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
1112
1113SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
1114
1115
1116static struct pci_driver r852_pci_driver = {
1117 .name = DRV_NAME,
1118 .id_table = r852_pci_id_tbl,
1119 .probe = r852_probe,
1120 .remove = r852_remove,
1121 .shutdown = r852_shutdown,
1122 .driver.pm = &r852_pm_ops,
1123};
1124
1125static __init int r852_module_init(void)
1126{
1127 return pci_register_driver(&r852_pci_driver);
1128}
1129
1130static void __exit r852_module_exit(void)
1131{
1132 pci_unregister_driver(&r852_pci_driver);
1133}
1134
1135module_init(r852_module_init);
1136module_exit(r852_module_exit);
1137
1138MODULE_LICENSE("GPL");
1139MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
1140MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");
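For reference, the "dma dance" handled in r852_irq() above consists of two steps per transfer: an internal step (card <-> controller buffer) and a memory step (controller buffer <-> RAM), tracked by dma_state and dma_stage. The stand-alone sketch below mirrors that bookkeeping in plain user-space C; the tracker struct, helper names and the main() harness are illustrative only and are not part of the driver.

/*
 * Minimal sketch of the two-step DMA bookkeeping performed by the
 * interrupt handler above. Illustrative only; not part of the patch.
 */
#include <stdio.h>

#define IRQ_MEMORY	0x01	/* memory <-> internal hw buffer done */
#define IRQ_INTERNAL	0x04	/* internal hw buffer <-> card done */

enum dma_step { STEP_INTERNAL, STEP_MEMORY };

struct dma_tracker {
	int stage;		/* 0 idle, 1 first step, 2 second step, 3 both done */
	enum dma_step state;	/* which completion we are waiting for */
};

/* Returns 1 once both halves of the transfer have completed. */
static int dma_irq(struct dma_tracker *t, int status)
{
	if (t->state == STEP_INTERNAL && (status & IRQ_INTERNAL)) {
		t->state = STEP_MEMORY;
		t->stage++;
	}
	if (t->state == STEP_MEMORY && (status & IRQ_MEMORY)) {
		t->state = STEP_INTERNAL;
		t->stage++;
	}
	if (t->stage == 2)
		printf("kick off second half of the transfer\n");
	return t->stage == 3;
}

int main(void)
{
	/* A read: the internal step completes first, then the memory step. */
	struct dma_tracker t = { .stage = 1, .state = STEP_INTERNAL };

	dma_irq(&t, IRQ_INTERNAL);
	if (dma_irq(&t, IRQ_MEMORY))
		printf("transfer done\n");
	return 0;
}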
diff --git a/drivers/mtd/nand/r852.h b/drivers/mtd/nand/r852.h
new file mode 100644
index 00000000000..8096cc280c7
--- /dev/null
+++ b/drivers/mtd/nand/r852.h
@@ -0,0 +1,163 @@
1/*
2 * Copyright © 2009 - Maxim Levitsky
3 * driver for Ricoh xD readers
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/pci.h>
11#include <linux/completion.h>
12#include <linux/workqueue.h>
13#include <linux/mtd/nand.h>
14#include <linux/spinlock.h>
15
16
17/* nand interface + ecc
18   a byte write/read does one cycle on the nand data lines.
19   a dword write/read does 4 cycles.
20   if R852_CTL_ECC_ACCESS is set in R852_CTL, a dword read returns the
21   results of ecc correction, if a DMA read was done before.
22   If a write was done, two dword reads return the generated ecc checksums.
23*/
24#define R852_DATALINE 0x00
25
26/* control register */
27#define R852_CTL 0x04
28#define R852_CTL_COMMAND 0x01 /* send command (#CLE)*/
29#define R852_CTL_DATA 0x02 /* read/write data (#ALE)*/
30#define R852_CTL_ON		0x04 /* only seems to control the hd led, */
31 /* but has to be set on start...*/
32#define R852_CTL_RESET 0x08 /* unknown, set only on start once*/
33#define R852_CTL_CARDENABLE 0x10 /* probably (#CE) - always set*/
34#define R852_CTL_ECC_ENABLE 0x20 /* enable ecc engine */
35#define R852_CTL_ECC_ACCESS 0x40 /* read/write ecc via reg #0*/
36#define R852_CTL_WRITE 0x80 /* set when performing writes (#WP) */
37
38/* card detection status */
39#define R852_CARD_STA 0x05
40
41#define R852_CARD_STA_CD 0x01 /* state of #CD line, same as 0x04 */
42#define R852_CARD_STA_RO 0x02 /* card is readonly */
43#define R852_CARD_STA_PRESENT 0x04 /* card is present (#CD) */
44#define R852_CARD_STA_ABSENT 0x08 /* card is absent */
45#define R852_CARD_STA_BUSY 0x80 /* card is busy - (#R/B) */
46
47/* card detection irq status & enable*/
48#define R852_CARD_IRQ_STA 0x06 /* IRQ status */
49#define R852_CARD_IRQ_ENABLE 0x07 /* IRQ enable */
50
51#define R852_CARD_IRQ_CD 0x01 /* fire when #CD lights, same as 0x04*/
52#define R852_CARD_IRQ_REMOVE 0x04 /* detect card removal */
53#define R852_CARD_IRQ_INSERT 0x08 /* detect card insert */
54#define R852_CARD_IRQ_UNK1 0x10 /* unknown */
55#define R852_CARD_IRQ_GENABLE 0x80 /* general enable */
56#define R852_CARD_IRQ_MASK 0x1D
57
58
59
60/* hardware enable */
61#define R852_HW 0x08
62#define R852_HW_ENABLED 0x01 /* hw enabled */
63#define R852_HW_UNKNOWN 0x80
64
65
66/* dma capabilities */
67#define R852_DMA_CAP 0x09
68#define R852_SMBIT 0x20 /* if set with bit #6 or bit #7, then */
69 /* hw is smartmedia */
70#define R852_DMA1 0x40 /* if set w/bit #7, dma is supported */
71#define R852_DMA2 0x80 /* if set w/bit #6, dma is supported */
72
73
74/* physical DMA address - 32 bit value*/
75#define R852_DMA_ADDR 0x0C
76
77
78/* dma settings */
79#define R852_DMA_SETTINGS 0x10
80#define R852_DMA_MEMORY 0x01 /* (memory <-> internal hw buffer) */
81#define R852_DMA_READ 0x02 /* 0 = write, 1 = read */
82#define R852_DMA_INTERNAL 0x04 /* (internal hw buffer <-> card) */
83
84/* dma IRQ status */
85#define R852_DMA_IRQ_STA 0x14
86
87/* dma IRQ enable */
88#define R852_DMA_IRQ_ENABLE 0x18
89
90#define R852_DMA_IRQ_MEMORY 0x01 /* (memory <-> internal hw buffer) */
91#define R852_DMA_IRQ_ERROR 0x02 /* error did happen */
92#define R852_DMA_IRQ_INTERNAL 0x04 /* (internal hw buffer <-> card) */
93#define R852_DMA_IRQ_MASK 0x07 /* mask of all IRQ bits */
94
95
96/* ECC syndrome format - a read from reg #0 will return two copies of these,
97   one for each half of the page.
98   The first byte is the error byte location; the second is the bit location + flags */
99#define R852_ECC_ERR_BIT_MSK 0x07 /* error bit location */
100#define R852_ECC_CORRECT 0x10 /* no errors - (guessed) */
101#define R852_ECC_CORRECTABLE 0x20 /* correctable error exist */
102#define R852_ECC_FAIL 0x40 /* non correctable error detected */
103
104#define R852_DMA_LEN 512
105
106#define DMA_INTERNAL 0
107#define DMA_MEMORY 1
108
109struct r852_device {
110 void __iomem *mmio; /* mmio */
111 struct mtd_info *mtd; /* mtd backpointer */
112 struct nand_chip *chip; /* nand chip backpointer */
113 struct pci_dev *pci_dev; /* pci backpointer */
114
115 /* dma area */
116 dma_addr_t phys_dma_addr; /* bus address of buffer*/
117 struct completion dma_done; /* data transfer done */
118
119 dma_addr_t phys_bounce_buffer; /* bus address of bounce buffer */
120 uint8_t *bounce_buffer; /* virtual address of bounce buffer */
121
122 int dma_dir; /* 1 = read, 0 = write */
123 int dma_stage; /* 0 - idle, 1 - first step,
124 2 - second step */
125
126 int dma_state; /* 0 = internal, 1 = memory */
127 int dma_error; /* dma errors */
128 int dma_usable; /* is it possible to use dma */
129
130 /* card status area */
131 struct delayed_work card_detect_work;
132 struct workqueue_struct *card_workqueue;
133 int card_registred; /* card registered with mtd */
134 int card_detected; /* card detected in slot */
135	int card_unstable;	/* whether the card is inserted
136				   is not known yet */
137 int readonly; /* card is readonly */
138 int sm; /* Is card smartmedia */
139
140 /* interrupt handling */
141 spinlock_t irqlock; /* IRQ protecting lock */
142 int irq; /* irq num */
143 int insuspend; /* device is suspended */
144
145 /* misc */
146 void *tmp_buffer; /* temporary buffer */
147 uint8_t ctlreg; /* cached contents of control reg */
148};
149
150#define DRV_NAME "r852"
151
152
153#define dbg(format, ...) \
154 if (debug) \
155 printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__)
156
157#define dbg_verbose(format, ...) \
158 if (debug > 1) \
159 printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__)
160
161
162#define message(format, ...) \
163 printk(KERN_INFO DRV_NAME ": " format "\n", ## __VA_ARGS__)
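To make the syndrome format documented above concrete, here is a stand-alone sketch of how such a 32-bit word (one 16-bit entry per 256-byte half, low byte = failing byte offset, high byte = failing bit number plus status flags) could be applied to a 512-byte sector. It illustrates the documented format only; it is not a copy of the driver's own correction routine.

/*
 * Illustrative decoder for the R852 ECC syndrome format described in
 * r852.h. Stand-alone user-space code; not part of the patch.
 */
#include <stdint.h>
#include <stdio.h>

#define ECC_ERR_BIT_MSK	0x07
#define ECC_CORRECTABLE	0x20
#define ECC_FAIL	0x40

/* Returns the number of corrected bits, or -1 on an uncorrectable error. */
static int apply_syndrome(uint8_t *sector, uint32_t syndrome)
{
	int half, corrected = 0;

	for (half = 0; half < 2; half++) {
		uint8_t err_byte = syndrome & 0xFF;
		uint8_t status = (syndrome >> 8) & 0xFF;

		if (status & ECC_FAIL)
			return -1;

		if (status & ECC_CORRECTABLE) {
			sector[half * 256 + err_byte] ^=
				1 << (status & ECC_ERR_BIT_MSK);
			corrected++;
		}
		syndrome >>= 16;	/* second copy covers the second half */
	}
	return corrected;
}

int main(void)
{
	uint8_t sector[512] = { 0 };

	/* Pretend bit 3 of byte 10 in the first half went bad. */
	int n = apply_syndrome(sector, ((ECC_CORRECTABLE | 3) << 8) | 10);

	printf("corrected %d bit(s), byte 10 is now 0x%02x\n",
	       n, (unsigned)sector[10]);
	return 0;
}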
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index fa6e9c7fe51..239aadfd01b 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -929,14 +929,13 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
929 929
930 pr_debug("s3c2410_nand_probe(%p)\n", pdev); 930 pr_debug("s3c2410_nand_probe(%p)\n", pdev);
931 931
932 info = kmalloc(sizeof(*info), GFP_KERNEL); 932 info = kzalloc(sizeof(*info), GFP_KERNEL);
933 if (info == NULL) { 933 if (info == NULL) {
934 dev_err(&pdev->dev, "no memory for flash info\n"); 934 dev_err(&pdev->dev, "no memory for flash info\n");
935 err = -ENOMEM; 935 err = -ENOMEM;
936 goto exit_error; 936 goto exit_error;
937 } 937 }
938 938
939 memset(info, 0, sizeof(*info));
940 platform_set_drvdata(pdev, info); 939 platform_set_drvdata(pdev, info);
941 940
942 spin_lock_init(&info->controller.lock); 941 spin_lock_init(&info->controller.lock);
@@ -957,7 +956,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
957 956
958 /* currently we assume we have the one resource */ 957 /* currently we assume we have the one resource */
959 res = pdev->resource; 958 res = pdev->resource;
960 size = res->end - res->start + 1; 959 size = resource_size(res);
961 960
962 info->area = request_mem_region(res->start, size, pdev->name); 961 info->area = request_mem_region(res->start, size, pdev->name);
963 962
@@ -994,15 +993,13 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
994 /* allocate our information */ 993 /* allocate our information */
995 994
996 size = nr_sets * sizeof(*info->mtds); 995 size = nr_sets * sizeof(*info->mtds);
997 info->mtds = kmalloc(size, GFP_KERNEL); 996 info->mtds = kzalloc(size, GFP_KERNEL);
998 if (info->mtds == NULL) { 997 if (info->mtds == NULL) {
999 dev_err(&pdev->dev, "failed to allocate mtd storage\n"); 998 dev_err(&pdev->dev, "failed to allocate mtd storage\n");
1000 err = -ENOMEM; 999 err = -ENOMEM;
1001 goto exit_error; 1000 goto exit_error;
1002 } 1001 }
1003 1002
1004 memset(info->mtds, 0, size);
1005
1006 /* initialise all possible chips */ 1003 /* initialise all possible chips */
1007 1004
1008 nmtd = info->mtds; 1005 nmtd = info->mtds;
@@ -1013,7 +1010,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
1013 s3c2410_nand_init_chip(info, nmtd, sets); 1010 s3c2410_nand_init_chip(info, nmtd, sets);
1014 1011
1015 nmtd->scan_res = nand_scan_ident(&nmtd->mtd, 1012 nmtd->scan_res = nand_scan_ident(&nmtd->mtd,
1016 (sets) ? sets->nr_chips : 1); 1013 (sets) ? sets->nr_chips : 1,
1014 NULL);
1017 1015
1018 if (nmtd->scan_res == 0) { 1016 if (nmtd->scan_res == 0) {
1019 s3c2410_nand_update_chip(info, nmtd); 1017 s3c2410_nand_update_chip(info, nmtd);
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 34752fce079..546c2f0eb2e 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -855,7 +855,7 @@ static int __devinit flctl_probe(struct platform_device *pdev)
855 nand->read_word = flctl_read_word; 855 nand->read_word = flctl_read_word;
856 } 856 }
857 857
858 ret = nand_scan_ident(flctl_mtd, 1); 858 ret = nand_scan_ident(flctl_mtd, 1, NULL);
859 if (ret) 859 if (ret)
860 goto err; 860 goto err;
861 861
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
new file mode 100644
index 00000000000..ac80fb362e6
--- /dev/null
+++ b/drivers/mtd/nand/sm_common.c
@@ -0,0 +1,148 @@
1/*
2 * Copyright © 2009 - Maxim Levitsky
3 * Common routines & support for xD format
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <linux/kernel.h>
10#include <linux/mtd/nand.h>
11#include "sm_common.h"
12
13static struct nand_ecclayout nand_oob_sm = {
14 .eccbytes = 6,
15 .eccpos = {8, 9, 10, 13, 14, 15},
16 .oobfree = {
17 {.offset = 0 , .length = 4}, /* reserved */
18 {.offset = 6 , .length = 2}, /* LBA1 */
19 {.offset = 11, .length = 2} /* LBA2 */
20 }
21};
22
23/* NOTE: This layout is not compatible with SmartMedia, */
24/* because the 256 byte devices have a page dependent oob layout */
25/* However it does preserve the bad block markers */
26/* If you use smftl, it will bypass this and work correctly */
27/* If you do not, then you break SmartMedia compliance anyway */
28
29static struct nand_ecclayout nand_oob_sm_small = {
30 .eccbytes = 3,
31 .eccpos = {0, 1, 2},
32 .oobfree = {
33 {.offset = 3 , .length = 2}, /* reserved */
34 {.offset = 6 , .length = 2}, /* LBA1 */
35 }
36};
37
38
39static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
40{
41 struct mtd_oob_ops ops;
42 struct sm_oob oob;
43 int ret, error = 0;
44
45 memset(&oob, -1, SM_OOB_SIZE);
46 oob.block_status = 0x0F;
47
48 /* As long as this function is called on erase block boundaries
49 it will work correctly for 256 byte nand */
50 ops.mode = MTD_OOB_PLACE;
51 ops.ooboffs = 0;
52 ops.ooblen = mtd->oobsize;
53 ops.oobbuf = (void *)&oob;
54 ops.datbuf = NULL;
55
56
57 ret = mtd->write_oob(mtd, ofs, &ops);
58 if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
59 printk(KERN_NOTICE
60 "sm_common: can't mark sector at %i as bad\n",
61 (int)ofs);
62 error = -EIO;
63 } else
64 mtd->ecc_stats.badblocks++;
65
66 return error;
67}
68
69
70static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
71 {"SmartMedia 1MiB 5V", 0x6e, 256, 1, 0x1000, 0},
72 {"SmartMedia 1MiB 3,3V", 0xe8, 256, 1, 0x1000, 0},
73 {"SmartMedia 1MiB 3,3V", 0xec, 256, 1, 0x1000, 0},
74 {"SmartMedia 2MiB 3,3V", 0xea, 256, 2, 0x1000, 0},
75 {"SmartMedia 2MiB 5V", 0x64, 256, 2, 0x1000, 0},
76 {"SmartMedia 2MiB 3,3V ROM", 0x5d, 512, 2, 0x2000, NAND_ROM},
77 {"SmartMedia 4MiB 3,3V", 0xe3, 512, 4, 0x2000, 0},
78 {"SmartMedia 4MiB 3,3/5V", 0xe5, 512, 4, 0x2000, 0},
79 {"SmartMedia 4MiB 5V", 0x6b, 512, 4, 0x2000, 0},
80 {"SmartMedia 4MiB 3,3V ROM", 0xd5, 512, 4, 0x2000, NAND_ROM},
81 {"SmartMedia 8MiB 3,3V", 0xe6, 512, 8, 0x2000, 0},
82 {"SmartMedia 8MiB 3,3V ROM", 0xd6, 512, 8, 0x2000, NAND_ROM},
83 {"SmartMedia 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
84 {"SmartMedia 16MiB 3,3V ROM", 0x57, 512, 16, 0x4000, NAND_ROM},
85 {"SmartMedia 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
86 {"SmartMedia 32MiB 3,3V ROM", 0x58, 512, 32, 0x4000, NAND_ROM},
87 {"SmartMedia 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
88 {"SmartMedia 64MiB 3,3V ROM", 0xd9, 512, 64, 0x4000, NAND_ROM},
89 {"SmartMedia 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
90 {"SmartMedia 128MiB 3,3V ROM", 0xda, 512, 128, 0x4000, NAND_ROM},
91 {"SmartMedia 256MiB 3,3V", 0x71, 512, 256, 0x4000 },
92 {"SmartMedia 256MiB 3,3V ROM", 0x5b, 512, 256, 0x4000, NAND_ROM},
93 {NULL,}
94};
95
96#define XD_TYPEM (NAND_NO_AUTOINCR | NAND_BROKEN_XD)
97static struct nand_flash_dev nand_xd_flash_ids[] = {
98
99 {"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
100 {"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
101 {"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
102 {"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
103 {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, XD_TYPEM},
104 {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, XD_TYPEM},
105 {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, XD_TYPEM},
106 {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, XD_TYPEM},
107 {NULL,}
108};
109
110int sm_register_device(struct mtd_info *mtd, int smartmedia)
111{
112 struct nand_chip *chip = (struct nand_chip *)mtd->priv;
113 int ret;
114
115 chip->options |= NAND_SKIP_BBTSCAN;
116
117 /* Scan for card properties */
118 ret = nand_scan_ident(mtd, 1, smartmedia ?
119 nand_smartmedia_flash_ids : nand_xd_flash_ids);
120
121 if (ret)
122 return ret;
123
124	/* Bad block marker position */
125 chip->badblockpos = 0x05;
126 chip->badblockbits = 7;
127 chip->block_markbad = sm_block_markbad;
128
129 /* ECC layout */
130 if (mtd->writesize == SM_SECTOR_SIZE)
131 chip->ecc.layout = &nand_oob_sm;
132 else if (mtd->writesize == SM_SMALL_PAGE)
133 chip->ecc.layout = &nand_oob_sm_small;
134 else
135 return -ENODEV;
136
137 ret = nand_scan_tail(mtd);
138
139 if (ret)
140 return ret;
141
142 return add_mtd_device(mtd);
143}
144EXPORT_SYMBOL_GPL(sm_register_device);
145
146MODULE_LICENSE("GPL");
147MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
148MODULE_DESCRIPTION("Common SmartMedia/xD functions");
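Each row of the ID tables above follows the nand_flash_dev field order: name, device ID byte, page size in bytes, chip size in MiB, erase-block size in bytes and option flags. The simplified stand-in below (its struct and the sample_xd_ids table are illustrative, not the kernel's definitions) shows how such a table is typically searched by device ID, which is what nand_scan_ident() does with the table passed by sm_register_device().

/*
 * Simplified stand-in for the flash ID tables above. Illustrative
 * only; not the kernel's nand_flash_dev definition.
 */
#include <stdio.h>

struct flash_id {
	const char *name;
	int id;
	unsigned long pagesize;		/* bytes */
	unsigned long chipsize;		/* MiB */
	unsigned long erasesize;	/* bytes */
	unsigned long options;
};

static const struct flash_id sample_xd_ids[] = {
	{ "xD 16MiB 3,3V",  0x73, 512,  16, 0x4000, 0 },
	{ "xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, 0 },
	{ NULL, 0, 0, 0, 0, 0 }	/* sentinel */
};

static const struct flash_id *find_id(const struct flash_id *tbl, int id)
{
	for (; tbl->name; tbl++)
		if (tbl->id == id)
			return tbl;
	return NULL;
}

int main(void)
{
	const struct flash_id *f = find_id(sample_xd_ids, 0x73);

	if (f)
		printf("%s: %lu MiB, %lu byte pages\n",
		       f->name, f->chipsize, f->pagesize);
	return 0;
}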
diff --git a/drivers/mtd/nand/sm_common.h b/drivers/mtd/nand/sm_common.h
new file mode 100644
index 00000000000..00f4a83359b
--- /dev/null
+++ b/drivers/mtd/nand/sm_common.h
@@ -0,0 +1,61 @@
1/*
2 * Copyright © 2009 - Maxim Levitsky
3 * Common routines & support for SmartMedia/xD format
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <linux/bitops.h>
10#include <linux/mtd/mtd.h>
11
12/* Full oob structure as written on the flash */
13struct sm_oob {
14 uint32_t reserved;
15 uint8_t data_status;
16 uint8_t block_status;
17 uint8_t lba_copy1[2];
18 uint8_t ecc2[3];
19 uint8_t lba_copy2[2];
20 uint8_t ecc1[3];
21} __attribute__((packed));
22
23
24/* one sector is always 512 bytes, but it can consist of two nand pages */
25#define SM_SECTOR_SIZE 512
26
27/* oob area is also 16 bytes, but might be from two pages */
28#define SM_OOB_SIZE 16
29
30/* This is the maximum zone size, and all devices that have more than one zone
31   have this size */
32#define SM_MAX_ZONE_SIZE 1024
33
34/* support for small page nand */
35#define SM_SMALL_PAGE 256
36#define SM_SMALL_OOB_SIZE 8
37
38
39extern int sm_register_device(struct mtd_info *mtd, int smartmedia);
40
41
42static inline int sm_sector_valid(struct sm_oob *oob)
43{
44 return hweight16(oob->data_status) >= 5;
45}
46
47static inline int sm_block_valid(struct sm_oob *oob)
48{
49 return hweight16(oob->block_status) >= 7;
50}
51
52static inline int sm_block_erased(struct sm_oob *oob)
53{
54 static const uint32_t erased_pattern[4] = {
55 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
56
57 /* First test for erased block */
58 if (!memcmp(oob, erased_pattern, sizeof(*oob)))
59 return 1;
60 return 0;
61}
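The packed sm_oob structure above lines up byte for byte with the nand_oob_sm layout declared in sm_common.c: LBA1 at bytes 6-7, ecc2 at 8-10, LBA2 at 11-12 and ecc1 at 13-15, filling the full 16-byte OOB. The stand-alone check below (user-space, illustrative only) prints those offsets with offsetof to make the correspondence explicit.

/*
 * Offset check for the sm_oob layout above. Illustrative only;
 * not part of the patch.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct sm_oob {
	uint32_t reserved;
	uint8_t data_status;
	uint8_t block_status;
	uint8_t lba_copy1[2];
	uint8_t ecc2[3];
	uint8_t lba_copy2[2];
	uint8_t ecc1[3];
} __attribute__((packed));

int main(void)
{
	printf("lba_copy1 at %zu, ecc2 at %zu, lba_copy2 at %zu, ecc1 at %zu\n",
	       offsetof(struct sm_oob, lba_copy1),
	       offsetof(struct sm_oob, ecc2),
	       offsetof(struct sm_oob, lba_copy2),
	       offsetof(struct sm_oob, ecc1));
	printf("total size: %zu bytes\n", sizeof(struct sm_oob));
	return 0;
}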
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index edb9b138414..884852dc7eb 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -220,7 +220,7 @@ static int __devinit socrates_nand_probe(struct of_device *ofdev,
220 dev_set_drvdata(&ofdev->dev, host); 220 dev_set_drvdata(&ofdev->dev, host);
221 221
222 /* first scan to find the device and get the page size */ 222 /* first scan to find the device and get the page size */
223 if (nand_scan_ident(mtd, 1)) { 223 if (nand_scan_ident(mtd, 1, NULL)) {
224 res = -ENXIO; 224 res = -ENXIO;
225 goto out; 225 goto out;
226 } 226 }
@@ -290,7 +290,7 @@ static int __devexit socrates_nand_remove(struct of_device *ofdev)
290 return 0; 290 return 0;
291} 291}
292 292
293static struct of_device_id socrates_nand_match[] = 293static const struct of_device_id socrates_nand_match[] =
294{ 294{
295 { 295 {
296 .compatible = "abb,socrates-nand", 296 .compatible = "abb,socrates-nand",
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index fa28f01ae00..3041d1f7ae3 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -319,7 +319,7 @@ static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
319 319
320static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio) 320static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
321{ 321{
322 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 322 struct mfd_cell *cell = dev_get_platdata(&dev->dev);
323 int ret; 323 int ret;
324 324
325 if (cell->enable) { 325 if (cell->enable) {
@@ -363,7 +363,7 @@ static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
363 363
364static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio) 364static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
365{ 365{
366 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 366 struct mfd_cell *cell = dev_get_platdata(&dev->dev);
367 367
368 tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE); 368 tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
369 if (cell->disable) 369 if (cell->disable)
@@ -372,7 +372,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
372 372
373static int tmio_probe(struct platform_device *dev) 373static int tmio_probe(struct platform_device *dev)
374{ 374{
375 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 375 struct mfd_cell *cell = dev_get_platdata(&dev->dev);
376 struct tmio_nand_data *data = cell->driver_data; 376 struct tmio_nand_data *data = cell->driver_data;
377 struct resource *fcr = platform_get_resource(dev, 377 struct resource *fcr = platform_get_resource(dev,
378 IORESOURCE_MEM, 0); 378 IORESOURCE_MEM, 0);
@@ -405,14 +405,14 @@ static int tmio_probe(struct platform_device *dev)
405 mtd->priv = nand_chip; 405 mtd->priv = nand_chip;
406 mtd->name = "tmio-nand"; 406 mtd->name = "tmio-nand";
407 407
408 tmio->ccr = ioremap(ccr->start, ccr->end - ccr->start + 1); 408 tmio->ccr = ioremap(ccr->start, resource_size(ccr));
409 if (!tmio->ccr) { 409 if (!tmio->ccr) {
410 retval = -EIO; 410 retval = -EIO;
411 goto err_iomap_ccr; 411 goto err_iomap_ccr;
412 } 412 }
413 413
414 tmio->fcr_base = fcr->start & 0xfffff; 414 tmio->fcr_base = fcr->start & 0xfffff;
415 tmio->fcr = ioremap(fcr->start, fcr->end - fcr->start + 1); 415 tmio->fcr = ioremap(fcr->start, resource_size(fcr));
416 if (!tmio->fcr) { 416 if (!tmio->fcr) {
417 retval = -EIO; 417 retval = -EIO;
418 goto err_iomap_fcr; 418 goto err_iomap_fcr;
@@ -516,7 +516,7 @@ static int tmio_remove(struct platform_device *dev)
516#ifdef CONFIG_PM 516#ifdef CONFIG_PM
517static int tmio_suspend(struct platform_device *dev, pm_message_t state) 517static int tmio_suspend(struct platform_device *dev, pm_message_t state)
518{ 518{
519 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 519 struct mfd_cell *cell = dev_get_platdata(&dev->dev);
520 520
521 if (cell->suspend) 521 if (cell->suspend)
522 cell->suspend(dev); 522 cell->suspend(dev);
@@ -527,7 +527,7 @@ static int tmio_suspend(struct platform_device *dev, pm_message_t state)
527 527
528static int tmio_resume(struct platform_device *dev) 528static int tmio_resume(struct platform_device *dev)
529{ 529{
530 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 530 struct mfd_cell *cell = dev_get_platdata(&dev->dev);
531 531
532 /* FIXME - is this required or merely another attack of the broken 532 /* FIXME - is this required or merely another attack of the broken
533 * SHARP platform? Looks suspicious. 533 * SHARP platform? Looks suspicious.
diff --git a/drivers/mtd/nand/ts7250.c b/drivers/mtd/nand/ts7250.c
deleted file mode 100644
index 0f5562aeedc..00000000000
--- a/drivers/mtd/nand/ts7250.c
+++ /dev/null
@@ -1,207 +0,0 @@
1/*
2 * drivers/mtd/nand/ts7250.c
3 *
4 * Copyright (C) 2004 Technologic Systems (support@embeddedARM.com)
5 *
6 * Derived from drivers/mtd/nand/edb7312.c
7 * Copyright (C) 2004 Marius Gröger (mag@sysgo.de)
8 *
9 * Derived from drivers/mtd/nand/autcpu12.c
10 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 * Overview:
17 * This is a device driver for the NAND flash device found on the
18 * TS-7250 board which utilizes a Samsung 32 Mbyte part.
19 */
20
21#include <linux/slab.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/mtd/mtd.h>
25#include <linux/mtd/nand.h>
26#include <linux/mtd/partitions.h>
27#include <linux/io.h>
28
29#include <mach/hardware.h>
30#include <mach/ts72xx.h>
31
32#include <asm/sizes.h>
33#include <asm/mach-types.h>
34
35/*
36 * MTD structure for TS7250 board
37 */
38static struct mtd_info *ts7250_mtd = NULL;
39
40#ifdef CONFIG_MTD_PARTITIONS
41static const char *part_probes[] = { "cmdlinepart", NULL };
42
43#define NUM_PARTITIONS 3
44
45/*
46 * Define static partitions for flash device
47 */
48static struct mtd_partition partition_info32[] = {
49 {
50 .name = "TS-BOOTROM",
51 .offset = 0x00000000,
52 .size = 0x00004000,
53 }, {
54 .name = "Linux",
55 .offset = 0x00004000,
56 .size = 0x01d00000,
57 }, {
58 .name = "RedBoot",
59 .offset = 0x01d04000,
60 .size = 0x002fc000,
61 },
62};
63
64/*
65 * Define static partitions for flash device
66 */
67static struct mtd_partition partition_info128[] = {
68 {
69 .name = "TS-BOOTROM",
70 .offset = 0x00000000,
71 .size = 0x00004000,
72 }, {
73 .name = "Linux",
74 .offset = 0x00004000,
75 .size = 0x07d00000,
76 }, {
77 .name = "RedBoot",
78 .offset = 0x07d04000,
79 .size = 0x002fc000,
80 },
81};
82#endif
83
84
85/*
86 * hardware specific access to control-lines
87 *
88 * ctrl:
89 * NAND_NCE: bit 0 -> bit 2
90 * NAND_CLE: bit 1 -> bit 1
91 * NAND_ALE: bit 2 -> bit 0
92 */
93static void ts7250_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
94{
95 struct nand_chip *chip = mtd->priv;
96
97 if (ctrl & NAND_CTRL_CHANGE) {
98 unsigned long addr = TS72XX_NAND_CONTROL_VIRT_BASE;
99 unsigned char bits;
100
101 bits = (ctrl & NAND_NCE) << 2;
102 bits |= ctrl & NAND_CLE;
103 bits |= (ctrl & NAND_ALE) >> 2;
104
105 __raw_writeb((__raw_readb(addr) & ~0x7) | bits, addr);
106 }
107
108 if (cmd != NAND_CMD_NONE)
109 writeb(cmd, chip->IO_ADDR_W);
110}
111
112/*
113 * read device ready pin
114 */
115static int ts7250_device_ready(struct mtd_info *mtd)
116{
117 return __raw_readb(TS72XX_NAND_BUSY_VIRT_BASE) & 0x20;
118}
119
120/*
121 * Main initialization routine
122 */
123static int __init ts7250_init(void)
124{
125 struct nand_chip *this;
126 const char *part_type = 0;
127 int mtd_parts_nb = 0;
128 struct mtd_partition *mtd_parts = 0;
129
130 if (!machine_is_ts72xx() || board_is_ts7200())
131 return -ENXIO;
132
133 /* Allocate memory for MTD device structure and private data */
134 ts7250_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
135 if (!ts7250_mtd) {
136 printk("Unable to allocate TS7250 NAND MTD device structure.\n");
137 return -ENOMEM;
138 }
139
140 /* Get pointer to private data */
141 this = (struct nand_chip *)(&ts7250_mtd[1]);
142
143 /* Initialize structures */
144 memset(ts7250_mtd, 0, sizeof(struct mtd_info));
145 memset(this, 0, sizeof(struct nand_chip));
146
147 /* Link the private data with the MTD structure */
148 ts7250_mtd->priv = this;
149 ts7250_mtd->owner = THIS_MODULE;
150
151 /* insert callbacks */
152 this->IO_ADDR_R = (void *)TS72XX_NAND_DATA_VIRT_BASE;
153 this->IO_ADDR_W = (void *)TS72XX_NAND_DATA_VIRT_BASE;
154 this->cmd_ctrl = ts7250_hwcontrol;
155 this->dev_ready = ts7250_device_ready;
156 this->chip_delay = 15;
157 this->ecc.mode = NAND_ECC_SOFT;
158
159 printk("Searching for NAND flash...\n");
160 /* Scan to find existence of the device */
161 if (nand_scan(ts7250_mtd, 1)) {
162 kfree(ts7250_mtd);
163 return -ENXIO;
164 }
165#ifdef CONFIG_MTD_PARTITIONS
166 ts7250_mtd->name = "ts7250-nand";
167 mtd_parts_nb = parse_mtd_partitions(ts7250_mtd, part_probes, &mtd_parts, 0);
168 if (mtd_parts_nb > 0)
169 part_type = "command line";
170 else
171 mtd_parts_nb = 0;
172#endif
173 if (mtd_parts_nb == 0) {
174 mtd_parts = partition_info32;
175 if (ts7250_mtd->size >= (128 * 0x100000))
176 mtd_parts = partition_info128;
177 mtd_parts_nb = NUM_PARTITIONS;
178 part_type = "static";
179 }
180
181 /* Register the partitions */
182 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
183 add_mtd_partitions(ts7250_mtd, mtd_parts, mtd_parts_nb);
184
185 /* Return happy */
186 return 0;
187}
188
189module_init(ts7250_init);
190
191/*
192 * Clean up routine
193 */
194static void __exit ts7250_cleanup(void)
195{
196 /* Unregister the device */
197 del_mtd_device(ts7250_mtd);
198
199 /* Free the MTD device structure */
200 kfree(ts7250_mtd);
201}
202
203module_exit(ts7250_cleanup);
204
205MODULE_LICENSE("GPL");
206MODULE_AUTHOR("Jesse Off <joff@embeddedARM.com>");
207MODULE_DESCRIPTION("MTD map driver for Technologic Systems TS-7250 board");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 863513c3b69..054a41c0ef4 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -274,7 +274,7 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
274 struct nand_chip *chip = mtd->priv; 274 struct nand_chip *chip = mtd->priv;
275 int ret; 275 int ret;
276 276
277 ret = nand_scan_ident(mtd, 1); 277 ret = nand_scan_ident(mtd, 1, NULL);
278 if (!ret) { 278 if (!ret) {
279 if (mtd->writesize >= 512) { 279 if (mtd->writesize >= 512) {
280 chip->ecc.size = mtd->writesize; 280 chip->ecc.size = mtd->writesize;