Diffstat (limited to 'drivers')
-rw-r--r--  drivers/mtd/Kconfig | 7
-rw-r--r--  drivers/mtd/Makefile | 1
-rw-r--r--  drivers/mtd/bcm47xxpart.c | 202
-rw-r--r--  drivers/mtd/chips/Kconfig | 11
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c | 14
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c | 67
-rw-r--r--  drivers/mtd/cmdlinepart.c | 183
-rw-r--r--  drivers/mtd/devices/Kconfig | 10
-rw-r--r--  drivers/mtd/devices/Makefile | 1
-rw-r--r--  drivers/mtd/devices/bcm47xxsflash.c | 105
-rw-r--r--  drivers/mtd/devices/doc2001plus.c | 14
-rw-r--r--  drivers/mtd/devices/docg3.c | 12
-rw-r--r--  drivers/mtd/devices/m25p80.c | 24
-rw-r--r--  drivers/mtd/devices/spear_smi.c | 141
-rw-r--r--  drivers/mtd/maps/Kconfig | 16
-rw-r--r--  drivers/mtd/maps/Makefile | 1
-rw-r--r--  drivers/mtd/maps/autcpu12-nvram.c | 153
-rw-r--r--  drivers/mtd/maps/pci.c | 23
-rw-r--r--  drivers/mtd/maps/physmap_of.c | 14
-rw-r--r--  drivers/mtd/maps/rbtx4939-flash.c | 2
-rw-r--r--  drivers/mtd/maps/uclinux.c | 15
-rw-r--r--  drivers/mtd/maps/wr_sbc82xx_flash.c | 174
-rw-r--r--  drivers/mtd/mtdchar.c | 6
-rw-r--r--  drivers/mtd/mtdcore.c | 21
-rw-r--r--  drivers/mtd/mtdoops.c | 14
-rw-r--r--  drivers/mtd/mtdpart.c | 5
-rw-r--r--  drivers/mtd/nand/Kconfig | 63
-rw-r--r--  drivers/mtd/nand/Makefile | 4
-rw-r--r--  drivers/mtd/nand/ams-delta.c | 13
-rw-r--r--  drivers/mtd/nand/atmel_nand.c | 987
-rw-r--r--  drivers/mtd/nand/atmel_nand_ecc.h | 114
-rw-r--r--  drivers/mtd/nand/au1550nd.c | 46
-rw-r--r--  drivers/mtd/nand/bcm_umi_bch.c | 217
-rw-r--r--  drivers/mtd/nand/bcm_umi_nand.c | 555
-rw-r--r--  drivers/mtd/nand/bf5xx_nand.c | 6
-rw-r--r--  drivers/mtd/nand/cafe_nand.c | 20
-rw-r--r--  drivers/mtd/nand/cmx270_nand.c | 13
-rw-r--r--  drivers/mtd/nand/davinci_nand.c | 78
-rw-r--r--  drivers/mtd/nand/denali.c | 12
-rw-r--r--  drivers/mtd/nand/diskonchip.c | 63
-rw-r--r--  drivers/mtd/nand/docg4.c | 43
-rw-r--r--  drivers/mtd/nand/fsl_elbc_nand.c | 51
-rw-r--r--  drivers/mtd/nand/fsl_ifc_nand.c | 52
-rw-r--r--  drivers/mtd/nand/gpio.c | 54
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-lib.c | 322
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-nand.c | 152
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-nand.h | 26
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-regs.h | 12
-rw-r--r--  drivers/mtd/nand/lpc32xx_mlc.c | 924
-rw-r--r--  drivers/mtd/nand/lpc32xx_slc.c | 1039
-rw-r--r--  drivers/mtd/nand/mpc5121_nfc.c | 22
-rw-r--r--  drivers/mtd/nand/mxc_nand.c | 168
-rw-r--r--  drivers/mtd/nand/nand_base.c | 529
-rw-r--r--  drivers/mtd/nand/nand_bbt.c | 148
-rw-r--r--  drivers/mtd/nand/nand_bcm_umi.c | 149
-rw-r--r--  drivers/mtd/nand/nand_bcm_umi.h | 336
-rw-r--r--  drivers/mtd/nand/nand_ids.c | 7
-rw-r--r--  drivers/mtd/nand/nandsim.c | 17
-rw-r--r--  drivers/mtd/nand/ndfc.c | 13
-rw-r--r--  drivers/mtd/nand/nuc900_nand.c | 17
-rw-r--r--  drivers/mtd/nand/omap2.c | 36
-rw-r--r--  drivers/mtd/nand/orion_nand.c | 1
-rw-r--r--  drivers/mtd/nand/plat_nand.c | 5
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 12
-rw-r--r--  drivers/mtd/nand/r852.c | 22
-rw-r--r--  drivers/mtd/nand/s3c2410.c | 191
-rw-r--r--  drivers/mtd/nand/sh_flctl.c | 327
-rw-r--r--  drivers/mtd/nand/socrates_nand.c | 19
-rw-r--r--  drivers/mtd/nand/tmio_nand.c | 13
-rw-r--r--  drivers/mtd/nand/txx9ndfmc.c | 13
-rw-r--r--  drivers/mtd/nand/xway_nand.c | 201
-rw-r--r--  drivers/mtd/sm_ftl.c | 1
-rw-r--r--  drivers/mtd/tests/Makefile | 1
-rw-r--r--  drivers/mtd/tests/mtd_nandbiterrs.c | 460
-rw-r--r--  drivers/mtd/tests/mtd_nandecctest.c | 294
-rw-r--r--  drivers/mtd/tests/mtd_speedtest.c | 16
-rw-r--r--  drivers/mtd/tests/mtd_stresstest.c | 39
77 files changed, 5990 insertions, 3169 deletions
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 27143e042af5..73fcbbeb78d0 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -148,6 +148,13 @@ config MTD_BCM63XX_PARTS
148 This provides partions parsing for BCM63xx devices with CFE 148 This provides partions parsing for BCM63xx devices with CFE
149 bootloaders. 149 bootloaders.
150 150
151config MTD_BCM47XX_PARTS
152 tristate "BCM47XX partitioning support"
153 depends on BCM47XX
154 help
155 This provides partitions parser for devices based on BCM47xx
156 boards.
157
151comment "User Modules And Translation Layers" 158comment "User Modules And Translation Layers"
152 159
153config MTD_CHAR 160config MTD_CHAR
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index f90135429dc7..18a38e55b2f0 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
12obj-$(CONFIG_MTD_AFS_PARTS) += afs.o 12obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
13obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o 13obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
14obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o 14obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
15obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
15 16
16# 'Users' - code which presents functionality to userspace. 17# 'Users' - code which presents functionality to userspace.
17obj-$(CONFIG_MTD_CHAR) += mtdchar.o 18obj-$(CONFIG_MTD_CHAR) += mtdchar.o
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
new file mode 100644
index 000000000000..e06d782489a6
--- /dev/null
+++ b/drivers/mtd/bcm47xxpart.c
@@ -0,0 +1,202 @@
1/*
2 * BCM47XX MTD partitioning
3 *
4 * Copyright © 2012 Rafał Miłecki <zajec5@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/mtd/mtd.h>
16#include <linux/mtd/partitions.h>
17#include <asm/mach-bcm47xx/nvram.h>
18
19/* 10 parts were found on sflash on Netgear WNDR4500 */
20#define BCM47XXPART_MAX_PARTS 12
21
22/*
23 * Amount of bytes we read when analyzing each block of flash memory.
24 * Set it big enough to allow detecting partition and reading important data.
25 */
26#define BCM47XXPART_BYTES_TO_READ 0x404
27
28/* Magics */
29#define BOARD_DATA_MAGIC 0x5246504D /* MPFR */
30#define POT_MAGIC1 0x54544f50 /* POTT */
31#define POT_MAGIC2 0x504f /* OP */
32#define ML_MAGIC1 0x39685a42
33#define ML_MAGIC2 0x26594131
34#define TRX_MAGIC 0x30524448
35
36struct trx_header {
37 uint32_t magic;
38 uint32_t length;
39 uint32_t crc32;
40 uint16_t flags;
41 uint16_t version;
42 uint32_t offset[3];
43} __packed;
44
45static void bcm47xxpart_add_part(struct mtd_partition *part, char *name,
46 u64 offset, uint32_t mask_flags)
47{
48 part->name = name;
49 part->offset = offset;
50 part->mask_flags = mask_flags;
51}
52
53static int bcm47xxpart_parse(struct mtd_info *master,
54 struct mtd_partition **pparts,
55 struct mtd_part_parser_data *data)
56{
57 struct mtd_partition *parts;
58 uint8_t i, curr_part = 0;
59 uint32_t *buf;
60 size_t bytes_read;
61 uint32_t offset;
62 uint32_t blocksize = 0x10000;
63 struct trx_header *trx;
64
65 /* Alloc */
66 parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
67 GFP_KERNEL);
68 buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL);
69
70 /* Parse block by block looking for magics */
71 for (offset = 0; offset <= master->size - blocksize;
72 offset += blocksize) {
73 /* Nothing more in higher memory */
74 if (offset >= 0x2000000)
75 break;
76
77 if (curr_part > BCM47XXPART_MAX_PARTS) {
78 pr_warn("Reached maximum number of partitions, scanning stopped!\n");
79 break;
80 }
81
82 /* Read beginning of the block */
83 if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
84 &bytes_read, (uint8_t *)buf) < 0) {
85 pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
86 offset);
87 continue;
88 }
89
90 /* CFE has small NVRAM at 0x400 */
91 if (buf[0x400 / 4] == NVRAM_HEADER) {
92 bcm47xxpart_add_part(&parts[curr_part++], "boot",
93 offset, MTD_WRITEABLE);
94 continue;
95 }
96
97 /* Standard NVRAM */
98 if (buf[0x000 / 4] == NVRAM_HEADER) {
99 bcm47xxpart_add_part(&parts[curr_part++], "nvram",
100 offset, 0);
101 continue;
102 }
103
104 /*
105 * board_data starts with board_id which differs across boards,
106 * but we can use 'MPFR' (hopefully) magic at 0x100
107 */
108 if (buf[0x100 / 4] == BOARD_DATA_MAGIC) {
109 bcm47xxpart_add_part(&parts[curr_part++], "board_data",
110 offset, MTD_WRITEABLE);
111 continue;
112 }
113
114 /* POT(TOP) */
115 if (buf[0x000 / 4] == POT_MAGIC1 &&
116 (buf[0x004 / 4] & 0xFFFF) == POT_MAGIC2) {
117 bcm47xxpart_add_part(&parts[curr_part++], "POT", offset,
118 MTD_WRITEABLE);
119 continue;
120 }
121
122 /* ML */
123 if (buf[0x010 / 4] == ML_MAGIC1 &&
124 buf[0x014 / 4] == ML_MAGIC2) {
125 bcm47xxpart_add_part(&parts[curr_part++], "ML", offset,
126 MTD_WRITEABLE);
127 continue;
128 }
129
130 /* TRX */
131 if (buf[0x000 / 4] == TRX_MAGIC) {
132 trx = (struct trx_header *)buf;
133
134 i = 0;
135 /* We have LZMA loader if offset[2] points to sth */
136 if (trx->offset[2]) {
137 bcm47xxpart_add_part(&parts[curr_part++],
138 "loader",
139 offset + trx->offset[i],
140 0);
141 i++;
142 }
143
144 bcm47xxpart_add_part(&parts[curr_part++], "linux",
145 offset + trx->offset[i], 0);
146 i++;
147
148 /*
149 * Pure rootfs size is known and can be calculated as:
150 * trx->length - trx->offset[i]. We don't fill it as
151 * we want to have jffs2 (overlay) in the same mtd.
152 */
153 bcm47xxpart_add_part(&parts[curr_part++], "rootfs",
154 offset + trx->offset[i], 0);
155 i++;
156
157 /*
158 * We have whole TRX scanned, skip to the next part. Use
159 * roundown (not roundup), as the loop will increase
160 * offset in next step.
161 */
162 offset = rounddown(offset + trx->length, blocksize);
163 continue;
164 }
165 }
166 kfree(buf);
167
168 /*
169 * Assume that partitions end at the beginning of the one they are
170 * followed by.
171 */
172 for (i = 0; i < curr_part - 1; i++)
173 parts[i].size = parts[i + 1].offset - parts[i].offset;
174 if (curr_part > 0)
175 parts[curr_part - 1].size =
176 master->size - parts[curr_part - 1].offset;
177
178 *pparts = parts;
179 return curr_part;
180};
181
182static struct mtd_part_parser bcm47xxpart_mtd_parser = {
183 .owner = THIS_MODULE,
184 .parse_fn = bcm47xxpart_parse,
185 .name = "bcm47xxpart",
186};
187
188static int __init bcm47xxpart_init(void)
189{
190 return register_mtd_parser(&bcm47xxpart_mtd_parser);
191}
192
193static void __exit bcm47xxpart_exit(void)
194{
195 deregister_mtd_parser(&bcm47xxpart_mtd_parser);
196}
197
198module_init(bcm47xxpart_init);
199module_exit(bcm47xxpart_exit);
200
201MODULE_LICENSE("GPL");
202MODULE_DESCRIPTION("MTD partitioning for BCM47XX flash memories");
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index b1e3c26edd6d..e469b01d40d2 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -43,9 +43,6 @@ choice
43 prompt "Flash cmd/query data swapping" 43 prompt "Flash cmd/query data swapping"
44 depends on MTD_CFI_ADV_OPTIONS 44 depends on MTD_CFI_ADV_OPTIONS
45 default MTD_CFI_NOSWAP 45 default MTD_CFI_NOSWAP
46
47config MTD_CFI_NOSWAP
48 bool "NO"
49 ---help--- 46 ---help---
50 This option defines the way in which the CPU attempts to arrange 47 This option defines the way in which the CPU attempts to arrange
51 data bits when writing the 'magic' commands to the chips. Saying 48 data bits when writing the 'magic' commands to the chips. Saying
@@ -55,12 +52,8 @@ config MTD_CFI_NOSWAP
55 Specific arrangements are possible with the BIG_ENDIAN_BYTE and 52 Specific arrangements are possible with the BIG_ENDIAN_BYTE and
56 LITTLE_ENDIAN_BYTE, if the bytes are reversed. 53 LITTLE_ENDIAN_BYTE, if the bytes are reversed.
57 54
58 If you have a LART, on which the data (and address) lines were 55config MTD_CFI_NOSWAP
59 connected in a fashion which ensured that the nets were as short 56 bool "NO"
60 as possible, resulting in a bit-shuffling which seems utterly
61 random to the untrained eye, you need the LART_ENDIAN_BYTE option.
62
63 Yes, there really exists something sicker than PDP-endian :)
64 57
65config MTD_CFI_BE_BYTE_SWAP 58config MTD_CFI_BE_BYTE_SWAP
66 bool "BIG_ENDIAN_BYTE" 59 bool "BIG_ENDIAN_BYTE"
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index dbbd2edfb812..77514430f1fe 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -2043,7 +2043,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
2043{ 2043{
2044 struct cfi_private *cfi = map->fldrv_priv; 2044 struct cfi_private *cfi = map->fldrv_priv;
2045 struct cfi_pri_intelext *extp = cfi->cmdset_priv; 2045 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2046 int udelay; 2046 int mdelay;
2047 int ret; 2047 int ret;
2048 2048
2049 adr += chip->start; 2049 adr += chip->start;
@@ -2072,9 +2072,17 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
2072 * If Instant Individual Block Locking supported then no need 2072 * If Instant Individual Block Locking supported then no need
2073 * to delay. 2073 * to delay.
2074 */ 2074 */
2075 udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0; 2075 /*
2076 * Unlocking may take up to 1.4 seconds on some Intel flashes. So
2077 * lets use a max of 1.5 seconds (1500ms) as timeout.
2078 *
2079 * See "Clear Block Lock-Bits Time" on page 40 in
2080 * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
2081 * from February 2003
2082 */
2083 mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2076 2084
2077 ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100); 2085 ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2078 if (ret) { 2086 if (ret) {
2079 map_write(map, CMD(0x70), adr); 2087 map_write(map, CMD(0x70), adr);
2080 chip->state = FL_STATUS; 2088 chip->state = FL_STATUS;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 22d0493a026f..5ff5c4a16943 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -431,6 +431,68 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi,
431 } 431 }
432} 432}
433 433
434static int is_m29ew(struct cfi_private *cfi)
435{
436 if (cfi->mfr == CFI_MFR_INTEL &&
437 ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
438 (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
439 return 1;
440 return 0;
441}
442
443/*
444 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
445 * Some revisions of the M29EW suffer from erase suspend hang ups. In
446 * particular, it can occur when the sequence
447 * Erase Confirm -> Suspend -> Program -> Resume
448 * causes a lockup due to internal timing issues. The consequence is that the
449 * erase cannot be resumed without inserting a dummy command after programming
450 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
451 * that writes an F0 command code before the RESUME command.
452 */
453static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
454 unsigned long adr)
455{
456 struct cfi_private *cfi = map->fldrv_priv;
457 /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
458 if (is_m29ew(cfi))
459 map_write(map, CMD(0xF0), adr);
460}
461
462/*
463 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
464 *
465 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
466 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
467 * command is issued after an ERASE RESUME operation without waiting for a
468 * minimum delay. The result is that once the ERASE seems to be completed
469 * (no bits are toggling), the contents of the Flash memory block on which
470 * the erase was ongoing could be inconsistent with the expected values
471 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
472 * values), causing a consequent failure of the ERASE operation.
473 * The occurrence of this issue could be high, especially when file system
474 * operations on the Flash are intensive. As a result, it is recommended
475 * that a patch be applied. Intensive file system operations can cause many
476 * calls to the garbage routine to free Flash space (also by erasing physical
477 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
478 * commands can occur. The problem disappears when a delay is inserted after
479 * the RESUME command by using the udelay() function available in Linux.
480 * The DELAY value must be tuned based on the customer's platform.
481 * The maximum value that fixes the problem in all cases is 500us.
482 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
483 * in most cases.
484 * We have chosen 500µs because this latency is acceptable.
485 */
486static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
487{
488 /*
489 * Resolving the Delay After Resume Issue see Micron TN-13-07
490 * Worst case delay must be 500µs but 30-50µs should be ok as well
491 */
492 if (is_m29ew(cfi))
493 cfi_udelay(500);
494}
495
434struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) 496struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
435{ 497{
436 struct cfi_private *cfi = map->fldrv_priv; 498 struct cfi_private *cfi = map->fldrv_priv;
@@ -776,7 +838,10 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
776 838
777 switch(chip->oldstate) { 839 switch(chip->oldstate) {
778 case FL_ERASING: 840 case FL_ERASING:
841 cfi_fixup_m29ew_erase_suspend(map,
842 chip->in_progress_block_addr);
779 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr); 843 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
844 cfi_fixup_m29ew_delay_after_resume(cfi);
780 chip->oldstate = FL_READY; 845 chip->oldstate = FL_READY;
781 chip->state = FL_ERASING; 846 chip->state = FL_ERASING;
782 break; 847 break;
@@ -916,6 +981,8 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
916 /* Disallow XIP again */ 981 /* Disallow XIP again */
917 local_irq_disable(); 982 local_irq_disable();
918 983
984 /* Correct Erase Suspend Hangups for M29EW */
985 cfi_fixup_m29ew_erase_suspend(map, adr);
919 /* Resume the write or erase operation */ 986 /* Resume the write or erase operation */
920 map_write(map, cfi->sector_erase_cmd, adr); 987 map_write(map, cfi->sector_erase_cmd, adr);
921 chip->state = oldstate; 988 chip->state = oldstate;
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 4558e0f4d07f..aed1b8a63c9f 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -39,11 +39,10 @@
39 39
40#include <linux/kernel.h> 40#include <linux/kernel.h>
41#include <linux/slab.h> 41#include <linux/slab.h>
42
43#include <linux/mtd/mtd.h> 42#include <linux/mtd/mtd.h>
44#include <linux/mtd/partitions.h> 43#include <linux/mtd/partitions.h>
45#include <linux/bootmem.h>
46#include <linux/module.h> 44#include <linux/module.h>
45#include <linux/err.h>
47 46
48/* error message prefix */ 47/* error message prefix */
49#define ERRP "mtd: " 48#define ERRP "mtd: "
@@ -72,7 +71,7 @@ static struct cmdline_mtd_partition *partitions;
72 71
73/* the command line passed to mtdpart_setup() */ 72/* the command line passed to mtdpart_setup() */
74static char *cmdline; 73static char *cmdline;
75static int cmdline_parsed = 0; 74static int cmdline_parsed;
76 75
77/* 76/*
78 * Parse one partition definition for an MTD. Since there can be many 77 * Parse one partition definition for an MTD. Since there can be many
@@ -83,15 +82,14 @@ static int cmdline_parsed = 0;
83 * syntax has been verified ok. 82 * syntax has been verified ok.
84 */ 83 */
85static struct mtd_partition * newpart(char *s, 84static struct mtd_partition * newpart(char *s,
86 char **retptr, 85 char **retptr,
87 int *num_parts, 86 int *num_parts,
88 int this_part, 87 int this_part,
89 unsigned char **extra_mem_ptr, 88 unsigned char **extra_mem_ptr,
90 int extra_mem_size) 89 int extra_mem_size)
91{ 90{
92 struct mtd_partition *parts; 91 struct mtd_partition *parts;
93 unsigned long size; 92 unsigned long size, offset = OFFSET_CONTINUOUS;
94 unsigned long offset = OFFSET_CONTINUOUS;
95 char *name; 93 char *name;
96 int name_len; 94 int name_len;
97 unsigned char *extra_mem; 95 unsigned char *extra_mem;
@@ -99,124 +97,106 @@ static struct mtd_partition * newpart(char *s,
99 unsigned int mask_flags; 97 unsigned int mask_flags;
100 98
101 /* fetch the partition size */ 99 /* fetch the partition size */
102 if (*s == '-') 100 if (*s == '-') {
103 { /* assign all remaining space to this partition */ 101 /* assign all remaining space to this partition */
104 size = SIZE_REMAINING; 102 size = SIZE_REMAINING;
105 s++; 103 s++;
106 } 104 } else {
107 else
108 {
109 size = memparse(s, &s); 105 size = memparse(s, &s);
110 if (size < PAGE_SIZE) 106 if (size < PAGE_SIZE) {
111 {
112 printk(KERN_ERR ERRP "partition size too small (%lx)\n", size); 107 printk(KERN_ERR ERRP "partition size too small (%lx)\n", size);
113 return NULL; 108 return ERR_PTR(-EINVAL);
114 } 109 }
115 } 110 }
116 111
117 /* fetch partition name and flags */ 112 /* fetch partition name and flags */
118 mask_flags = 0; /* this is going to be a regular partition */ 113 mask_flags = 0; /* this is going to be a regular partition */
119 delim = 0; 114 delim = 0;
120 /* check for offset */ 115
121 if (*s == '@') 116 /* check for offset */
122 { 117 if (*s == '@') {
123 s++; 118 s++;
124 offset = memparse(s, &s); 119 offset = memparse(s, &s);
125 } 120 }
126 /* now look for name */ 121
122 /* now look for name */
127 if (*s == '(') 123 if (*s == '(')
128 {
129 delim = ')'; 124 delim = ')';
130 }
131 125
132 if (delim) 126 if (delim) {
133 {
134 char *p; 127 char *p;
135 128
136 name = ++s; 129 name = ++s;
137 p = strchr(name, delim); 130 p = strchr(name, delim);
138 if (!p) 131 if (!p) {
139 {
140 printk(KERN_ERR ERRP "no closing %c found in partition name\n", delim); 132 printk(KERN_ERR ERRP "no closing %c found in partition name\n", delim);
141 return NULL; 133 return ERR_PTR(-EINVAL);
142 } 134 }
143 name_len = p - name; 135 name_len = p - name;
144 s = p + 1; 136 s = p + 1;
145 } 137 } else {
146 else 138 name = NULL;
147 {
148 name = NULL;
149 name_len = 13; /* Partition_000 */ 139 name_len = 13; /* Partition_000 */
150 } 140 }
151 141
152 /* record name length for memory allocation later */ 142 /* record name length for memory allocation later */
153 extra_mem_size += name_len + 1; 143 extra_mem_size += name_len + 1;
154 144
155 /* test for options */ 145 /* test for options */
156 if (strncmp(s, "ro", 2) == 0) 146 if (strncmp(s, "ro", 2) == 0) {
157 {
158 mask_flags |= MTD_WRITEABLE; 147 mask_flags |= MTD_WRITEABLE;
159 s += 2; 148 s += 2;
160 } 149 }
161 150
162 /* if lk is found do NOT unlock the MTD partition*/ 151 /* if lk is found do NOT unlock the MTD partition*/
163 if (strncmp(s, "lk", 2) == 0) 152 if (strncmp(s, "lk", 2) == 0) {
164 {
165 mask_flags |= MTD_POWERUP_LOCK; 153 mask_flags |= MTD_POWERUP_LOCK;
166 s += 2; 154 s += 2;
167 } 155 }
168 156
169 /* test if more partitions are following */ 157 /* test if more partitions are following */
170 if (*s == ',') 158 if (*s == ',') {
171 { 159 if (size == SIZE_REMAINING) {
172 if (size == SIZE_REMAINING)
173 {
174 printk(KERN_ERR ERRP "no partitions allowed after a fill-up partition\n"); 160 printk(KERN_ERR ERRP "no partitions allowed after a fill-up partition\n");
175 return NULL; 161 return ERR_PTR(-EINVAL);
176 } 162 }
177 /* more partitions follow, parse them */ 163 /* more partitions follow, parse them */
178 parts = newpart(s + 1, &s, num_parts, this_part + 1, 164 parts = newpart(s + 1, &s, num_parts, this_part + 1,
179 &extra_mem, extra_mem_size); 165 &extra_mem, extra_mem_size);
180 if (!parts) 166 if (IS_ERR(parts))
181 return NULL; 167 return parts;
182 } 168 } else {
183 else 169 /* this is the last partition: allocate space for all */
184 { /* this is the last partition: allocate space for all */
185 int alloc_size; 170 int alloc_size;
186 171
187 *num_parts = this_part + 1; 172 *num_parts = this_part + 1;
188 alloc_size = *num_parts * sizeof(struct mtd_partition) + 173 alloc_size = *num_parts * sizeof(struct mtd_partition) +
189 extra_mem_size; 174 extra_mem_size;
175
190 parts = kzalloc(alloc_size, GFP_KERNEL); 176 parts = kzalloc(alloc_size, GFP_KERNEL);
191 if (!parts) 177 if (!parts)
192 return NULL; 178 return ERR_PTR(-ENOMEM);
193 extra_mem = (unsigned char *)(parts + *num_parts); 179 extra_mem = (unsigned char *)(parts + *num_parts);
194 } 180 }
181
195 /* enter this partition (offset will be calculated later if it is zero at this point) */ 182 /* enter this partition (offset will be calculated later if it is zero at this point) */
196 parts[this_part].size = size; 183 parts[this_part].size = size;
197 parts[this_part].offset = offset; 184 parts[this_part].offset = offset;
198 parts[this_part].mask_flags = mask_flags; 185 parts[this_part].mask_flags = mask_flags;
199 if (name) 186 if (name)
200 {
201 strlcpy(extra_mem, name, name_len + 1); 187 strlcpy(extra_mem, name, name_len + 1);
202 }
203 else 188 else
204 {
205 sprintf(extra_mem, "Partition_%03d", this_part); 189 sprintf(extra_mem, "Partition_%03d", this_part);
206 }
207 parts[this_part].name = extra_mem; 190 parts[this_part].name = extra_mem;
208 extra_mem += name_len + 1; 191 extra_mem += name_len + 1;
209 192
210 dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n", 193 dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",
211 this_part, 194 this_part, parts[this_part].name, parts[this_part].offset,
212 parts[this_part].name, 195 parts[this_part].size, parts[this_part].mask_flags));
213 parts[this_part].offset,
214 parts[this_part].size,
215 parts[this_part].mask_flags));
216 196
217 /* return (updated) pointer to extra_mem memory */ 197 /* return (updated) pointer to extra_mem memory */
218 if (extra_mem_ptr) 198 if (extra_mem_ptr)
219 *extra_mem_ptr = extra_mem; 199 *extra_mem_ptr = extra_mem;
220 200
221 /* return (updated) pointer command line string */ 201 /* return (updated) pointer command line string */
222 *retptr = s; 202 *retptr = s;
@@ -236,16 +216,16 @@ static int mtdpart_setup_real(char *s)
236 { 216 {
237 struct cmdline_mtd_partition *this_mtd; 217 struct cmdline_mtd_partition *this_mtd;
238 struct mtd_partition *parts; 218 struct mtd_partition *parts;
239 int mtd_id_len; 219 int mtd_id_len, num_parts;
240 int num_parts;
241 char *p, *mtd_id; 220 char *p, *mtd_id;
242 221
243 mtd_id = s; 222 mtd_id = s;
223
244 /* fetch <mtd-id> */ 224 /* fetch <mtd-id> */
245 if (!(p = strchr(s, ':'))) 225 p = strchr(s, ':');
246 { 226 if (!p) {
247 printk(KERN_ERR ERRP "no mtd-id\n"); 227 printk(KERN_ERR ERRP "no mtd-id\n");
248 return 0; 228 return -EINVAL;
249 } 229 }
250 mtd_id_len = p - mtd_id; 230 mtd_id_len = p - mtd_id;
251 231
@@ -262,8 +242,7 @@ static int mtdpart_setup_real(char *s)
262 (unsigned char**)&this_mtd, /* out: extra mem */ 242 (unsigned char**)&this_mtd, /* out: extra mem */
263 mtd_id_len + 1 + sizeof(*this_mtd) + 243 mtd_id_len + 1 + sizeof(*this_mtd) +
264 sizeof(void*)-1 /*alignment*/); 244 sizeof(void*)-1 /*alignment*/);
265 if(!parts) 245 if (IS_ERR(parts)) {
266 {
267 /* 246 /*
268 * An error occurred. We're either: 247 * An error occurred. We're either:
269 * a) out of memory, or 248 * a) out of memory, or
@@ -271,12 +250,12 @@ static int mtdpart_setup_real(char *s)
271 * Either way, this mtd is hosed and we're 250 * Either way, this mtd is hosed and we're
272 * unlikely to succeed in parsing any more 251 * unlikely to succeed in parsing any more
273 */ 252 */
274 return 0; 253 return PTR_ERR(parts);
275 } 254 }
276 255
277 /* align this_mtd */ 256 /* align this_mtd */
278 this_mtd = (struct cmdline_mtd_partition *) 257 this_mtd = (struct cmdline_mtd_partition *)
279 ALIGN((unsigned long)this_mtd, sizeof(void*)); 258 ALIGN((unsigned long)this_mtd, sizeof(void *));
280 /* enter results */ 259 /* enter results */
281 this_mtd->parts = parts; 260 this_mtd->parts = parts;
282 this_mtd->num_parts = num_parts; 261 this_mtd->num_parts = num_parts;
@@ -296,14 +275,14 @@ static int mtdpart_setup_real(char *s)
296 break; 275 break;
297 276
298 /* does another spec follow? */ 277 /* does another spec follow? */
299 if (*s != ';') 278 if (*s != ';') {
300 {
301 printk(KERN_ERR ERRP "bad character after partition (%c)\n", *s); 279 printk(KERN_ERR ERRP "bad character after partition (%c)\n", *s);
302 return 0; 280 return -EINVAL;
303 } 281 }
304 s++; 282 s++;
305 } 283 }
306 return 1; 284
285 return 0;
307} 286}
308 287
309/* 288/*
@@ -318,44 +297,58 @@ static int parse_cmdline_partitions(struct mtd_info *master,
318 struct mtd_part_parser_data *data) 297 struct mtd_part_parser_data *data)
319{ 298{
320 unsigned long offset; 299 unsigned long offset;
321 int i; 300 int i, err;
322 struct cmdline_mtd_partition *part; 301 struct cmdline_mtd_partition *part;
323 const char *mtd_id = master->name; 302 const char *mtd_id = master->name;
324 303
325 /* parse command line */ 304 /* parse command line */
326 if (!cmdline_parsed) 305 if (!cmdline_parsed) {
327 mtdpart_setup_real(cmdline); 306 err = mtdpart_setup_real(cmdline);
307 if (err)
308 return err;
309 }
328 310
329 for(part = partitions; part; part = part->next) 311 for (part = partitions; part; part = part->next) {
330 { 312 if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id))) {
331 if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id))) 313 for (i = 0, offset = 0; i < part->num_parts; i++) {
332 {
333 for(i = 0, offset = 0; i < part->num_parts; i++)
334 {
335 if (part->parts[i].offset == OFFSET_CONTINUOUS) 314 if (part->parts[i].offset == OFFSET_CONTINUOUS)
336 part->parts[i].offset = offset; 315 part->parts[i].offset = offset;
337 else 316 else
338 offset = part->parts[i].offset; 317 offset = part->parts[i].offset;
318
339 if (part->parts[i].size == SIZE_REMAINING) 319 if (part->parts[i].size == SIZE_REMAINING)
340 part->parts[i].size = master->size - offset; 320 part->parts[i].size = master->size - offset;
341 if (offset + part->parts[i].size > master->size) 321
342 { 322 if (part->parts[i].size == 0) {
323 printk(KERN_WARNING ERRP
324 "%s: skipping zero sized partition\n",
325 part->mtd_id);
326 part->num_parts--;
327 memmove(&part->parts[i],
328 &part->parts[i + 1],
329 sizeof(*part->parts) * (part->num_parts - i));
330 continue;
331 }
332
333 if (offset + part->parts[i].size > master->size) {
343 printk(KERN_WARNING ERRP 334 printk(KERN_WARNING ERRP
344 "%s: partitioning exceeds flash size, truncating\n", 335 "%s: partitioning exceeds flash size, truncating\n",
345 part->mtd_id); 336 part->mtd_id);
346 part->parts[i].size = master->size - offset; 337 part->parts[i].size = master->size - offset;
347 part->num_parts = i;
348 } 338 }
349 offset += part->parts[i].size; 339 offset += part->parts[i].size;
350 } 340 }
341
351 *pparts = kmemdup(part->parts, 342 *pparts = kmemdup(part->parts,
352 sizeof(*part->parts) * part->num_parts, 343 sizeof(*part->parts) * part->num_parts,
353 GFP_KERNEL); 344 GFP_KERNEL);
354 if (!*pparts) 345 if (!*pparts)
355 return -ENOMEM; 346 return -ENOMEM;
347
356 return part->num_parts; 348 return part->num_parts;
357 } 349 }
358 } 350 }
351
359 return 0; 352 return 0;
360} 353}
361 354
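Note on the cmdlinepart.c cleanup above: the string being parsed is the mtdparts= boot argument, of the form <mtd-id>:<size>[@<offset>][(<name>)][ro][lk], with ',' separating partitions and ';' separating definitions for different MTD devices. A purely illustrative example (device name and sizes invented):

    mtdparts=physmap-flash.0:512k(boot)ro,128k(env),4m(kernel),-(rootfs)

After the hunks above, a zero-sized partition in such a string is skipped with a warning instead of being handed to the MTD core, and syntax or allocation failures propagate as negative error codes (ERR_PTR()/-EINVAL/-ENOMEM) instead of a silent 0.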
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 4cdb2af7bf44..27f80cd8aef3 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -97,7 +97,7 @@ config MTD_M25P80
97 doesn't support the JEDEC ID instruction. 97 doesn't support the JEDEC ID instruction.
98 98
99config M25PXX_USE_FAST_READ 99config M25PXX_USE_FAST_READ
100 bool "Use FAST_READ OPCode allowing SPI CLK <= 50MHz" 100 bool "Use FAST_READ OPCode allowing SPI CLK >= 50MHz"
101 depends on MTD_M25P80 101 depends on MTD_M25P80
102 default y 102 default y
103 help 103 help
@@ -120,6 +120,14 @@ config MTD_SST25L
120 Set up your spi devices with the right board-specific platform data, 120 Set up your spi devices with the right board-specific platform data,
121 if you want to specify device partitioning. 121 if you want to specify device partitioning.
122 122
123config MTD_BCM47XXSFLASH
124 tristate "R/O support for serial flash on BCMA bus"
125 depends on BCMA_SFLASH
126 help
127 BCMA bus can have various flash memories attached, they are
128 registered by bcma as platform devices. This enables driver for
129 serial flash memories (only read-only mode is implemented).
130
123config MTD_SLRAM 131config MTD_SLRAM
124 tristate "Uncached system RAM" 132 tristate "Uncached system RAM"
125 help 133 help
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index a4dd1d822b6c..395733a30ef4 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -19,5 +19,6 @@ obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
19obj-$(CONFIG_MTD_M25P80) += m25p80.o 19obj-$(CONFIG_MTD_M25P80) += m25p80.o
20obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o 20obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o
21obj-$(CONFIG_MTD_SST25L) += sst25l.o 21obj-$(CONFIG_MTD_SST25L) += sst25l.o
22obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o
22 23
23CFLAGS_docg3.o += -I$(src) \ No newline at end of file 24CFLAGS_docg3.o += -I$(src) \ No newline at end of file
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
new file mode 100644
index 000000000000..2dc5a6f3fd57
--- /dev/null
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -0,0 +1,105 @@
1#include <linux/kernel.h>
2#include <linux/module.h>
3#include <linux/slab.h>
4#include <linux/mtd/mtd.h>
5#include <linux/platform_device.h>
6#include <linux/bcma/bcma.h>
7
8MODULE_LICENSE("GPL");
9MODULE_DESCRIPTION("Serial flash driver for BCMA bus");
10
11static const char *probes[] = { "bcm47xxpart", NULL };
12
13static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
14 size_t *retlen, u_char *buf)
15{
16 struct bcma_sflash *sflash = mtd->priv;
17
18 /* Check address range */
19 if ((from + len) > mtd->size)
20 return -EINVAL;
21
22 memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(sflash->window + from),
23 len);
24
25 return len;
26}
27
28static void bcm47xxsflash_fill_mtd(struct bcma_sflash *sflash,
29 struct mtd_info *mtd)
30{
31 mtd->priv = sflash;
32 mtd->name = "bcm47xxsflash";
33 mtd->owner = THIS_MODULE;
34 mtd->type = MTD_ROM;
35 mtd->size = sflash->size;
36 mtd->_read = bcm47xxsflash_read;
37
38 /* TODO: implement writing support and verify/change following code */
39 mtd->flags = MTD_CAP_ROM;
40 mtd->writebufsize = mtd->writesize = 1;
41}
42
43static int bcm47xxsflash_probe(struct platform_device *pdev)
44{
45 struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
46 int err;
47
48 sflash->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
49 if (!sflash->mtd) {
50 err = -ENOMEM;
51 goto out;
52 }
53 bcm47xxsflash_fill_mtd(sflash, sflash->mtd);
54
55 err = mtd_device_parse_register(sflash->mtd, probes, NULL, NULL, 0);
56 if (err) {
57 pr_err("Failed to register MTD device: %d\n", err);
58 goto err_dev_reg;
59 }
60
61 return 0;
62
63err_dev_reg:
64 kfree(sflash->mtd);
65out:
66 return err;
67}
68
69static int __devexit bcm47xxsflash_remove(struct platform_device *pdev)
70{
71 struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
72
73 mtd_device_unregister(sflash->mtd);
74 kfree(sflash->mtd);
75
76 return 0;
77}
78
79static struct platform_driver bcma_sflash_driver = {
80 .remove = __devexit_p(bcm47xxsflash_remove),
81 .driver = {
82 .name = "bcma_sflash",
83 .owner = THIS_MODULE,
84 },
85};
86
87static int __init bcm47xxsflash_init(void)
88{
89 int err;
90
91 err = platform_driver_probe(&bcma_sflash_driver, bcm47xxsflash_probe);
92 if (err)
93 pr_err("Failed to register BCMA serial flash driver: %d\n",
94 err);
95
96 return err;
97}
98
99static void __exit bcm47xxsflash_exit(void)
100{
101 platform_driver_unregister(&bcma_sflash_driver);
102}
103
104module_init(bcm47xxsflash_init);
105module_exit(bcm47xxsflash_exit);
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 04eb2e4aa50f..4f2220ad8924 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -659,23 +659,15 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
659#ifdef ECC_DEBUG 659#ifdef ECC_DEBUG
660 printk("%s(%d): Millennium Plus ECC error (from=0x%x:\n", 660 printk("%s(%d): Millennium Plus ECC error (from=0x%x:\n",
661 __FILE__, __LINE__, (int)from); 661 __FILE__, __LINE__, (int)from);
662 printk(" syndrome= %02x:%02x:%02x:%02x:%02x:" 662 printk(" syndrome= %*phC\n", 6, syndrome);
663 "%02x\n", 663 printk(" eccbuf= %*phC\n", 6, eccbuf);
664 syndrome[0], syndrome[1], syndrome[2],
665 syndrome[3], syndrome[4], syndrome[5]);
666 printk(" eccbuf= %02x:%02x:%02x:%02x:%02x:"
667 "%02x\n",
668 eccbuf[0], eccbuf[1], eccbuf[2],
669 eccbuf[3], eccbuf[4], eccbuf[5]);
670#endif 664#endif
671 ret = -EIO; 665 ret = -EIO;
672 } 666 }
673 } 667 }
674 668
675#ifdef PSYCHO_DEBUG 669#ifdef PSYCHO_DEBUG
676 printk("ECC DATA at %lx: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", 670 printk("ECC DATA at %lx: %*ph\n", (long)from, 6, eccbuf);
677 (long)from, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
678 eccbuf[4], eccbuf[5]);
679#endif 671#endif
680 /* disable the ECC engine */ 672 /* disable the ECC engine */
681 WriteDOC(DOC_ECC_DIS, docptr , Mplus_ECCConf); 673 WriteDOC(DOC_ECC_DIS, docptr , Mplus_ECCConf);
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index f70854d728fe..d34d83b8f9c2 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -919,19 +919,13 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
919 eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1); 919 eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
920 920
921 if (nboob >= DOC_LAYOUT_OOB_SIZE) { 921 if (nboob >= DOC_LAYOUT_OOB_SIZE) {
922 doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", 922 doc_dbg("OOB - INFO: %*phC\n", 7, oobbuf);
923 oobbuf[0], oobbuf[1], oobbuf[2], oobbuf[3],
924 oobbuf[4], oobbuf[5], oobbuf[6]);
925 doc_dbg("OOB - HAMMING: %02x\n", oobbuf[7]); 923 doc_dbg("OOB - HAMMING: %02x\n", oobbuf[7]);
926 doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", 924 doc_dbg("OOB - BCH_ECC: %*phC\n", 7, oobbuf + 8);
927 oobbuf[8], oobbuf[9], oobbuf[10], oobbuf[11],
928 oobbuf[12], oobbuf[13], oobbuf[14]);
929 doc_dbg("OOB - UNUSED: %02x\n", oobbuf[15]); 925 doc_dbg("OOB - UNUSED: %02x\n", oobbuf[15]);
930 } 926 }
931 doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1); 927 doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1);
932 doc_dbg("ECC HW_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", 928 doc_dbg("ECC HW_ECC: %*phC\n", 7, hwecc);
933 hwecc[0], hwecc[1], hwecc[2], hwecc[3], hwecc[4],
934 hwecc[5], hwecc[6]);
935 929
936 ret = -EIO; 930 ret = -EIO;
937 if (is_prot_seq_error(docg3)) 931 if (is_prot_seq_error(docg3))
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 5d0d68c3fe27..03838bab1f59 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -633,11 +633,14 @@ static const struct spi_device_id m25p_ids[] = {
633 { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, 633 { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
634 { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) }, 634 { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
635 635
636 { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
637
636 /* EON -- en25xxx */ 638 /* EON -- en25xxx */
637 { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) }, 639 { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
638 { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, 640 { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
639 { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) }, 641 { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
640 { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, 642 { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
643 { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
641 644
642 /* Everspin */ 645 /* Everspin */
643 { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) }, 646 { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) },
@@ -646,6 +649,7 @@ static const struct spi_device_id m25p_ids[] = {
646 { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, 649 { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
647 { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) }, 650 { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
648 { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) }, 651 { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
652 { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
649 653
650 /* Macronix */ 654 /* Macronix */
651 { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) }, 655 { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
@@ -659,15 +663,15 @@ static const struct spi_device_id m25p_ids[] = {
659 { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) }, 663 { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
660 { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, 664 { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
661 665
666 /* Micron */
667 { "n25q128", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
668 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
669
662 /* Spansion -- single (large) sector size only, at least 670 /* Spansion -- single (large) sector size only, at least
663 * for the chips listed here (without boot sectors). 671 * for the chips listed here (without boot sectors).
664 */ 672 */
665 { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) }, 673 { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, 0) },
666 { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) }, 674 { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) },
667 { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
668 { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
669 { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SECT_4K) },
670 { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
671 { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) }, 675 { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
672 { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, 0) }, 676 { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, 0) },
673 { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, 0) }, 677 { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, 0) },
@@ -676,6 +680,11 @@ static const struct spi_device_id m25p_ids[] = {
676 { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, 680 { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
677 { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) }, 681 { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
678 { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) }, 682 { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
683 { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
684 { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
685 { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
686 { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
687 { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
679 { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) }, 688 { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
680 { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, 689 { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
681 690
@@ -699,6 +708,7 @@ static const struct spi_device_id m25p_ids[] = {
699 { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) }, 708 { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
700 { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) }, 709 { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
701 { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) }, 710 { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
711 { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) },
702 712
703 { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) }, 713 { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
704 { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) }, 714 { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
@@ -714,6 +724,7 @@ static const struct spi_device_id m25p_ids[] = {
714 { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) }, 724 { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
715 { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) }, 725 { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
716 726
727 { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
717 { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) }, 728 { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
718 { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) }, 729 { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
719 730
@@ -730,6 +741,7 @@ static const struct spi_device_id m25p_ids[] = {
730 { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, 741 { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
731 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, 742 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
732 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) }, 743 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
744 { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K) },
733 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, 745 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
734 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, 746 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
735 { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, 747 { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
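To make the new m25p80 table entries above concrete: reading the INFO() arguments in this driver as (JEDEC ID, extended ID, sector size, number of sectors, flags) — an assumption based on the existing entries, since the macro itself is outside this hunk — the added { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K) } describes a Winbond part with 64 sectors of 64 KiB, i.e. 64 x 64 KiB = 4 MiB, whose 4 KiB sub-sector erase opcode may be used. The Spansion reshuffle also drops SECT_4K from s25sl032p, so that chip falls back to full 64 KiB sector erases.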
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 67960362681e..dcc3c9511530 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -26,6 +26,7 @@
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/param.h> 27#include <linux/param.h>
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/pm.h>
29#include <linux/mtd/mtd.h> 30#include <linux/mtd/mtd.h>
30#include <linux/mtd/partitions.h> 31#include <linux/mtd/partitions.h>
31#include <linux/mtd/spear_smi.h> 32#include <linux/mtd/spear_smi.h>
@@ -240,8 +241,8 @@ static int spear_smi_read_sr(struct spear_smi *dev, u32 bank)
240 /* copy dev->status (lower 16 bits) in order to release lock */ 241 /* copy dev->status (lower 16 bits) in order to release lock */
241 if (ret > 0) 242 if (ret > 0)
242 ret = dev->status & 0xffff; 243 ret = dev->status & 0xffff;
243 else 244 else if (ret == 0)
244 ret = -EIO; 245 ret = -ETIMEDOUT;
245 246
246 /* restore the ctrl regs state */ 247 /* restore the ctrl regs state */
247 writel(ctrlreg1, dev->io_base + SMI_CR1); 248 writel(ctrlreg1, dev->io_base + SMI_CR1);
@@ -269,16 +270,19 @@ static int spear_smi_wait_till_ready(struct spear_smi *dev, u32 bank,
269 finish = jiffies + timeout; 270 finish = jiffies + timeout;
270 do { 271 do {
271 status = spear_smi_read_sr(dev, bank); 272 status = spear_smi_read_sr(dev, bank);
272 if (status < 0) 273 if (status < 0) {
273 continue; /* try till timeout */ 274 if (status == -ETIMEDOUT)
274 else if (!(status & SR_WIP)) 275 continue; /* try till finish */
276 return status;
277 } else if (!(status & SR_WIP)) {
275 return 0; 278 return 0;
279 }
276 280
277 cond_resched(); 281 cond_resched();
278 } while (!time_after_eq(jiffies, finish)); 282 } while (!time_after_eq(jiffies, finish));
279 283
280 dev_err(&dev->pdev->dev, "smi controller is busy, timeout\n"); 284 dev_err(&dev->pdev->dev, "smi controller is busy, timeout\n");
281 return status; 285 return -EBUSY;
282} 286}
283 287
284/** 288/**
@@ -335,6 +339,9 @@ static void spear_smi_hw_init(struct spear_smi *dev)
335 val = HOLD1 | BANK_EN | DSEL_TIME | (prescale << 8); 339 val = HOLD1 | BANK_EN | DSEL_TIME | (prescale << 8);
336 340
337 mutex_lock(&dev->lock); 341 mutex_lock(&dev->lock);
342 /* clear all interrupt conditions */
343 writel(0, dev->io_base + SMI_SR);
344
338 writel(val, dev->io_base + SMI_CR1); 345 writel(val, dev->io_base + SMI_CR1);
339 mutex_unlock(&dev->lock); 346 mutex_unlock(&dev->lock);
340} 347}
@@ -391,11 +398,11 @@ static int spear_smi_write_enable(struct spear_smi *dev, u32 bank)
391 writel(ctrlreg1, dev->io_base + SMI_CR1); 398 writel(ctrlreg1, dev->io_base + SMI_CR1);
392 writel(0, dev->io_base + SMI_CR2); 399 writel(0, dev->io_base + SMI_CR2);
393 400
394 if (ret <= 0) { 401 if (ret == 0) {
395 ret = -EIO; 402 ret = -EIO;
396 dev_err(&dev->pdev->dev, 403 dev_err(&dev->pdev->dev,
397 "smi controller failed on write enable\n"); 404 "smi controller failed on write enable\n");
398 } else { 405 } else if (ret > 0) {
399 /* check whether write mode status is set for required bank */ 406 /* check whether write mode status is set for required bank */
400 if (dev->status & (1 << (bank + WM_SHIFT))) 407 if (dev->status & (1 << (bank + WM_SHIFT)))
401 ret = 0; 408 ret = 0;
@@ -462,10 +469,10 @@ static int spear_smi_erase_sector(struct spear_smi *dev,
462 ret = wait_event_interruptible_timeout(dev->cmd_complete, 469 ret = wait_event_interruptible_timeout(dev->cmd_complete,
463 dev->status & TFF, SMI_CMD_TIMEOUT); 470 dev->status & TFF, SMI_CMD_TIMEOUT);
464 471
465 if (ret <= 0) { 472 if (ret == 0) {
466 ret = -EIO; 473 ret = -EIO;
467 dev_err(&dev->pdev->dev, "sector erase failed\n"); 474 dev_err(&dev->pdev->dev, "sector erase failed\n");
468 } else 475 } else if (ret > 0)
469 ret = 0; /* success */ 476 ret = 0; /* success */
470 477
471 /* restore ctrl regs */ 478 /* restore ctrl regs */
@@ -820,7 +827,7 @@ static int spear_smi_setup_banks(struct platform_device *pdev,
820 if (!flash_info) 827 if (!flash_info)
821 return -ENODEV; 828 return -ENODEV;
822 829
823 flash = kzalloc(sizeof(*flash), GFP_ATOMIC); 830 flash = devm_kzalloc(&pdev->dev, sizeof(*flash), GFP_ATOMIC);
824 if (!flash) 831 if (!flash)
825 return -ENOMEM; 832 return -ENOMEM;
826 flash->bank = bank; 833 flash->bank = bank;
@@ -831,15 +838,13 @@ static int spear_smi_setup_banks(struct platform_device *pdev,
831 flash_index = spear_smi_probe_flash(dev, bank); 838 flash_index = spear_smi_probe_flash(dev, bank);
832 if (flash_index < 0) { 839 if (flash_index < 0) {
833 dev_info(&dev->pdev->dev, "smi-nor%d not found\n", bank); 840 dev_info(&dev->pdev->dev, "smi-nor%d not found\n", bank);
834 ret = flash_index; 841 return flash_index;
835 goto err_probe;
836 } 842 }
837 /* map the memory for nor flash chip */ 843 /* map the memory for nor flash chip */
838 flash->base_addr = ioremap(flash_info->mem_base, flash_info->size); 844 flash->base_addr = devm_ioremap(&pdev->dev, flash_info->mem_base,
839 if (!flash->base_addr) { 845 flash_info->size);
840 ret = -EIO; 846 if (!flash->base_addr)
841 goto err_probe; 847 return -EIO;
842 }
843 848
844 dev->flash[bank] = flash; 849 dev->flash[bank] = flash;
845 flash->mtd.priv = dev; 850 flash->mtd.priv = dev;
@@ -881,17 +886,10 @@ static int spear_smi_setup_banks(struct platform_device *pdev,
881 count); 886 count);
882 if (ret) { 887 if (ret) {
883 dev_err(&dev->pdev->dev, "Err MTD partition=%d\n", ret); 888 dev_err(&dev->pdev->dev, "Err MTD partition=%d\n", ret);
884 goto err_map; 889 return ret;
885 } 890 }
886 891
887 return 0; 892 return 0;
888
889err_map:
890 iounmap(flash->base_addr);
891
892err_probe:
893 kfree(flash);
894 return ret;
895} 893}
896 894
897/** 895/**
@@ -928,20 +926,13 @@ static int __devinit spear_smi_probe(struct platform_device *pdev)
928 } 926 }
929 } else { 927 } else {
930 pdata = dev_get_platdata(&pdev->dev); 928 pdata = dev_get_platdata(&pdev->dev);
931 if (pdata < 0) { 929 if (!pdata) {
932 ret = -ENODEV; 930 ret = -ENODEV;
933 dev_err(&pdev->dev, "no platform data\n"); 931 dev_err(&pdev->dev, "no platform data\n");
934 goto err; 932 goto err;
935 } 933 }
936 } 934 }
937 935
938 smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
939 if (!smi_base) {
940 ret = -ENODEV;
941 dev_err(&pdev->dev, "invalid smi base address\n");
942 goto err;
943 }
944
945 irq = platform_get_irq(pdev, 0); 936 irq = platform_get_irq(pdev, 0);
946 if (irq < 0) { 937 if (irq < 0) {
947 ret = -ENODEV; 938 ret = -ENODEV;
@@ -949,32 +940,26 @@ static int __devinit spear_smi_probe(struct platform_device *pdev)
949 goto err; 940 goto err;
950 } 941 }
951 942
952 dev = kzalloc(sizeof(*dev), GFP_ATOMIC); 943 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_ATOMIC);
953 if (!dev) { 944 if (!dev) {
954 ret = -ENOMEM; 945 ret = -ENOMEM;
955 dev_err(&pdev->dev, "mem alloc fail\n"); 946 dev_err(&pdev->dev, "mem alloc fail\n");
956 goto err; 947 goto err;
957 } 948 }
958 949
959 smi_base = request_mem_region(smi_base->start, resource_size(smi_base), 950 smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
960 pdev->name);
961 if (!smi_base) {
962 ret = -EBUSY;
963 dev_err(&pdev->dev, "request mem region fail\n");
964 goto err_mem;
965 }
966 951
967 dev->io_base = ioremap(smi_base->start, resource_size(smi_base)); 952 dev->io_base = devm_request_and_ioremap(&pdev->dev, smi_base);
968 if (!dev->io_base) { 953 if (!dev->io_base) {
969 ret = -EIO; 954 ret = -EIO;
970 dev_err(&pdev->dev, "ioremap fail\n"); 955 dev_err(&pdev->dev, "devm_request_and_ioremap fail\n");
971 goto err_ioremap; 956 goto err;
972 } 957 }
973 958
974 dev->pdev = pdev; 959 dev->pdev = pdev;
975 dev->clk_rate = pdata->clk_rate; 960 dev->clk_rate = pdata->clk_rate;
976 961
977 if (dev->clk_rate < 0 || dev->clk_rate > SMI_MAX_CLOCK_FREQ) 962 if (dev->clk_rate > SMI_MAX_CLOCK_FREQ)
978 dev->clk_rate = SMI_MAX_CLOCK_FREQ; 963 dev->clk_rate = SMI_MAX_CLOCK_FREQ;
979 964
980 dev->num_flashes = pdata->num_flashes; 965 dev->num_flashes = pdata->num_flashes;
@@ -984,17 +969,18 @@ static int __devinit spear_smi_probe(struct platform_device *pdev)
984 dev->num_flashes = MAX_NUM_FLASH_CHIP; 969 dev->num_flashes = MAX_NUM_FLASH_CHIP;
985 } 970 }
986 971
987 dev->clk = clk_get(&pdev->dev, NULL); 972 dev->clk = devm_clk_get(&pdev->dev, NULL);
988 if (IS_ERR(dev->clk)) { 973 if (IS_ERR(dev->clk)) {
989 ret = PTR_ERR(dev->clk); 974 ret = PTR_ERR(dev->clk);
990 goto err_clk; 975 goto err;
991 } 976 }
992 977
993 ret = clk_prepare_enable(dev->clk); 978 ret = clk_prepare_enable(dev->clk);
994 if (ret) 979 if (ret)
995 goto err_clk_prepare_enable; 980 goto err;
996 981
997 ret = request_irq(irq, spear_smi_int_handler, 0, pdev->name, dev); 982 ret = devm_request_irq(&pdev->dev, irq, spear_smi_int_handler, 0,
983 pdev->name, dev);
998 if (ret) { 984 if (ret) {
999 dev_err(&dev->pdev->dev, "SMI IRQ allocation failed\n"); 985 dev_err(&dev->pdev->dev, "SMI IRQ allocation failed\n");
1000 goto err_irq; 986 goto err_irq;
@@ -1017,18 +1003,9 @@ static int __devinit spear_smi_probe(struct platform_device *pdev)
1017 return 0; 1003 return 0;
1018 1004
1019err_bank_setup: 1005err_bank_setup:
1020 free_irq(irq, dev);
1021 platform_set_drvdata(pdev, NULL); 1006 platform_set_drvdata(pdev, NULL);
1022err_irq: 1007err_irq:
1023 clk_disable_unprepare(dev->clk); 1008 clk_disable_unprepare(dev->clk);
1024err_clk_prepare_enable:
1025 clk_put(dev->clk);
1026err_clk:
1027 iounmap(dev->io_base);
1028err_ioremap:
1029 release_mem_region(smi_base->start, resource_size(smi_base));
1030err_mem:
1031 kfree(dev);
1032err: 1009err:
1033 return ret; 1010 return ret;
1034} 1011}
@@ -1042,11 +1019,8 @@ err:
1042static int __devexit spear_smi_remove(struct platform_device *pdev) 1019static int __devexit spear_smi_remove(struct platform_device *pdev)
1043{ 1020{
1044 struct spear_smi *dev; 1021 struct spear_smi *dev;
1045 struct spear_smi_plat_data *pdata;
1046 struct spear_snor_flash *flash; 1022 struct spear_snor_flash *flash;
1047 struct resource *smi_base; 1023 int ret, i;
1048 int ret;
1049 int i, irq;
1050 1024
1051 dev = platform_get_drvdata(pdev); 1025 dev = platform_get_drvdata(pdev);
1052 if (!dev) { 1026 if (!dev) {
@@ -1054,8 +1028,6 @@ static int __devexit spear_smi_remove(struct platform_device *pdev)
1054 return -ENODEV; 1028 return -ENODEV;
1055 } 1029 }
1056 1030
1057 pdata = dev_get_platdata(&pdev->dev);
1058
1059 /* clean up for all nor flash */ 1031 /* clean up for all nor flash */
1060 for (i = 0; i < dev->num_flashes; i++) { 1032 for (i = 0; i < dev->num_flashes; i++) {
1061 flash = dev->flash[i]; 1033 flash = dev->flash[i];
@@ -1066,49 +1038,41 @@ static int __devexit spear_smi_remove(struct platform_device *pdev)
 		ret = mtd_device_unregister(&flash->mtd);
 		if (ret)
 			dev_err(&pdev->dev, "error removing mtd\n");
-
-		iounmap(flash->base_addr);
-		kfree(flash);
 	}
 
-	irq = platform_get_irq(pdev, 0);
-	free_irq(irq, dev);
-
 	clk_disable_unprepare(dev->clk);
-	clk_put(dev->clk);
-	iounmap(dev->io_base);
-	kfree(dev);
-
-	smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	release_mem_region(smi_base->start, resource_size(smi_base));
 	platform_set_drvdata(pdev, NULL);
 
 	return 0;
 }
 
-int spear_smi_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM
+static int spear_smi_suspend(struct device *dev)
 {
-	struct spear_smi *dev = platform_get_drvdata(pdev);
+	struct spear_smi *sdev = dev_get_drvdata(dev);
 
-	if (dev && dev->clk)
-		clk_disable_unprepare(dev->clk);
+	if (sdev && sdev->clk)
+		clk_disable_unprepare(sdev->clk);
 
 	return 0;
 }
 
-int spear_smi_resume(struct platform_device *pdev)
+static int spear_smi_resume(struct device *dev)
 {
-	struct spear_smi *dev = platform_get_drvdata(pdev);
+	struct spear_smi *sdev = dev_get_drvdata(dev);
 	int ret = -EPERM;
 
-	if (dev && dev->clk)
-		ret = clk_prepare_enable(dev->clk);
+	if (sdev && sdev->clk)
+		ret = clk_prepare_enable(sdev->clk);
 
 	if (!ret)
-		spear_smi_hw_init(dev);
+		spear_smi_hw_init(sdev);
 	return ret;
 }
 
+static SIMPLE_DEV_PM_OPS(spear_smi_pm_ops, spear_smi_suspend, spear_smi_resume);
+#endif
+
 #ifdef CONFIG_OF
 static const struct of_device_id spear_smi_id_table[] = {
 	{ .compatible = "st,spear600-smi" },
@@ -1123,11 +1087,12 @@ static struct platform_driver spear_smi_driver = {
 		.bus = &platform_bus_type,
 		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(spear_smi_id_table),
+#ifdef CONFIG_PM
+		.pm = &spear_smi_pm_ops,
+#endif
 	},
 	.probe = spear_smi_probe,
 	.remove = __devexit_p(spear_smi_remove),
-	.suspend = spear_smi_suspend,
-	.resume = spear_smi_resume,
 };
 
 static int spear_smi_init(void)
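
Note: the spear_smi changes above are a standard conversion to managed (devm_*) resources plus a dev_pm_ops based suspend/resume. As a rough, hypothetical sketch of that pattern only (not the actual driver; the foo_* names are made up), a probe/PM pair written this way looks like:

    #include <linux/clk.h>
    #include <linux/interrupt.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/pm.h>
    #include <linux/slab.h>

    struct foo_priv {
    	struct clk *clk;
    };

    static irqreturn_t foo_int_handler(int irq, void *dev_id)
    {
    	return IRQ_HANDLED;
    }

    static int foo_probe(struct platform_device *pdev)
    {
    	struct foo_priv *priv;
    	int irq, ret;

    	/* devm_ allocations are released automatically on probe failure and
    	 * on unbind, so most explicit error-unwind labels disappear. */
    	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
    	if (!priv)
    		return -ENOMEM;

    	priv->clk = devm_clk_get(&pdev->dev, NULL);
    	if (IS_ERR(priv->clk))
    		return PTR_ERR(priv->clk);

    	ret = clk_prepare_enable(priv->clk);
    	if (ret)
    		return ret;

    	irq = platform_get_irq(pdev, 0);
    	ret = devm_request_irq(&pdev->dev, irq, foo_int_handler, 0,
    			       pdev->name, priv);
    	if (ret) {
    		clk_disable_unprepare(priv->clk);
    		return ret;
    	}

    	platform_set_drvdata(pdev, priv);
    	return 0;
    }

    #ifdef CONFIG_PM
    static int foo_suspend(struct device *dev)
    {
    	struct foo_priv *priv = dev_get_drvdata(dev);

    	clk_disable_unprepare(priv->clk);
    	return 0;
    }

    static int foo_resume(struct device *dev)
    {
    	struct foo_priv *priv = dev_get_drvdata(dev);

    	return clk_prepare_enable(priv->clk);
    }

    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
    #endif

    static struct platform_driver foo_driver = {
    	.driver = {
    		.name = "foo",
    		.owner = THIS_MODULE,
    #ifdef CONFIG_PM
    		.pm = &foo_pm_ops,
    #endif
    	},
    	.probe = foo_probe,
    };
    module_platform_driver(foo_driver);
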
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 5ba2458e799a..2e47c2ed0a2d 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -373,7 +373,7 @@ config MTD_FORTUNET
 	  have such a board, say 'Y'.
 
 config MTD_AUTCPU12
-	tristate "NV-RAM mapping AUTCPU12 board"
+	bool "NV-RAM mapping AUTCPU12 board"
 	depends on ARCH_AUTCPU12
 	help
 	  This enables access to the NV-RAM on autronix autcpu12 board.
@@ -443,22 +443,10 @@ config MTD_GPIO_ADDR
 
 config MTD_UCLINUX
 	bool "Generic uClinux RAM/ROM filesystem support"
-	depends on MTD_RAM=y && !MMU
+	depends on MTD_RAM=y && (!MMU || COLDFIRE)
 	help
 	  Map driver to support image based filesystems for uClinux.
 
-config MTD_WRSBC8260
-	tristate "Map driver for WindRiver PowerQUICC II MPC82xx board"
-	depends on (SBC82xx || SBC8560)
-	select MTD_MAP_BANK_WIDTH_4
-	select MTD_MAP_BANK_WIDTH_1
-	select MTD_CFI_I1
-	select MTD_CFI_I4
-	help
-	  Map driver for WindRiver PowerQUICC II MPC82xx board. Drives
-	  all three flash regions on CS0, CS1 and CS6 if they are configured
-	  correctly by the boot loader.
-
 config MTD_DMV182
 	tristate "Map driver for Dy-4 SVME/DMV-182 board."
 	depends on DMV182
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 68a9a91d344f..deb43e9a1e7f 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -47,7 +47,6 @@ obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
 obj-$(CONFIG_MTD_H720X)		+= h720x-flash.o
 obj-$(CONFIG_MTD_IXP4XX)	+= ixp4xx.o
 obj-$(CONFIG_MTD_IXP2000)	+= ixp2000.o
-obj-$(CONFIG_MTD_WRSBC8260)	+= wr_sbc82xx_flash.o
 obj-$(CONFIG_MTD_DMV182)	+= dmv182.o
 obj-$(CONFIG_MTD_PLATRAM)	+= plat-ram.o
 obj-$(CONFIG_MTD_INTEL_VR_NOR)	+= intel_vr_nor.o
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
index e5bfd0e093bb..76fb594bb1d9 100644
--- a/drivers/mtd/maps/autcpu12-nvram.c
+++ b/drivers/mtd/maps/autcpu12-nvram.c
@@ -15,43 +15,54 @@
15 * You should have received a copy of the GNU General Public License 15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software 16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 */ 18 */
19#include <linux/sizes.h>
20 20
21#include <linux/module.h>
22#include <linux/types.h> 21#include <linux/types.h>
23#include <linux/kernel.h> 22#include <linux/kernel.h>
24#include <linux/ioport.h>
25#include <linux/init.h> 23#include <linux/init.h>
26#include <asm/io.h> 24#include <linux/device.h>
27#include <asm/sizes.h> 25#include <linux/module.h>
28#include <mach/hardware.h> 26#include <linux/platform_device.h>
29#include <mach/autcpu12.h> 27
30#include <linux/mtd/mtd.h> 28#include <linux/mtd/mtd.h>
31#include <linux/mtd/map.h> 29#include <linux/mtd/map.h>
32#include <linux/mtd/partitions.h>
33
34
35static struct mtd_info *sram_mtd;
36 30
37struct map_info autcpu12_sram_map = { 31struct autcpu12_nvram_priv {
38 .name = "SRAM", 32 struct mtd_info *mtd;
39 .size = 32768, 33 struct map_info map;
40 .bankwidth = 4,
41 .phys = 0x12000000,
42}; 34};
43 35
44static int __init init_autcpu12_sram (void) 36static int __devinit autcpu12_nvram_probe(struct platform_device *pdev)
45{ 37{
46 int err, save0, save1; 38 map_word tmp, save0, save1;
39 struct resource *res;
40 struct autcpu12_nvram_priv *priv;
47 41
48 autcpu12_sram_map.virt = ioremap(0x12000000, SZ_128K); 42 priv = devm_kzalloc(&pdev->dev,
49 if (!autcpu12_sram_map.virt) { 43 sizeof(struct autcpu12_nvram_priv), GFP_KERNEL);
50 printk("Failed to ioremap autcpu12 NV-RAM space\n"); 44 if (!priv)
51 err = -EIO; 45 return -ENOMEM;
52 goto out; 46
47 platform_set_drvdata(pdev, priv);
48
49 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
50 if (!res) {
51 dev_err(&pdev->dev, "failed to get memory resource\n");
52 return -ENOENT;
53 }
54
55 priv->map.bankwidth = 4;
56 priv->map.phys = res->start;
57 priv->map.size = resource_size(res);
58 priv->map.virt = devm_request_and_ioremap(&pdev->dev, res);
59 strcpy((char *)priv->map.name, res->name);
60 if (!priv->map.virt) {
61 dev_err(&pdev->dev, "failed to remap mem resource\n");
62 return -EBUSY;
53 } 63 }
54 simple_map_init(&autcpu_sram_map); 64
65 simple_map_init(&priv->map);
55 66
56 /* 67 /*
57 * Check for 32K/128K 68 * Check for 32K/128K
@@ -61,65 +72,59 @@ static int __init init_autcpu12_sram (void)
61 * Read and check result on ofs 0x0 72 * Read and check result on ofs 0x0
62 * Restore contents 73 * Restore contents
63 */ 74 */
64 save0 = map_read32(&autcpu12_sram_map,0); 75 save0 = map_read(&priv->map, 0);
65 save1 = map_read32(&autcpu12_sram_map,0x10000); 76 save1 = map_read(&priv->map, 0x10000);
66 map_write32(&autcpu12_sram_map,~save0,0x10000); 77 tmp.x[0] = ~save0.x[0];
67 /* if we find this pattern on 0x0, we have 32K size 78 map_write(&priv->map, tmp, 0x10000);
68 * restore contents and exit 79 tmp = map_read(&priv->map, 0);
69 */ 80 /* if we find this pattern on 0x0, we have 32K size */
70 if ( map_read32(&autcpu12_sram_map,0) != save0) { 81 if (!map_word_equal(&priv->map, tmp, save0)) {
71 map_write32(&autcpu12_sram_map,save0,0x0); 82 map_write(&priv->map, save0, 0x0);
72 goto map; 83 priv->map.size = SZ_32K;
84 } else
85 map_write(&priv->map, save1, 0x10000);
86
87 priv->mtd = do_map_probe("map_ram", &priv->map);
88 if (!priv->mtd) {
89 dev_err(&pdev->dev, "probing failed\n");
90 return -ENXIO;
73 } 91 }
74 /* We have a 128K found, restore 0x10000 and set size
75 * to 128K
76 */
77 map_write32(&autcpu12_sram_map,save1,0x10000);
78 autcpu12_sram_map.size = SZ_128K;
79
80map:
81 sram_mtd = do_map_probe("map_ram", &autcpu12_sram_map);
82 if (!sram_mtd) {
83 printk("NV-RAM probe failed\n");
84 err = -ENXIO;
85 goto out_ioremap;
86 }
87
88 sram_mtd->owner = THIS_MODULE;
89 sram_mtd->erasesize = 16;
90 92
91 if (mtd_device_register(sram_mtd, NULL, 0)) { 93 priv->mtd->owner = THIS_MODULE;
92 printk("NV-RAM device addition failed\n"); 94 priv->mtd->erasesize = 16;
93 err = -ENOMEM; 95 priv->mtd->dev.parent = &pdev->dev;
94 goto out_probe; 96 if (!mtd_device_register(priv->mtd, NULL, 0)) {
97 dev_info(&pdev->dev,
98 "NV-RAM device size %ldKiB registered on AUTCPU12\n",
99 priv->map.size / SZ_1K);
100 return 0;
95 } 101 }
96 102
97 printk("NV-RAM device size %ldKiB registered on AUTCPU12\n",autcpu12_sram_map.size/SZ_1K); 103 map_destroy(priv->mtd);
98 104 dev_err(&pdev->dev, "NV-RAM device addition failed\n");
99 return 0; 105 return -ENOMEM;
100
101out_probe:
102 map_destroy(sram_mtd);
103 sram_mtd = 0;
104
105out_ioremap:
106 iounmap((void *)autcpu12_sram_map.virt);
107out:
108 return err;
109} 106}
110 107
111static void __exit cleanup_autcpu12_maps(void) 108static int __devexit autcpu12_nvram_remove(struct platform_device *pdev)
112{ 109{
113 if (sram_mtd) { 110 struct autcpu12_nvram_priv *priv = platform_get_drvdata(pdev);
114 mtd_device_unregister(sram_mtd); 111
115 map_destroy(sram_mtd); 112 mtd_device_unregister(priv->mtd);
116 iounmap((void *)autcpu12_sram_map.virt); 113 map_destroy(priv->mtd);
117 } 114
115 return 0;
118} 116}
119 117
120module_init(init_autcpu12_sram); 118static struct platform_driver autcpu12_nvram_driver = {
121module_exit(cleanup_autcpu12_maps); 119 .driver = {
120 .name = "autcpu12_nvram",
121 .owner = THIS_MODULE,
122 },
123 .probe = autcpu12_nvram_probe,
124 .remove = __devexit_p(autcpu12_nvram_remove),
125};
126module_platform_driver(autcpu12_nvram_driver);
122 127
123MODULE_AUTHOR("Thomas Gleixner"); 128MODULE_AUTHOR("Thomas Gleixner");
124MODULE_DESCRIPTION("autcpu12 NV-RAM map driver"); 129MODULE_DESCRIPTION("autcpu12 NVRAM map driver");
125MODULE_LICENSE("GPL"); 130MODULE_LICENSE("GPL");
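
Note: the 32K/128K detection in autcpu12_nvram_probe above relies on address aliasing: on a 32 KiB part, offset 0x10000 wraps around onto offset 0x0, so writing the complement of the saved word at 0x10000 and seeing it show through at 0x0 proves the smaller size. A stripped-down, hypothetical illustration of the same trick, using plain MMIO accessors instead of the map_word helpers:

    #include <linux/io.h>
    #include <linux/sizes.h>

    /* Probe whether a battery-backed SRAM window is 32 KiB or 128 KiB by
     * checking if offset 0x10000 aliases onto offset 0. Contents at both
     * offsets are saved and restored. Illustrative only.
     */
    static unsigned long sram_detect_size(void __iomem *base)
    {
    	u32 save0 = readl(base);
    	u32 save1 = readl(base + 0x10000);
    	unsigned long size;

    	writel(~save0, base + 0x10000);

    	if (readl(base) != save0) {
    		/* The write showed through at offset 0: 32 KiB device. */
    		writel(save0, base);
    		size = SZ_32K;
    	} else {
    		/* No aliasing: full 128 KiB, restore the probed word. */
    		writel(save1, base + 0x10000);
    		size = SZ_128K;
    	}

    	return size;
    }
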
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index f14ce0af763f..1c30c1a307f4 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -43,26 +43,14 @@ static map_word mtd_pci_read8(struct map_info *_map, unsigned long ofs)
43 struct map_pci_info *map = (struct map_pci_info *)_map; 43 struct map_pci_info *map = (struct map_pci_info *)_map;
44 map_word val; 44 map_word val;
45 val.x[0]= readb(map->base + map->translate(map, ofs)); 45 val.x[0]= readb(map->base + map->translate(map, ofs));
46// printk("read8 : %08lx => %02x\n", ofs, val.x[0]);
47 return val; 46 return val;
48} 47}
49 48
50#if 0
51static map_word mtd_pci_read16(struct map_info *_map, unsigned long ofs)
52{
53 struct map_pci_info *map = (struct map_pci_info *)_map;
54 map_word val;
55 val.x[0] = readw(map->base + map->translate(map, ofs));
56// printk("read16: %08lx => %04x\n", ofs, val.x[0]);
57 return val;
58}
59#endif
60static map_word mtd_pci_read32(struct map_info *_map, unsigned long ofs) 49static map_word mtd_pci_read32(struct map_info *_map, unsigned long ofs)
61{ 50{
62 struct map_pci_info *map = (struct map_pci_info *)_map; 51 struct map_pci_info *map = (struct map_pci_info *)_map;
63 map_word val; 52 map_word val;
64 val.x[0] = readl(map->base + map->translate(map, ofs)); 53 val.x[0] = readl(map->base + map->translate(map, ofs));
65// printk("read32: %08lx => %08x\n", ofs, val.x[0]);
66 return val; 54 return val;
67} 55}
68 56
@@ -75,22 +63,12 @@ static void mtd_pci_copyfrom(struct map_info *_map, void *to, unsigned long from
75static void mtd_pci_write8(struct map_info *_map, map_word val, unsigned long ofs) 63static void mtd_pci_write8(struct map_info *_map, map_word val, unsigned long ofs)
76{ 64{
77 struct map_pci_info *map = (struct map_pci_info *)_map; 65 struct map_pci_info *map = (struct map_pci_info *)_map;
78// printk("write8 : %08lx <= %02x\n", ofs, val.x[0]);
79 writeb(val.x[0], map->base + map->translate(map, ofs)); 66 writeb(val.x[0], map->base + map->translate(map, ofs));
80} 67}
81 68
82#if 0
83static void mtd_pci_write16(struct map_info *_map, map_word val, unsigned long ofs)
84{
85 struct map_pci_info *map = (struct map_pci_info *)_map;
86// printk("write16: %08lx <= %04x\n", ofs, val.x[0]);
87 writew(val.x[0], map->base + map->translate(map, ofs));
88}
89#endif
90static void mtd_pci_write32(struct map_info *_map, map_word val, unsigned long ofs) 69static void mtd_pci_write32(struct map_info *_map, map_word val, unsigned long ofs)
91{ 70{
92 struct map_pci_info *map = (struct map_pci_info *)_map; 71 struct map_pci_info *map = (struct map_pci_info *)_map;
93// printk("write32: %08lx <= %08x\n", ofs, val.x[0]);
94 writel(val.x[0], map->base + map->translate(map, ofs)); 72 writel(val.x[0], map->base + map->translate(map, ofs));
95} 73}
96 74
@@ -358,4 +336,3 @@ MODULE_LICENSE("GPL");
358MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); 336MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
359MODULE_DESCRIPTION("Generic PCI map driver"); 337MODULE_DESCRIPTION("Generic PCI map driver");
360MODULE_DEVICE_TABLE(pci, mtd_pci_ids); 338MODULE_DEVICE_TABLE(pci, mtd_pci_ids);
361
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 2e6fb6831d55..6f19acadb06c 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -169,6 +169,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
169 struct mtd_info **mtd_list = NULL; 169 struct mtd_info **mtd_list = NULL;
170 resource_size_t res_size; 170 resource_size_t res_size;
171 struct mtd_part_parser_data ppdata; 171 struct mtd_part_parser_data ppdata;
172 bool map_indirect;
172 173
173 match = of_match_device(of_flash_match, &dev->dev); 174 match = of_match_device(of_flash_match, &dev->dev);
174 if (!match) 175 if (!match)
@@ -192,6 +193,8 @@ static int __devinit of_flash_probe(struct platform_device *dev)
192 } 193 }
193 count /= reg_tuple_size; 194 count /= reg_tuple_size;
194 195
196 map_indirect = of_property_read_bool(dp, "no-unaligned-direct-access");
197
195 err = -ENOMEM; 198 err = -ENOMEM;
196 info = kzalloc(sizeof(struct of_flash) + 199 info = kzalloc(sizeof(struct of_flash) +
197 sizeof(struct of_flash_list) * count, GFP_KERNEL); 200 sizeof(struct of_flash_list) * count, GFP_KERNEL);
@@ -247,6 +250,17 @@ static int __devinit of_flash_probe(struct platform_device *dev)
247 250
248 simple_map_init(&info->list[i].map); 251 simple_map_init(&info->list[i].map);
249 252
253 /*
254 * On some platforms (e.g. MPC5200) a direct 1:1 mapping
255 * may cause problems with JFFS2 usage, as the local bus (LPB)
256 * doesn't support unaligned accesses as implemented in the
257 * JFFS2 code via memcpy(). By setting NO_XIP, the
258 * flash will not be exposed directly to the MTD users
259 * (e.g. JFFS2) any more.
260 */
261 if (map_indirect)
262 info->list[i].map.phys = NO_XIP;
263
250 if (probe_type) { 264 if (probe_type) {
251 info->list[i].mtd = do_map_probe(probe_type, 265 info->list[i].mtd = do_map_probe(probe_type,
252 &info->list[i].map); 266 &info->list[i].map);
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 6f52e1f288b6..49c3fe715eee 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -100,8 +100,6 @@ static int rbtx4939_flash_probe(struct platform_device *dev)
 		goto err_out;
 	}
 	info->mtd->owner = THIS_MODULE;
-	if (err)
-		goto err_out;
 	err = mtd_device_parse_register(info->mtd, NULL, NULL, pdata->parts,
 					pdata->nr_parts);
 
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index c3bb304eca07..299bf88a6f41 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -67,10 +67,16 @@ static int __init uclinux_mtd_init(void)
 	printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n",
 	       (int) mapp->phys, (int) mapp->size);
 
-	mapp->virt = ioremap_nocache(mapp->phys, mapp->size);
+	/*
+	 * The filesystem is guaranteed to be in direct mapped memory. It is
+	 * directly following the kernels own bss region. Following the same
+	 * mechanism used by architectures setting up traditional initrds we
+	 * use phys_to_virt to get the virtual address of its start.
+	 */
+	mapp->virt = phys_to_virt(mapp->phys);
 
 	if (mapp->virt == 0) {
-		printk("uclinux[mtd]: ioremap_nocache() failed\n");
+		printk("uclinux[mtd]: no virtual mapping?\n");
 		return(-EIO);
 	}
 
@@ -79,7 +85,6 @@ static int __init uclinux_mtd_init(void)
 	mtd = do_map_probe("map_ram", mapp);
 	if (!mtd) {
 		printk("uclinux[mtd]: failed to find a mapping?\n");
-		iounmap(mapp->virt);
 		return(-ENXIO);
 	}
 
@@ -102,10 +107,8 @@ static void __exit uclinux_mtd_cleanup(void)
 		map_destroy(uclinux_ram_mtdinfo);
 		uclinux_ram_mtdinfo = NULL;
 	}
-	if (uclinux_ram_map.virt) {
-		iounmap((void *) uclinux_ram_map.virt);
+	if (uclinux_ram_map.virt)
 		uclinux_ram_map.virt = 0;
-	}
 }
 
 /****************************************************************************/
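
Note: the uclinux change above swaps ioremap_nocache() for phys_to_virt() because the embedded filesystem image already lives in the kernel's directly mapped RAM, so no new mapping has to be created and nothing has to be unmapped on the error and cleanup paths. A minimal, hypothetical map_info setup following the same reasoning (names are made up):

    #include <linux/io.h>
    #include <linux/mtd/map.h>

    static struct map_info example_ram_map;

    /* The image is assumed to sit in directly mapped RAM right after the
     * kernel's bss, as the comment in the patch describes; phys_to_virt()
     * is then enough to address it, no ioremap()/iounmap() pair needed.
     */
    static void example_map_setup(unsigned long phys, unsigned long size)
    {
    	example_ram_map.name = "RAM";
    	example_ram_map.phys = phys;
    	example_ram_map.size = size;
    	example_ram_map.bankwidth = 4;
    	example_ram_map.virt = phys_to_virt(phys);
    	simple_map_init(&example_ram_map);
    }
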
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
deleted file mode 100644
index e7534c82f93a..000000000000
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ /dev/null
@@ -1,174 +0,0 @@
1/*
2 * Map for flash chips on Wind River PowerQUICC II SBC82xx board.
3 *
4 * Copyright (C) 2004 Red Hat, Inc.
5 *
6 * Author: David Woodhouse <dwmw2@infradead.org>
7 *
8 */
9
10#include <linux/module.h>
11#include <linux/types.h>
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/slab.h>
15#include <asm/io.h>
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/map.h>
18#include <linux/mtd/partitions.h>
19
20#include <asm/immap_cpm2.h>
21
22static struct mtd_info *sbcmtd[3];
23
24struct map_info sbc82xx_flash_map[3] = {
25 {.name = "Boot flash"},
26 {.name = "Alternate boot flash"},
27 {.name = "User flash"}
28};
29
30static struct mtd_partition smallflash_parts[] = {
31 {
32 .name = "space",
33 .size = 0x100000,
34 .offset = 0,
35 }, {
36 .name = "bootloader",
37 .size = MTDPART_SIZ_FULL,
38 .offset = MTDPART_OFS_APPEND,
39 }
40};
41
42static struct mtd_partition bigflash_parts[] = {
43 {
44 .name = "bootloader",
45 .size = 0x00100000,
46 .offset = 0,
47 }, {
48 .name = "file system",
49 .size = 0x01f00000,
50 .offset = MTDPART_OFS_APPEND,
51 }, {
52 .name = "boot config",
53 .size = 0x00100000,
54 .offset = MTDPART_OFS_APPEND,
55 }, {
56 .name = "space",
57 .size = 0x01f00000,
58 .offset = MTDPART_OFS_APPEND,
59 }
60};
61
62static const char *part_probes[] __initconst = {"cmdlinepart", "RedBoot", NULL};
63
64#define init_sbc82xx_one_flash(map, br, or) \
65do { \
66 (map).phys = (br & 1) ? (br & 0xffff8000) : 0; \
67 (map).size = (br & 1) ? (~(or & 0xffff8000) + 1) : 0; \
68 switch (br & 0x00001800) { \
69 case 0x00000000: \
70 case 0x00000800: (map).bankwidth = 1; break; \
71 case 0x00001000: (map).bankwidth = 2; break; \
72 case 0x00001800: (map).bankwidth = 4; break; \
73 } \
74} while (0);
75
76static int __init init_sbc82xx_flash(void)
77{
78 volatile memctl_cpm2_t *mc = &cpm2_immr->im_memctl;
79 int bigflash;
80 int i;
81
82#ifdef CONFIG_SBC8560
83 mc = ioremap(0xff700000 + 0x5000, sizeof(memctl_cpm2_t));
84#else
85 mc = &cpm2_immr->im_memctl;
86#endif
87
88 bigflash = 1;
89 if ((mc->memc_br0 & 0x00001800) == 0x00001800)
90 bigflash = 0;
91
92 init_sbc82xx_one_flash(sbc82xx_flash_map[0], mc->memc_br0, mc->memc_or0);
93 init_sbc82xx_one_flash(sbc82xx_flash_map[1], mc->memc_br6, mc->memc_or6);
94 init_sbc82xx_one_flash(sbc82xx_flash_map[2], mc->memc_br1, mc->memc_or1);
95
96#ifdef CONFIG_SBC8560
97 iounmap((void *) mc);
98#endif
99
100 for (i=0; i<3; i++) {
101 int8_t flashcs[3] = { 0, 6, 1 };
102 int nr_parts;
103 struct mtd_partition *defparts;
104
105 printk(KERN_NOTICE "PowerQUICC II %s (%ld MiB on CS%d",
106 sbc82xx_flash_map[i].name,
107 (sbc82xx_flash_map[i].size >> 20),
108 flashcs[i]);
109 if (!sbc82xx_flash_map[i].phys) {
110 /* We know it can't be at zero. */
111 printk("): disabled by bootloader.\n");
112 continue;
113 }
114 printk(" at %08lx)\n", sbc82xx_flash_map[i].phys);
115
116 sbc82xx_flash_map[i].virt = ioremap(sbc82xx_flash_map[i].phys,
117 sbc82xx_flash_map[i].size);
118
119 if (!sbc82xx_flash_map[i].virt) {
120 printk("Failed to ioremap\n");
121 continue;
122 }
123
124 simple_map_init(&sbc82xx_flash_map[i]);
125
126 sbcmtd[i] = do_map_probe("cfi_probe", &sbc82xx_flash_map[i]);
127
128 if (!sbcmtd[i])
129 continue;
130
131 sbcmtd[i]->owner = THIS_MODULE;
132
133 /* No partitioning detected. Use default */
134 if (i == 2) {
135 defparts = NULL;
136 nr_parts = 0;
137 } else if (i == bigflash) {
138 defparts = bigflash_parts;
139 nr_parts = ARRAY_SIZE(bigflash_parts);
140 } else {
141 defparts = smallflash_parts;
142 nr_parts = ARRAY_SIZE(smallflash_parts);
143 }
144
145 mtd_device_parse_register(sbcmtd[i], part_probes, NULL,
146 defparts, nr_parts);
147 }
148 return 0;
149}
150
151static void __exit cleanup_sbc82xx_flash(void)
152{
153 int i;
154
155 for (i=0; i<3; i++) {
156 if (!sbcmtd[i])
157 continue;
158
159 mtd_device_unregister(sbcmtd[i]);
160
161 map_destroy(sbcmtd[i]);
162
163 iounmap((void *)sbc82xx_flash_map[i].virt);
164 sbc82xx_flash_map[i].virt = 0;
165 }
166}
167
168module_init(init_sbc82xx_flash);
169module_exit(cleanup_sbc82xx_flash);
170
171
172MODULE_LICENSE("GPL");
173MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
174MODULE_DESCRIPTION("Flash map driver for WindRiver PowerQUICC II");
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 73ae81a629f2..82c06165d3d2 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1162,7 +1162,11 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
 	resource_size_t start, off;
 	unsigned long len, vma_len;
 
-	if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
+	/* This is broken because it assumes the MTD device is map-based
+	   and that mtd->priv is a valid struct map_info. It should be
+	   replaced with something that uses the mtd_get_unmapped_area()
+	   operation properly. */
+	if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
 		off = get_vm_offset(vma);
 		start = map->phys;
 		len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index b9adff543f5f..374c46dff7dd 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -858,6 +858,27 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
 }
 EXPORT_SYMBOL_GPL(mtd_panic_write);
 
+int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+	int ret_code;
+	ops->retlen = ops->oobretlen = 0;
+	if (!mtd->_read_oob)
+		return -EOPNOTSUPP;
+	/*
+	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
+	 * similar to mtd->_read(), returning a non-negative integer
+	 * representing max bitflips. In other cases, mtd->_read_oob() may
+	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
+	 */
+	ret_code = mtd->_read_oob(mtd, from, ops);
+	if (unlikely(ret_code < 0))
+		return ret_code;
+	if (mtd->ecc_strength == 0)
+		return 0;	/* device lacks ecc */
+	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
+}
+EXPORT_SYMBOL_GPL(mtd_read_oob);
+
 /*
  * Method to access the protection register area, present in some flash
  * devices. The user data is one time programmable but the factory data is read
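
Note: the new mtd_read_oob() wrapper above applies the same bitflip accounting as mtd_read(): the driver's _read_oob() reports the maximum number of bitflips seen in any ECC step, and the wrapper turns that into 0 or -EUCLEAN based on mtd->bitflip_threshold. A hedged sketch of how a caller might treat the result (buffer handling and sizes are made up for illustration):

    #include <linux/mtd/mtd.h>

    /* Read one page plus its OOB area and decide whether the block needs
     * scrubbing. -EUCLEAN means the data was corrected but the bitflip
     * count reached mtd->bitflip_threshold; it is not a hard failure.
     */
    static int read_page_and_oob(struct mtd_info *mtd, loff_t from,
    			     u8 *data, u8 *oob)
    {
    	struct mtd_oob_ops ops = {
    		.mode	= MTD_OPS_AUTO_OOB,
    		.len	= mtd->writesize,
    		.ooblen	= mtd->oobavail,
    		.datbuf	= data,
    		.oobbuf	= oob,
    	};
    	int ret = mtd_read_oob(mtd, from, &ops);

    	if (ret == -EUCLEAN)
    		return 1;	/* data usable, but consider moving it */
    	return ret;		/* 0 on clean read, negative on error */
    }
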
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 438737a1f59a..f5b3f91fa1cc 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -169,14 +169,7 @@ static void mtdoops_workfunc_erase(struct work_struct *work)
 		cxt->nextpage = 0;
 	}
 
-	while (1) {
-		ret = mtd_block_isbad(mtd, cxt->nextpage * record_size);
-		if (!ret)
-			break;
-		if (ret < 0) {
-			printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n");
-			return;
-		}
+	while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) {
 badblock:
 		printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
 		       cxt->nextpage * record_size);
@@ -190,6 +183,11 @@ badblock:
 		}
 	}
 
+	if (ret < 0) {
+		printk(KERN_ERR "mtdoops: mtd_block_isbad failed, aborting\n");
+		return;
+	}
+
 	for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
 		ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);
 
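
Note: the restructured mtdoops loop above leans on the return convention of mtd_block_isbad(): a positive value means the block is marked bad, zero means it is usable, and a negative value is an error. A small, hypothetical helper built on the same convention, skipping bad blocks while scanning for the next writable offset:

    #include <linux/errno.h>
    #include <linux/mtd/mtd.h>

    /* Return the offset of the next good erase block at or after 'ofs',
     * or a negative errno. Purely illustrative of the isbad() convention.
     */
    static loff_t next_good_block(struct mtd_info *mtd, loff_t ofs)
    {
    	int ret;

    	while (ofs < mtd->size) {
    		ret = mtd_block_isbad(mtd, ofs);
    		if (ret < 0)
    			return ret;	/* the query itself failed */
    		if (ret == 0)
    			return ofs;	/* good block */
    		ofs += mtd->erasesize;	/* marked bad, skip it */
    	}
    	return -ENOSPC;
    }
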
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 3a49e6de5e60..70fa70a8318f 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -711,6 +711,8 @@ static const char *default_mtd_part_types[] = {
  * partition parsers, specified in @types. However, if @types is %NULL, then
  * the default list of parsers is used. The default list contains only the
  * "cmdlinepart" and "ofpart" parsers ATM.
+ * Note: If there are more then one parser in @types, the kernel only takes the
+ * partitions parsed out by the first parser.
  *
  * This function may return:
  *  o a negative error code in case of failure
@@ -735,11 +737,12 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
 		if (!parser)
 			continue;
 		ret = (*parser->parse_fn)(master, pparts, data);
+		put_partition_parser(parser);
 		if (ret > 0) {
 			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
 			       ret, parser->name, master->name);
+			break;
 		}
-		put_partition_parser(parser);
 	}
 	return ret;
 }
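
Note: the parse_mtd_partitions() change above makes the loop stop at the first parser that reports partitions, which is exactly what the added kernel-doc note says. From a driver's point of view the order of the probe-type array therefore matters. A hedged example of the calling side (partition contents and names are invented):

    #include <linux/kernel.h>
    #include <linux/sizes.h>
    #include <linux/mtd/mtd.h>
    #include <linux/mtd/partitions.h>

    /* Parsers are tried left to right; with the change above, if
     * "cmdlinepart" yields partitions, "RedBoot" is never consulted for
     * this device. The static table is only a fallback when no parser
     * finds anything.
     */
    static const char *probe_types[] = { "cmdlinepart", "RedBoot", NULL };

    static struct mtd_partition fallback_parts[] = {
    	{
    		.name	= "boot",
    		.offset	= 0,
    		.size	= SZ_1M,
    	}, {
    		.name	= "data",
    		.offset	= MTDPART_OFS_APPEND,
    		.size	= MTDPART_SIZ_FULL,
    	},
    };

    static int register_example(struct mtd_info *mtd)
    {
    	return mtd_device_parse_register(mtd, probe_types, NULL,
    					 fallback_parts,
    					 ARRAY_SIZE(fallback_parts));
    }
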
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 598cd0a3adee..4883139460be 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -22,15 +22,6 @@ menuconfig MTD_NAND
22 22
23if MTD_NAND 23if MTD_NAND
24 24
25config MTD_NAND_VERIFY_WRITE
26 bool "Verify NAND page writes"
27 help
28 This adds an extra check when data is written to the flash. The
29 NAND flash device internally checks only bits transitioning
30 from 1 to 0. There is a rare possibility that even though the
31 device thinks the write was successful, a bit could have been
32 flipped accidentally due to device wear or something else.
33
34config MTD_NAND_BCH 25config MTD_NAND_BCH
35 tristate 26 tristate
36 select BCH 27 select BCH
@@ -267,22 +258,6 @@ config MTD_NAND_S3C2410_CLKSTOP
267 when the is NAND chip selected or released, but will save 258 when the is NAND chip selected or released, but will save
268 approximately 5mA of power when there is nothing happening. 259 approximately 5mA of power when there is nothing happening.
269 260
270config MTD_NAND_BCM_UMI
271 tristate "NAND Flash support for BCM Reference Boards"
272 depends on ARCH_BCMRING
273 help
274 This enables the NAND flash controller on the BCM UMI block.
275
276 No board specific support is done by this driver, each board
277 must advertise a platform_device for the driver to attach.
278
279config MTD_NAND_BCM_UMI_HWCS
280 bool "BCM UMI NAND Hardware CS"
281 depends on MTD_NAND_BCM_UMI
282 help
283 Enable the use of the BCM UMI block's internal CS using NAND.
284 This should only be used if you know the external NAND CS can toggle.
285
286config MTD_NAND_DISKONCHIP 261config MTD_NAND_DISKONCHIP
287 tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)" 262 tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)"
288 depends on EXPERIMENTAL 263 depends on EXPERIMENTAL
@@ -356,7 +331,7 @@ config MTD_NAND_DISKONCHIP_BBTWRITE
 
 config MTD_NAND_DOCG4
 	tristate "Support for DiskOnChip G4 (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
+	depends on EXPERIMENTAL && HAS_IOMEM
 	select BCH
 	select BITREVERSE
 	help
@@ -414,6 +389,28 @@ config MTD_NAND_PXA3xx
414 This enables the driver for the NAND flash device found on 389 This enables the driver for the NAND flash device found on
415 PXA3xx processors 390 PXA3xx processors
416 391
392config MTD_NAND_SLC_LPC32XX
393 tristate "NXP LPC32xx SLC Controller"
394 depends on ARCH_LPC32XX
395 help
396 Enables support for NXP's LPC32XX SLC (i.e. for Single Level Cell
397 chips) NAND controller. This is the default for the PHYTEC 3250
398 reference board which contains a NAND256R3A2CZA6 chip.
399
400 Please check the actual NAND chip connected and its support
401 by the SLC NAND controller.
402
403config MTD_NAND_MLC_LPC32XX
404 tristate "NXP LPC32xx MLC Controller"
405 depends on ARCH_LPC32XX
406 help
407 Uses the LPC32XX MLC (i.e. for Multi Level Cell chips) NAND
408 controller. This is the default for the WORK92105 controller
409 board.
410
411 Please check the actual NAND chip connected and its support
412 by the MLC NAND controller.
413
417config MTD_NAND_CM_X270 414config MTD_NAND_CM_X270
418 tristate "Support for NAND Flash on CM-X270 modules" 415 tristate "Support for NAND Flash on CM-X270 modules"
419 depends on MACH_ARMCORE 416 depends on MACH_ARMCORE
@@ -439,10 +436,10 @@ config MTD_NAND_NANDSIM
 	  MTD nand layer.
 
 config MTD_NAND_GPMI_NAND
-	bool "GPMI NAND Flash Controller driver"
+	tristate "GPMI NAND Flash Controller driver"
 	depends on MTD_NAND && MXS_DMA
 	help
-	 Enables NAND Flash support for IMX23 or IMX28.
+	 Enables NAND Flash support for IMX23, IMX28 or IMX6.
 	 The GPMI controller is very powerful, with the help of BCH
 	 module, it can do the hardware ECC. The GPMI supports several
 	 NAND flashs at the same time. The GPMI may conflicts with other
@@ -510,7 +507,7 @@ config MTD_NAND_MPC5121_NFC
 
 config MTD_NAND_MXC
 	tristate "MXC NAND support"
-	depends on IMX_HAVE_PLATFORM_MXC_NAND
+	depends on ARCH_MXC
 	help
 	  This enables the driver for the NAND flash controller on the
 	  MXC processors.
@@ -567,4 +564,12 @@ config MTD_NAND_FSMC
567 Enables support for NAND Flash chips on the ST Microelectronics 564 Enables support for NAND Flash chips on the ST Microelectronics
568 Flexible Static Memory Controller (FSMC) 565 Flexible Static Memory Controller (FSMC)
569 566
567config MTD_NAND_XWAY
568 tristate "Support for NAND on Lantiq XWAY SoC"
569 depends on LANTIQ && SOC_TYPE_XWAY
570 select MTD_NAND_PLATFORM
571 help
572 Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
573 to the External Bus Unit (EBU).
574
570endif # MTD_NAND 575endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index d4b4d8739bd8..2cbd0916b733 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -40,16 +40,18 @@ obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
40obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o 40obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
41obj-$(CONFIG_MTD_NAND_FSL_IFC) += fsl_ifc_nand.o 41obj-$(CONFIG_MTD_NAND_FSL_IFC) += fsl_ifc_nand.o
42obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o 42obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o
43obj-$(CONFIG_MTD_NAND_SLC_LPC32XX) += lpc32xx_slc.o
44obj-$(CONFIG_MTD_NAND_MLC_LPC32XX) += lpc32xx_mlc.o
43obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o 45obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
44obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o 46obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
45obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o 47obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
46obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o 48obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
47obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o 49obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o
48obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o 50obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
49obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o
50obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o 51obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
51obj-$(CONFIG_MTD_NAND_RICOH) += r852.o 52obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
52obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o 53obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
53obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/ 54obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
55obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
54 56
55nand-objs := nand_base.o nand_bbt.o 57nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index a7040af08536..9e7723aa7acc 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -107,18 +107,6 @@ static void ams_delta_read_buf(struct mtd_info *mtd, u_char *buf, int len)
107 buf[i] = ams_delta_read_byte(mtd); 107 buf[i] = ams_delta_read_byte(mtd);
108} 108}
109 109
110static int ams_delta_verify_buf(struct mtd_info *mtd, const u_char *buf,
111 int len)
112{
113 int i;
114
115 for (i=0; i<len; i++)
116 if (buf[i] != ams_delta_read_byte(mtd))
117 return -EFAULT;
118
119 return 0;
120}
121
122/* 110/*
123 * Command control function 111 * Command control function
124 * 112 *
@@ -237,7 +225,6 @@ static int __devinit ams_delta_init(struct platform_device *pdev)
237 this->read_byte = ams_delta_read_byte; 225 this->read_byte = ams_delta_read_byte;
238 this->write_buf = ams_delta_write_buf; 226 this->write_buf = ams_delta_write_buf;
239 this->read_buf = ams_delta_read_buf; 227 this->read_buf = ams_delta_read_buf;
240 this->verify_buf = ams_delta_verify_buf;
241 this->cmd_ctrl = ams_delta_hwcontrol; 228 this->cmd_ctrl = ams_delta_hwcontrol;
242 if (gpio_request(AMS_DELTA_GPIO_PIN_NAND_RB, "nand_rdy") == 0) { 229 if (gpio_request(AMS_DELTA_GPIO_PIN_NAND_RB, "nand_rdy") == 0) {
243 this->dev_ready = ams_delta_nand_ready; 230 this->dev_ready = ams_delta_nand_ready;
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 97ac6712bb19..914455783302 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -1,20 +1,22 @@
1/* 1/*
2 * Copyright (C) 2003 Rick Bronson 2 * Copyright © 2003 Rick Bronson
3 * 3 *
4 * Derived from drivers/mtd/nand/autcpu12.c 4 * Derived from drivers/mtd/nand/autcpu12.c
5 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de) 5 * Copyright © 2001 Thomas Gleixner (gleixner@autronix.de)
6 * 6 *
7 * Derived from drivers/mtd/spia.c 7 * Derived from drivers/mtd/spia.c
8 * Copyright (C) 2000 Steven J. Hill (sjhill@cotw.com) 8 * Copyright © 2000 Steven J. Hill (sjhill@cotw.com)
9 * 9 *
10 * 10 *
11 * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263 11 * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
12 * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright (C) 2007 12 * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright © 2007
13 * 13 *
14 * Derived from Das U-Boot source code 14 * Derived from Das U-Boot source code
15 * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c) 15 * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
16 * (C) Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas 16 * © Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
17 * 17 *
18 * Add Programmable Multibit ECC support for various AT91 SoC
19 * © Copyright 2012 ATMEL, Hong Xu
18 * 20 *
19 * This program is free software; you can redistribute it and/or modify 21 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License version 2 as 22 * it under the terms of the GNU General Public License version 2 as
@@ -93,8 +95,36 @@ struct atmel_nand_host {
93 95
94 struct completion comp; 96 struct completion comp;
95 struct dma_chan *dma_chan; 97 struct dma_chan *dma_chan;
98
99 bool has_pmecc;
100 u8 pmecc_corr_cap;
101 u16 pmecc_sector_size;
102 u32 pmecc_lookup_table_offset;
103
104 int pmecc_bytes_per_sector;
105 int pmecc_sector_number;
106 int pmecc_degree; /* Degree of remainders */
107 int pmecc_cw_len; /* Length of codeword */
108
109 void __iomem *pmerrloc_base;
110 void __iomem *pmecc_rom_base;
111
112 /* lookup table for alpha_to and index_of */
113 void __iomem *pmecc_alpha_to;
114 void __iomem *pmecc_index_of;
115
116 /* data for pmecc computation */
117 int16_t *pmecc_partial_syn;
118 int16_t *pmecc_si;
119 int16_t *pmecc_smu; /* Sigma table */
120 int16_t *pmecc_lmu; /* polynomal order */
121 int *pmecc_mu;
122 int *pmecc_dmu;
123 int *pmecc_delta;
96}; 124};
97 125
126static struct nand_ecclayout atmel_pmecc_oobinfo;
127
98static int cpu_has_dma(void) 128static int cpu_has_dma(void)
99{ 129{
100 return cpu_is_at91sam9rl() || cpu_is_at91sam9g45(); 130 return cpu_is_at91sam9rl() || cpu_is_at91sam9g45();
@@ -288,6 +318,703 @@ static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
288} 318}
289 319
290/* 320/*
321 * Return number of ecc bytes per sector according to sector size and
322 * correction capability
323 *
324 * Following table shows what at91 PMECC supported:
325 * Correction Capability Sector_512_bytes Sector_1024_bytes
326 * ===================== ================ =================
327 * 2-bits 4-bytes 4-bytes
328 * 4-bits 7-bytes 7-bytes
329 * 8-bits 13-bytes 14-bytes
330 * 12-bits 20-bytes 21-bytes
331 * 24-bits 39-bytes 42-bytes
332 */
333static int __devinit pmecc_get_ecc_bytes(int cap, int sector_size)
334{
335 int m = 12 + sector_size / 512;
336 return (m * cap + 7) / 8;
337}
338
339static void __devinit pmecc_config_ecc_layout(struct nand_ecclayout *layout,
340 int oobsize, int ecc_len)
341{
342 int i;
343
344 layout->eccbytes = ecc_len;
345
346 /* ECC will occupy the last ecc_len bytes continuously */
347 for (i = 0; i < ecc_len; i++)
348 layout->eccpos[i] = oobsize - ecc_len + i;
349
350 layout->oobfree[0].offset = 2;
351 layout->oobfree[0].length =
352 oobsize - ecc_len - layout->oobfree[0].offset;
353}
354
355static void __devinit __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
356{
357 int table_size;
358
359 table_size = host->pmecc_sector_size == 512 ?
360 PMECC_LOOKUP_TABLE_SIZE_512 : PMECC_LOOKUP_TABLE_SIZE_1024;
361
362 return host->pmecc_rom_base + host->pmecc_lookup_table_offset +
363 table_size * sizeof(int16_t);
364}
365
366static void pmecc_data_free(struct atmel_nand_host *host)
367{
368 kfree(host->pmecc_partial_syn);
369 kfree(host->pmecc_si);
370 kfree(host->pmecc_lmu);
371 kfree(host->pmecc_smu);
372 kfree(host->pmecc_mu);
373 kfree(host->pmecc_dmu);
374 kfree(host->pmecc_delta);
375}
376
377static int __devinit pmecc_data_alloc(struct atmel_nand_host *host)
378{
379 const int cap = host->pmecc_corr_cap;
380
381 host->pmecc_partial_syn = kzalloc((2 * cap + 1) * sizeof(int16_t),
382 GFP_KERNEL);
383 host->pmecc_si = kzalloc((2 * cap + 1) * sizeof(int16_t), GFP_KERNEL);
384 host->pmecc_lmu = kzalloc((cap + 1) * sizeof(int16_t), GFP_KERNEL);
385 host->pmecc_smu = kzalloc((cap + 2) * (2 * cap + 1) * sizeof(int16_t),
386 GFP_KERNEL);
387 host->pmecc_mu = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL);
388 host->pmecc_dmu = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL);
389 host->pmecc_delta = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL);
390
391 if (host->pmecc_partial_syn &&
392 host->pmecc_si &&
393 host->pmecc_lmu &&
394 host->pmecc_smu &&
395 host->pmecc_mu &&
396 host->pmecc_dmu &&
397 host->pmecc_delta)
398 return 0;
399
400 /* error happened */
401 pmecc_data_free(host);
402 return -ENOMEM;
403}
404
405static void pmecc_gen_syndrome(struct mtd_info *mtd, int sector)
406{
407 struct nand_chip *nand_chip = mtd->priv;
408 struct atmel_nand_host *host = nand_chip->priv;
409 int i;
410 uint32_t value;
411
412 /* Fill odd syndromes */
413 for (i = 0; i < host->pmecc_corr_cap; i++) {
414 value = pmecc_readl_rem_relaxed(host->ecc, sector, i / 2);
415 if (i & 1)
416 value >>= 16;
417 value &= 0xffff;
418 host->pmecc_partial_syn[(2 * i) + 1] = (int16_t)value;
419 }
420}
421
422static void pmecc_substitute(struct mtd_info *mtd)
423{
424 struct nand_chip *nand_chip = mtd->priv;
425 struct atmel_nand_host *host = nand_chip->priv;
426 int16_t __iomem *alpha_to = host->pmecc_alpha_to;
427 int16_t __iomem *index_of = host->pmecc_index_of;
428 int16_t *partial_syn = host->pmecc_partial_syn;
429 const int cap = host->pmecc_corr_cap;
430 int16_t *si;
431 int i, j;
432
433 /* si[] is a table that holds the current syndrome value,
434 * an element of that table belongs to the field
435 */
436 si = host->pmecc_si;
437
438 memset(&si[1], 0, sizeof(int16_t) * (2 * cap - 1));
439
440 /* Computation 2t syndromes based on S(x) */
441 /* Odd syndromes */
442 for (i = 1; i < 2 * cap; i += 2) {
443 for (j = 0; j < host->pmecc_degree; j++) {
444 if (partial_syn[i] & ((unsigned short)0x1 << j))
445 si[i] = readw_relaxed(alpha_to + i * j) ^ si[i];
446 }
447 }
448 /* Even syndrome = (Odd syndrome) ** 2 */
449 for (i = 2, j = 1; j <= cap; i = ++j << 1) {
450 if (si[j] == 0) {
451 si[i] = 0;
452 } else {
453 int16_t tmp;
454
455 tmp = readw_relaxed(index_of + si[j]);
456 tmp = (tmp * 2) % host->pmecc_cw_len;
457 si[i] = readw_relaxed(alpha_to + tmp);
458 }
459 }
460
461 return;
462}
463
464static void pmecc_get_sigma(struct mtd_info *mtd)
465{
466 struct nand_chip *nand_chip = mtd->priv;
467 struct atmel_nand_host *host = nand_chip->priv;
468
469 int16_t *lmu = host->pmecc_lmu;
470 int16_t *si = host->pmecc_si;
471 int *mu = host->pmecc_mu;
472 int *dmu = host->pmecc_dmu; /* Discrepancy */
473 int *delta = host->pmecc_delta; /* Delta order */
474 int cw_len = host->pmecc_cw_len;
475 const int16_t cap = host->pmecc_corr_cap;
476 const int num = 2 * cap + 1;
477 int16_t __iomem *index_of = host->pmecc_index_of;
478 int16_t __iomem *alpha_to = host->pmecc_alpha_to;
479 int i, j, k;
480 uint32_t dmu_0_count, tmp;
481 int16_t *smu = host->pmecc_smu;
482
483 /* index of largest delta */
484 int ro;
485 int largest;
486 int diff;
487
488 dmu_0_count = 0;
489
490 /* First Row */
491
492 /* Mu */
493 mu[0] = -1;
494
495 memset(smu, 0, sizeof(int16_t) * num);
496 smu[0] = 1;
497
498 /* discrepancy set to 1 */
499 dmu[0] = 1;
500 /* polynom order set to 0 */
501 lmu[0] = 0;
502 delta[0] = (mu[0] * 2 - lmu[0]) >> 1;
503
504 /* Second Row */
505
506 /* Mu */
507 mu[1] = 0;
508 /* Sigma(x) set to 1 */
509 memset(&smu[num], 0, sizeof(int16_t) * num);
510 smu[num] = 1;
511
512 /* discrepancy set to S1 */
513 dmu[1] = si[1];
514
515 /* polynom order set to 0 */
516 lmu[1] = 0;
517
518 delta[1] = (mu[1] * 2 - lmu[1]) >> 1;
519
520 /* Init the Sigma(x) last row */
521 memset(&smu[(cap + 1) * num], 0, sizeof(int16_t) * num);
522
523 for (i = 1; i <= cap; i++) {
524 mu[i + 1] = i << 1;
525 /* Begin Computing Sigma (Mu+1) and L(mu) */
526 /* check if discrepancy is set to 0 */
527 if (dmu[i] == 0) {
528 dmu_0_count++;
529
530 tmp = ((cap - (lmu[i] >> 1) - 1) / 2);
531 if ((cap - (lmu[i] >> 1) - 1) & 0x1)
532 tmp += 2;
533 else
534 tmp += 1;
535
536 if (dmu_0_count == tmp) {
537 for (j = 0; j <= (lmu[i] >> 1) + 1; j++)
538 smu[(cap + 1) * num + j] =
539 smu[i * num + j];
540
541 lmu[cap + 1] = lmu[i];
542 return;
543 }
544
545 /* copy polynom */
546 for (j = 0; j <= lmu[i] >> 1; j++)
547 smu[(i + 1) * num + j] = smu[i * num + j];
548
549 /* copy previous polynom order to the next */
550 lmu[i + 1] = lmu[i];
551 } else {
552 ro = 0;
553 largest = -1;
554 /* find largest delta with dmu != 0 */
555 for (j = 0; j < i; j++) {
556 if ((dmu[j]) && (delta[j] > largest)) {
557 largest = delta[j];
558 ro = j;
559 }
560 }
561
562 /* compute difference */
563 diff = (mu[i] - mu[ro]);
564
565 /* Compute degree of the new smu polynomial */
566 if ((lmu[i] >> 1) > ((lmu[ro] >> 1) + diff))
567 lmu[i + 1] = lmu[i];
568 else
569 lmu[i + 1] = ((lmu[ro] >> 1) + diff) * 2;
570
571 /* Init smu[i+1] with 0 */
572 for (k = 0; k < num; k++)
573 smu[(i + 1) * num + k] = 0;
574
575 /* Compute smu[i+1] */
576 for (k = 0; k <= lmu[ro] >> 1; k++) {
577 int16_t a, b, c;
578
579 if (!(smu[ro * num + k] && dmu[i]))
580 continue;
581 a = readw_relaxed(index_of + dmu[i]);
582 b = readw_relaxed(index_of + dmu[ro]);
583 c = readw_relaxed(index_of + smu[ro * num + k]);
584 tmp = a + (cw_len - b) + c;
585 a = readw_relaxed(alpha_to + tmp % cw_len);
586 smu[(i + 1) * num + (k + diff)] = a;
587 }
588
589 for (k = 0; k <= lmu[i] >> 1; k++)
590 smu[(i + 1) * num + k] ^= smu[i * num + k];
591 }
592
593 /* End Computing Sigma (Mu+1) and L(mu) */
594 /* In either case compute delta */
595 delta[i + 1] = (mu[i + 1] * 2 - lmu[i + 1]) >> 1;
596
597 /* Do not compute discrepancy for the last iteration */
598 if (i >= cap)
599 continue;
600
601 for (k = 0; k <= (lmu[i + 1] >> 1); k++) {
602 tmp = 2 * (i - 1);
603 if (k == 0) {
604 dmu[i + 1] = si[tmp + 3];
605 } else if (smu[(i + 1) * num + k] && si[tmp + 3 - k]) {
606 int16_t a, b, c;
607 a = readw_relaxed(index_of +
608 smu[(i + 1) * num + k]);
609 b = si[2 * (i - 1) + 3 - k];
610 c = readw_relaxed(index_of + b);
611 tmp = a + c;
612 tmp %= cw_len;
613 dmu[i + 1] = readw_relaxed(alpha_to + tmp) ^
614 dmu[i + 1];
615 }
616 }
617 }
618
619 return;
620}
621
622static int pmecc_err_location(struct mtd_info *mtd)
623{
624 struct nand_chip *nand_chip = mtd->priv;
625 struct atmel_nand_host *host = nand_chip->priv;
626 unsigned long end_time;
627 const int cap = host->pmecc_corr_cap;
628 const int num = 2 * cap + 1;
629 int sector_size = host->pmecc_sector_size;
630 int err_nbr = 0; /* number of error */
631 int roots_nbr; /* number of roots */
632 int i;
633 uint32_t val;
634 int16_t *smu = host->pmecc_smu;
635
636 pmerrloc_writel(host->pmerrloc_base, ELDIS, PMERRLOC_DISABLE);
637
638 for (i = 0; i <= host->pmecc_lmu[cap + 1] >> 1; i++) {
639 pmerrloc_writel_sigma_relaxed(host->pmerrloc_base, i,
640 smu[(cap + 1) * num + i]);
641 err_nbr++;
642 }
643
644 val = (err_nbr - 1) << 16;
645 if (sector_size == 1024)
646 val |= 1;
647
648 pmerrloc_writel(host->pmerrloc_base, ELCFG, val);
649 pmerrloc_writel(host->pmerrloc_base, ELEN,
650 sector_size * 8 + host->pmecc_degree * cap);
651
652 end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
653 while (!(pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR)
654 & PMERRLOC_CALC_DONE)) {
655 if (unlikely(time_after(jiffies, end_time))) {
656 dev_err(host->dev, "PMECC: Timeout to calculate error location.\n");
657 return -1;
658 }
659 cpu_relax();
660 }
661
662 roots_nbr = (pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR)
663 & PMERRLOC_ERR_NUM_MASK) >> 8;
664 /* Number of roots == degree of smu hence <= cap */
665 if (roots_nbr == host->pmecc_lmu[cap + 1] >> 1)
666 return err_nbr - 1;
667
668 /* Number of roots does not match the degree of smu
669 * unable to correct error */
670 return -1;
671}
672
673static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
674 int sector_num, int extra_bytes, int err_nbr)
675{
676 struct nand_chip *nand_chip = mtd->priv;
677 struct atmel_nand_host *host = nand_chip->priv;
678 int i = 0;
679 int byte_pos, bit_pos, sector_size, pos;
680 uint32_t tmp;
681 uint8_t err_byte;
682
683 sector_size = host->pmecc_sector_size;
684
685 while (err_nbr) {
686 tmp = pmerrloc_readl_el_relaxed(host->pmerrloc_base, i) - 1;
687 byte_pos = tmp / 8;
688 bit_pos = tmp % 8;
689
690 if (byte_pos >= (sector_size + extra_bytes))
691 BUG(); /* should never happen */
692
693 if (byte_pos < sector_size) {
694 err_byte = *(buf + byte_pos);
695 *(buf + byte_pos) ^= (1 << bit_pos);
696
697 pos = sector_num * host->pmecc_sector_size + byte_pos;
698 dev_info(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
699 pos, bit_pos, err_byte, *(buf + byte_pos));
700 } else {
701 /* Bit flip in OOB area */
702 tmp = sector_num * host->pmecc_bytes_per_sector
703 + (byte_pos - sector_size);
704 err_byte = ecc[tmp];
705 ecc[tmp] ^= (1 << bit_pos);
706
707 pos = tmp + nand_chip->ecc.layout->eccpos[0];
708 dev_info(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
709 pos, bit_pos, err_byte, ecc[tmp]);
710 }
711
712 i++;
713 err_nbr--;
714 }
715
716 return;
717}
718
719static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
720 u8 *ecc)
721{
722 struct nand_chip *nand_chip = mtd->priv;
723 struct atmel_nand_host *host = nand_chip->priv;
724 int i, err_nbr, eccbytes;
725 uint8_t *buf_pos;
726
727 eccbytes = nand_chip->ecc.bytes;
728 for (i = 0; i < eccbytes; i++)
729 if (ecc[i] != 0xff)
730 goto normal_check;
731 /* Erased page, return OK */
732 return 0;
733
734normal_check:
735 for (i = 0; i < host->pmecc_sector_number; i++) {
736 err_nbr = 0;
737 if (pmecc_stat & 0x1) {
738 buf_pos = buf + i * host->pmecc_sector_size;
739
740 pmecc_gen_syndrome(mtd, i);
741 pmecc_substitute(mtd);
742 pmecc_get_sigma(mtd);
743
744 err_nbr = pmecc_err_location(mtd);
745 if (err_nbr == -1) {
746 dev_err(host->dev, "PMECC: Too many errors\n");
747 mtd->ecc_stats.failed++;
748 return -EIO;
749 } else {
750 pmecc_correct_data(mtd, buf_pos, ecc, i,
751 host->pmecc_bytes_per_sector, err_nbr);
752 mtd->ecc_stats.corrected += err_nbr;
753 }
754 }
755 pmecc_stat >>= 1;
756 }
757
758 return 0;
759}
760
761static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
762 struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
763{
764 struct atmel_nand_host *host = chip->priv;
765 int eccsize = chip->ecc.size;
766 uint8_t *oob = chip->oob_poi;
767 uint32_t *eccpos = chip->ecc.layout->eccpos;
768 uint32_t stat;
769 unsigned long end_time;
770
771 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
772 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
773 pmecc_writel(host->ecc, CFG, (pmecc_readl_relaxed(host->ecc, CFG)
774 & ~PMECC_CFG_WRITE_OP) | PMECC_CFG_AUTO_ENABLE);
775
776 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
777 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA);
778
779 chip->read_buf(mtd, buf, eccsize);
780 chip->read_buf(mtd, oob, mtd->oobsize);
781
782 end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
783 while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) {
784 if (unlikely(time_after(jiffies, end_time))) {
785 dev_err(host->dev, "PMECC: Timeout to get error status.\n");
786 return -EIO;
787 }
788 cpu_relax();
789 }
790
791 stat = pmecc_readl_relaxed(host->ecc, ISR);
792 if (stat != 0)
793 if (pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]) != 0)
794 return -EIO;
795
796 return 0;
797}
798
799static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
800 struct nand_chip *chip, const uint8_t *buf, int oob_required)
801{
802 struct atmel_nand_host *host = chip->priv;
803 uint32_t *eccpos = chip->ecc.layout->eccpos;
804 int i, j;
805 unsigned long end_time;
806
807 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
808 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
809
810 pmecc_writel(host->ecc, CFG, (pmecc_readl_relaxed(host->ecc, CFG) |
811 PMECC_CFG_WRITE_OP) & ~PMECC_CFG_AUTO_ENABLE);
812
813 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
814 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA);
815
816 chip->write_buf(mtd, (u8 *)buf, mtd->writesize);
817
818 end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
819 while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) {
820 if (unlikely(time_after(jiffies, end_time))) {
821 dev_err(host->dev, "PMECC: Timeout to get ECC value.\n");
822 return -EIO;
823 }
824 cpu_relax();
825 }
826
827 for (i = 0; i < host->pmecc_sector_number; i++) {
828 for (j = 0; j < host->pmecc_bytes_per_sector; j++) {
829 int pos;
830
831 pos = i * host->pmecc_bytes_per_sector + j;
832 chip->oob_poi[eccpos[pos]] =
833 pmecc_readb_ecc_relaxed(host->ecc, i, j);
834 }
835 }
836 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
837
838 return 0;
839}
840
841static void atmel_pmecc_core_init(struct mtd_info *mtd)
842{
843 struct nand_chip *nand_chip = mtd->priv;
844 struct atmel_nand_host *host = nand_chip->priv;
845 uint32_t val = 0;
846 struct nand_ecclayout *ecc_layout;
847
848 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
849 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
850
851 switch (host->pmecc_corr_cap) {
852 case 2:
853 val = PMECC_CFG_BCH_ERR2;
854 break;
855 case 4:
856 val = PMECC_CFG_BCH_ERR4;
857 break;
858 case 8:
859 val = PMECC_CFG_BCH_ERR8;
860 break;
861 case 12:
862 val = PMECC_CFG_BCH_ERR12;
863 break;
864 case 24:
865 val = PMECC_CFG_BCH_ERR24;
866 break;
867 }
868
869 if (host->pmecc_sector_size == 512)
870 val |= PMECC_CFG_SECTOR512;
871 else if (host->pmecc_sector_size == 1024)
872 val |= PMECC_CFG_SECTOR1024;
873
874 switch (host->pmecc_sector_number) {
875 case 1:
876 val |= PMECC_CFG_PAGE_1SECTOR;
877 break;
878 case 2:
879 val |= PMECC_CFG_PAGE_2SECTORS;
880 break;
881 case 4:
882 val |= PMECC_CFG_PAGE_4SECTORS;
883 break;
884 case 8:
885 val |= PMECC_CFG_PAGE_8SECTORS;
886 break;
887 }
888
889 val |= (PMECC_CFG_READ_OP | PMECC_CFG_SPARE_DISABLE
890 | PMECC_CFG_AUTO_DISABLE);
891 pmecc_writel(host->ecc, CFG, val);
892
893 ecc_layout = nand_chip->ecc.layout;
894 pmecc_writel(host->ecc, SAREA, mtd->oobsize - 1);
895 pmecc_writel(host->ecc, SADDR, ecc_layout->eccpos[0]);
896 pmecc_writel(host->ecc, EADDR,
897 ecc_layout->eccpos[ecc_layout->eccbytes - 1]);
898 /* See datasheet about PMECC Clock Control Register */
899 pmecc_writel(host->ecc, CLK, 2);
900 pmecc_writel(host->ecc, IDR, 0xff);
901 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
902}
903
904static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
905 struct atmel_nand_host *host)
906{
907 struct mtd_info *mtd = &host->mtd;
908 struct nand_chip *nand_chip = &host->nand_chip;
909 struct resource *regs, *regs_pmerr, *regs_rom;
910 int cap, sector_size, err_no;
911
912 cap = host->pmecc_corr_cap;
913 sector_size = host->pmecc_sector_size;
914 dev_info(host->dev, "Initialize PMECC params, cap: %d, sector: %d\n",
915 cap, sector_size);
916
917 regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
918 if (!regs) {
919 dev_warn(host->dev,
920 "Can't get I/O resource regs for PMECC controller, rolling back on software ECC\n");
921 nand_chip->ecc.mode = NAND_ECC_SOFT;
922 return 0;
923 }
924
925 host->ecc = ioremap(regs->start, resource_size(regs));
926 if (host->ecc == NULL) {
927 dev_err(host->dev, "ioremap failed\n");
928 err_no = -EIO;
929 goto err_pmecc_ioremap;
930 }
931
932 regs_pmerr = platform_get_resource(pdev, IORESOURCE_MEM, 2);
933 regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
934 if (regs_pmerr && regs_rom) {
935 host->pmerrloc_base = ioremap(regs_pmerr->start,
936 resource_size(regs_pmerr));
937 host->pmecc_rom_base = ioremap(regs_rom->start,
938 resource_size(regs_rom));
939 }
940
941 if (!host->pmerrloc_base || !host->pmecc_rom_base) {
942 dev_err(host->dev,
 943		"Cannot get I/O resource for PMECC ERRLOC controller or ROM!\n");
944 err_no = -EIO;
945 goto err_pmloc_ioremap;
946 }
947
948 /* ECC is calculated for the whole page (1 step) */
949 nand_chip->ecc.size = mtd->writesize;
950
951 /* set ECC page size and oob layout */
952 switch (mtd->writesize) {
953 case 2048:
954 host->pmecc_degree = PMECC_GF_DIMENSION_13;
955 host->pmecc_cw_len = (1 << host->pmecc_degree) - 1;
956 host->pmecc_sector_number = mtd->writesize / sector_size;
957 host->pmecc_bytes_per_sector = pmecc_get_ecc_bytes(
958 cap, sector_size);
959 host->pmecc_alpha_to = pmecc_get_alpha_to(host);
960 host->pmecc_index_of = host->pmecc_rom_base +
961 host->pmecc_lookup_table_offset;
962
963 nand_chip->ecc.steps = 1;
964 nand_chip->ecc.strength = cap;
965 nand_chip->ecc.bytes = host->pmecc_bytes_per_sector *
966 host->pmecc_sector_number;
967 if (nand_chip->ecc.bytes > mtd->oobsize - 2) {
968 dev_err(host->dev, "No room for ECC bytes\n");
969 err_no = -EINVAL;
970 goto err_no_ecc_room;
971 }
972 pmecc_config_ecc_layout(&atmel_pmecc_oobinfo,
973 mtd->oobsize,
974 nand_chip->ecc.bytes);
975 nand_chip->ecc.layout = &atmel_pmecc_oobinfo;
976 break;
977 case 512:
978 case 1024:
979 case 4096:
980 /* TODO */
981 dev_warn(host->dev,
 982			"Unsupported page size for PMECC, using software ECC\n");
983 default:
984 /* page size not handled by HW ECC */
985 /* switching back to soft ECC */
986 nand_chip->ecc.mode = NAND_ECC_SOFT;
987 return 0;
988 }
989
990 /* Allocate data for PMECC computation */
991 err_no = pmecc_data_alloc(host);
992 if (err_no) {
993 dev_err(host->dev,
994 "Cannot allocate memory for PMECC computation!\n");
995 goto err_pmecc_data_alloc;
996 }
997
998 nand_chip->ecc.read_page = atmel_nand_pmecc_read_page;
999 nand_chip->ecc.write_page = atmel_nand_pmecc_write_page;
1000
1001 atmel_pmecc_core_init(mtd);
1002
1003 return 0;
1004
1005err_pmecc_data_alloc:
1006err_no_ecc_room:
1007err_pmloc_ioremap:
1008 iounmap(host->ecc);
1009 if (host->pmerrloc_base)
1010 iounmap(host->pmerrloc_base);
1011 if (host->pmecc_rom_base)
1012 iounmap(host->pmecc_rom_base);
1013err_pmecc_ioremap:
1014 return err_no;
1015}
1016
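The ECC budgeting in the 2048-byte case above can be checked by hand. pmecc_get_ecc_bytes() is not part of this hunk; the sketch below assumes the usual PMECC relation of cap * Galois-field degree bits per sector (degree 13 assumed for 512-byte sectors), rounded up to whole bytes:

#include <stdio.h>

/* assumed helper, mirroring what pmecc_get_ecc_bytes() is expected to return */
static int pmecc_bytes_per_sector(int cap, int gf_degree)
{
	return (cap * gf_degree + 7) / 8;	/* DIV_ROUND_UP(cap * degree, 8) */
}

int main(void)
{
	int writesize = 2048, sector_size = 512, oobsize = 64, cap = 2;
	int sectors = writesize / sector_size;			/* 4 */
	int ecc_bytes = sectors * pmecc_bytes_per_sector(cap, 13);

	/* 16 ECC bytes against 62 usable OOB bytes: the "No room" check passes */
	printf("ecc.bytes = %d, oobsize - 2 = %d\n", ecc_bytes, oobsize - 2);
	return 0;
}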
1017/*
291 * Calculate HW ECC 1018 * Calculate HW ECC
292 * 1019 *
293 * function called after a write 1020 * function called after a write
@@ -481,7 +1208,8 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
481static int __devinit atmel_of_init_port(struct atmel_nand_host *host, 1208static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
482 struct device_node *np) 1209 struct device_node *np)
483{ 1210{
484 u32 val;
 1211 u32 val, table_offset;
1212 u32 offset[2];
485 int ecc_mode; 1213 int ecc_mode;
486 struct atmel_nand_data *board = &host->board; 1214 struct atmel_nand_data *board = &host->board;
487 enum of_gpio_flags flags; 1215 enum of_gpio_flags flags;
@@ -517,6 +1245,50 @@ static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
517 board->enable_pin = of_get_gpio(np, 1); 1245 board->enable_pin = of_get_gpio(np, 1);
518 board->det_pin = of_get_gpio(np, 2); 1246 board->det_pin = of_get_gpio(np, 2);
519 1247
1248 host->has_pmecc = of_property_read_bool(np, "atmel,has-pmecc");
1249
1250 if (!(board->ecc_mode == NAND_ECC_HW) || !host->has_pmecc)
1251 return 0; /* Not using PMECC */
1252
1253 /* use PMECC, get correction capability, sector size and lookup
1254 * table offset.
1255 */
1256 if (of_property_read_u32(np, "atmel,pmecc-cap", &val) != 0) {
 1257		dev_err(host->dev, "Cannot determine PMECC correction capability\n");
1258 return -EINVAL;
1259 } else if ((val != 2) && (val != 4) && (val != 8) && (val != 12) &&
1260 (val != 24)) {
1261 dev_err(host->dev,
1262 "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n",
1263 val);
1264 return -EINVAL;
1265 }
1266 host->pmecc_corr_cap = (u8)val;
1267
1268 if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) != 0) {
 1269		dev_err(host->dev, "Cannot determine PMECC sector size\n");
1270 return -EINVAL;
1271 } else if ((val != 512) && (val != 1024)) {
1272 dev_err(host->dev,
1273 "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n",
1274 val);
1275 return -EINVAL;
1276 }
1277 host->pmecc_sector_size = (u16)val;
1278
1279 if (of_property_read_u32_array(np, "atmel,pmecc-lookup-table-offset",
1280 offset, 2) != 0) {
1281 dev_err(host->dev, "Cannot get PMECC lookup table offset\n");
1282 return -EINVAL;
1283 }
1284 table_offset = host->pmecc_sector_size == 512 ? offset[0] : offset[1];
1285
1286 if (!table_offset) {
1287 dev_err(host->dev, "Invalid PMECC lookup table offset\n");
1288 return -EINVAL;
1289 }
1290 host->pmecc_lookup_table_offset = table_offset;
1291
520 return 0; 1292 return 0;
521} 1293}
522#else 1294#else
@@ -527,6 +1299,66 @@ static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
527} 1299}
528#endif 1300#endif
529 1301
1302static int __init atmel_hw_nand_init_params(struct platform_device *pdev,
1303 struct atmel_nand_host *host)
1304{
1305 struct mtd_info *mtd = &host->mtd;
1306 struct nand_chip *nand_chip = &host->nand_chip;
1307 struct resource *regs;
1308
1309 regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1310 if (!regs) {
1311 dev_err(host->dev,
 1312			"Can't get I/O resource regs, using software ECC\n");
1313 nand_chip->ecc.mode = NAND_ECC_SOFT;
1314 return 0;
1315 }
1316
1317 host->ecc = ioremap(regs->start, resource_size(regs));
1318 if (host->ecc == NULL) {
1319 dev_err(host->dev, "ioremap failed\n");
1320 return -EIO;
1321 }
1322
1323 /* ECC is calculated for the whole page (1 step) */
1324 nand_chip->ecc.size = mtd->writesize;
1325
1326 /* set ECC page size and oob layout */
1327 switch (mtd->writesize) {
1328 case 512:
1329 nand_chip->ecc.layout = &atmel_oobinfo_small;
1330 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
1331 break;
1332 case 1024:
1333 nand_chip->ecc.layout = &atmel_oobinfo_large;
1334 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
1335 break;
1336 case 2048:
1337 nand_chip->ecc.layout = &atmel_oobinfo_large;
1338 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
1339 break;
1340 case 4096:
1341 nand_chip->ecc.layout = &atmel_oobinfo_large;
1342 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
1343 break;
1344 default:
1345 /* page size not handled by HW ECC */
1346 /* switching back to soft ECC */
1347 nand_chip->ecc.mode = NAND_ECC_SOFT;
1348 return 0;
1349 }
1350
1351 /* set up for HW ECC */
1352 nand_chip->ecc.calculate = atmel_nand_calculate;
1353 nand_chip->ecc.correct = atmel_nand_correct;
1354 nand_chip->ecc.hwctl = atmel_nand_hwctl;
1355 nand_chip->ecc.read_page = atmel_nand_read_page;
1356 nand_chip->ecc.bytes = 4;
1357 nand_chip->ecc.strength = 1;
1358
1359 return 0;
1360}
1361
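The ATMEL_ECC_PAGESIZE_* codes chosen above encode the page size plus its customary spare area (16 OOB bytes per 512 bytes of data): 528, 1056, 2112 and 4224. A standalone arithmetic check of that naming (an interpretation of the macro names, not taken from datasheet text in this patch):

#include <stdio.h>

int main(void)
{
	int pagesizes[] = { 512, 1024, 2048, 4096 };
	int i;

	for (i = 0; i < 4; i++)
		printf("%4d + %3d spare = %4d\n", pagesizes[i],
		       pagesizes[i] / 32, pagesizes[i] + pagesizes[i] / 32);
	return 0;
}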
530/* 1362/*
531 * Probe for the NAND device. 1363 * Probe for the NAND device.
532 */ 1364 */
@@ -535,7 +1367,6 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
535 struct atmel_nand_host *host; 1367 struct atmel_nand_host *host;
536 struct mtd_info *mtd; 1368 struct mtd_info *mtd;
537 struct nand_chip *nand_chip; 1369 struct nand_chip *nand_chip;
538 struct resource *regs;
539 struct resource *mem; 1370 struct resource *mem;
540 struct mtd_part_parser_data ppdata = {}; 1371 struct mtd_part_parser_data ppdata = {};
541 int res; 1372 int res;
@@ -568,7 +1399,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
568 if (pdev->dev.of_node) { 1399 if (pdev->dev.of_node) {
569 res = atmel_of_init_port(host, pdev->dev.of_node); 1400 res = atmel_of_init_port(host, pdev->dev.of_node);
570 if (res) 1401 if (res)
571 goto err_nand_ioremap;
 1402 goto err_ecc_ioremap;
572 } else { 1403 } else {
573 memcpy(&host->board, pdev->dev.platform_data, 1404 memcpy(&host->board, pdev->dev.platform_data,
574 sizeof(struct atmel_nand_data)); 1405 sizeof(struct atmel_nand_data));
@@ -583,33 +1414,45 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
583 nand_chip->IO_ADDR_W = host->io_base; 1414 nand_chip->IO_ADDR_W = host->io_base;
584 nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl; 1415 nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
585 1416
586 if (gpio_is_valid(host->board.rdy_pin))
587 nand_chip->dev_ready = atmel_nand_device_ready;
 1417 if (gpio_is_valid(host->board.rdy_pin)) {
 1418 res = gpio_request(host->board.rdy_pin, "nand_rdy");
1419 if (res < 0) {
1420 dev_err(&pdev->dev,
1421 "can't request rdy gpio %d\n",
1422 host->board.rdy_pin);
1423 goto err_ecc_ioremap;
1424 }
588 1425
589 nand_chip->ecc.mode = host->board.ecc_mode;
 1426 res = gpio_direction_input(host->board.rdy_pin);
1427 if (res < 0) {
1428 dev_err(&pdev->dev,
 1429 "can't set input direction for rdy gpio %d\n",
1430 host->board.rdy_pin);
1431 goto err_ecc_ioremap;
1432 }
590 1433
591 regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
592 if (!regs && nand_chip->ecc.mode == NAND_ECC_HW) {
593 printk(KERN_ERR "atmel_nand: can't get I/O resource "
594 "regs\nFalling back on software ECC\n");
595 nand_chip->ecc.mode = NAND_ECC_SOFT;
596 }
 1434 nand_chip->dev_ready = atmel_nand_device_ready;
 1435 }
597 1436
598 if (nand_chip->ecc.mode == NAND_ECC_HW) {
599 host->ecc = ioremap(regs->start, resource_size(regs));
600 if (host->ecc == NULL) {
601 printk(KERN_ERR "atmel_nand: ioremap failed\n");
602 res = -EIO;
 1437 if (gpio_is_valid(host->board.enable_pin)) {
 1438 res = gpio_request(host->board.enable_pin, "nand_enable");
 1439 if (res < 0) {
 1440 dev_err(&pdev->dev,
 1441 "can't request enable gpio %d\n",
1442 host->board.enable_pin);
1443 goto err_ecc_ioremap;
1444 }
1445
1446 res = gpio_direction_output(host->board.enable_pin, 1);
1447 if (res < 0) {
1448 dev_err(&pdev->dev,
 1449 "can't set output direction for enable gpio %d\n",
1450 host->board.enable_pin);
603 goto err_ecc_ioremap; 1451 goto err_ecc_ioremap;
604 } 1452 }
605 nand_chip->ecc.calculate = atmel_nand_calculate;
606 nand_chip->ecc.correct = atmel_nand_correct;
607 nand_chip->ecc.hwctl = atmel_nand_hwctl;
608 nand_chip->ecc.read_page = atmel_nand_read_page;
609 nand_chip->ecc.bytes = 4;
610 nand_chip->ecc.strength = 1;
611 } 1453 }
612 1454
1455 nand_chip->ecc.mode = host->board.ecc_mode;
613 nand_chip->chip_delay = 20; /* 20us command delay time */ 1456 nand_chip->chip_delay = 20; /* 20us command delay time */
614 1457
615 if (host->board.bus_width_16) /* 16-bit bus width */ 1458 if (host->board.bus_width_16) /* 16-bit bus width */
@@ -622,6 +1465,22 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
622 atmel_nand_enable(host); 1465 atmel_nand_enable(host);
623 1466
624 if (gpio_is_valid(host->board.det_pin)) { 1467 if (gpio_is_valid(host->board.det_pin)) {
1468 res = gpio_request(host->board.det_pin, "nand_det");
1469 if (res < 0) {
1470 dev_err(&pdev->dev,
1471 "can't request det gpio %d\n",
1472 host->board.det_pin);
1473 goto err_no_card;
1474 }
1475
1476 res = gpio_direction_input(host->board.det_pin);
1477 if (res < 0) {
1478 dev_err(&pdev->dev,
 1479 "can't set input direction for det gpio %d\n",
1480 host->board.det_pin);
1481 goto err_no_card;
1482 }
1483
625 if (gpio_get_value(host->board.det_pin)) { 1484 if (gpio_get_value(host->board.det_pin)) {
626 printk(KERN_INFO "No SmartMedia card inserted.\n"); 1485 printk(KERN_INFO "No SmartMedia card inserted.\n");
627 res = -ENXIO; 1486 res = -ENXIO;
@@ -661,40 +1520,13 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
661 } 1520 }
662 1521
663 if (nand_chip->ecc.mode == NAND_ECC_HW) { 1522 if (nand_chip->ecc.mode == NAND_ECC_HW) {
664 /* ECC is calculated for the whole page (1 step) */
665 nand_chip->ecc.size = mtd->writesize;
666
667 /* set ECC page size and oob layout */
668 switch (mtd->writesize) {
669 case 512:
670 nand_chip->ecc.layout = &atmel_oobinfo_small;
671 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
672 break;
673 case 1024:
674 nand_chip->ecc.layout = &atmel_oobinfo_large;
675 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
676 break;
677 case 2048:
678 nand_chip->ecc.layout = &atmel_oobinfo_large;
679 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
680 break;
681 case 4096:
682 nand_chip->ecc.layout = &atmel_oobinfo_large;
683 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
684 break;
685 default:
686 /* page size not handled by HW ECC */
687 /* switching back to soft ECC */
688 nand_chip->ecc.mode = NAND_ECC_SOFT;
689 nand_chip->ecc.calculate = NULL;
690 nand_chip->ecc.correct = NULL;
691 nand_chip->ecc.hwctl = NULL;
692 nand_chip->ecc.read_page = NULL;
693 nand_chip->ecc.postpad = 0;
694 nand_chip->ecc.prepad = 0;
695 nand_chip->ecc.bytes = 0;
696 break;
697 }
 1523 if (host->has_pmecc)
 1524 res = atmel_pmecc_nand_init_params(pdev, host);
 1525 else
 1526 res = atmel_hw_nand_init_params(pdev, host);
 1527
 1528 if (res != 0)
 1529 goto err_hw_ecc;
698 } 1530 }
699 1531
700 /* second phase scan */ 1532 /* second phase scan */
@@ -711,14 +1543,23 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
711 return res; 1543 return res;
712 1544
713err_scan_tail: 1545err_scan_tail:
1546 if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW) {
1547 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
1548 pmecc_data_free(host);
1549 }
1550 if (host->ecc)
1551 iounmap(host->ecc);
1552 if (host->pmerrloc_base)
1553 iounmap(host->pmerrloc_base);
1554 if (host->pmecc_rom_base)
1555 iounmap(host->pmecc_rom_base);
1556err_hw_ecc:
714err_scan_ident: 1557err_scan_ident:
715err_no_card: 1558err_no_card:
716 atmel_nand_disable(host); 1559 atmel_nand_disable(host);
717 platform_set_drvdata(pdev, NULL); 1560 platform_set_drvdata(pdev, NULL);
718 if (host->dma_chan) 1561 if (host->dma_chan)
719 dma_release_channel(host->dma_chan); 1562 dma_release_channel(host->dma_chan);
720 if (host->ecc)
721 iounmap(host->ecc);
722err_ecc_ioremap: 1563err_ecc_ioremap:
723 iounmap(host->io_base); 1564 iounmap(host->io_base);
724err_nand_ioremap: 1565err_nand_ioremap:
@@ -738,8 +1579,28 @@ static int __exit atmel_nand_remove(struct platform_device *pdev)
738 1579
739 atmel_nand_disable(host); 1580 atmel_nand_disable(host);
740 1581
1582 if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW) {
1583 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
1584 pmerrloc_writel(host->pmerrloc_base, ELDIS,
1585 PMERRLOC_DISABLE);
1586 pmecc_data_free(host);
1587 }
1588
1589 if (gpio_is_valid(host->board.det_pin))
1590 gpio_free(host->board.det_pin);
1591
1592 if (gpio_is_valid(host->board.enable_pin))
1593 gpio_free(host->board.enable_pin);
1594
1595 if (gpio_is_valid(host->board.rdy_pin))
1596 gpio_free(host->board.rdy_pin);
1597
741 if (host->ecc) 1598 if (host->ecc)
742 iounmap(host->ecc); 1599 iounmap(host->ecc);
1600 if (host->pmecc_rom_base)
1601 iounmap(host->pmecc_rom_base);
1602 if (host->pmerrloc_base)
1603 iounmap(host->pmerrloc_base);
743 1604
744 if (host->dma_chan) 1605 if (host->dma_chan)
745 dma_release_channel(host->dma_chan); 1606 dma_release_channel(host->dma_chan);
diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h
index 578c776e1356..8a1e9a686759 100644
--- a/drivers/mtd/nand/atmel_nand_ecc.h
+++ b/drivers/mtd/nand/atmel_nand_ecc.h
@@ -3,7 +3,7 @@
3 * Based on AT91SAM9260 datasheet revision B. 3 * Based on AT91SAM9260 datasheet revision B.
4 * 4 *
5 * Copyright (C) 2007 Andrew Victor 5 * Copyright (C) 2007 Andrew Victor
6 * Copyright (C) 2007 Atmel Corporation.
 6 * Copyright (C) 2007 - 2012 Atmel Corporation.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the 9 * under the terms of the GNU General Public License as published by the
@@ -36,4 +36,116 @@
36#define ATMEL_ECC_NPR 0x10 /* NParity register */ 36#define ATMEL_ECC_NPR 0x10 /* NParity register */
37#define ATMEL_ECC_NPARITY (0xffff << 0) /* NParity */ 37#define ATMEL_ECC_NPARITY (0xffff << 0) /* NParity */
38 38
39/* PMECC Register Definitions */
40#define ATMEL_PMECC_CFG 0x000 /* Configuration Register */
41#define PMECC_CFG_BCH_ERR2 (0 << 0)
42#define PMECC_CFG_BCH_ERR4 (1 << 0)
43#define PMECC_CFG_BCH_ERR8 (2 << 0)
44#define PMECC_CFG_BCH_ERR12 (3 << 0)
45#define PMECC_CFG_BCH_ERR24 (4 << 0)
46
47#define PMECC_CFG_SECTOR512 (0 << 4)
48#define PMECC_CFG_SECTOR1024 (1 << 4)
49
50#define PMECC_CFG_PAGE_1SECTOR (0 << 8)
51#define PMECC_CFG_PAGE_2SECTORS (1 << 8)
52#define PMECC_CFG_PAGE_4SECTORS (2 << 8)
53#define PMECC_CFG_PAGE_8SECTORS (3 << 8)
54
55#define PMECC_CFG_READ_OP (0 << 12)
56#define PMECC_CFG_WRITE_OP (1 << 12)
57
58#define PMECC_CFG_SPARE_ENABLE (1 << 16)
59#define PMECC_CFG_SPARE_DISABLE (0 << 16)
60
61#define PMECC_CFG_AUTO_ENABLE (1 << 20)
62#define PMECC_CFG_AUTO_DISABLE (0 << 20)
63
64#define ATMEL_PMECC_SAREA 0x004 /* Spare area size */
65#define ATMEL_PMECC_SADDR 0x008 /* PMECC starting address */
66#define ATMEL_PMECC_EADDR 0x00c /* PMECC ending address */
67#define ATMEL_PMECC_CLK 0x010 /* PMECC clock control */
68#define PMECC_CLK_133MHZ (2 << 0)
69
70#define ATMEL_PMECC_CTRL 0x014 /* PMECC control register */
71#define PMECC_CTRL_RST (1 << 0)
72#define PMECC_CTRL_DATA (1 << 1)
73#define PMECC_CTRL_USER (1 << 2)
74#define PMECC_CTRL_ENABLE (1 << 4)
75#define PMECC_CTRL_DISABLE (1 << 5)
76
77#define ATMEL_PMECC_SR 0x018 /* PMECC status register */
78#define PMECC_SR_BUSY (1 << 0)
79#define PMECC_SR_ENABLE (1 << 4)
80
81#define ATMEL_PMECC_IER 0x01c /* PMECC interrupt enable */
82#define PMECC_IER_ENABLE (1 << 0)
83#define ATMEL_PMECC_IDR 0x020 /* PMECC interrupt disable */
84#define PMECC_IER_DISABLE (1 << 0)
85#define ATMEL_PMECC_IMR 0x024 /* PMECC interrupt mask */
86#define PMECC_IER_MASK (1 << 0)
87#define ATMEL_PMECC_ISR 0x028 /* PMECC interrupt status */
88#define ATMEL_PMECC_ECCx 0x040 /* PMECC ECC x */
89#define ATMEL_PMECC_REMx 0x240 /* PMECC REM x */
90
91/* PMERRLOC Register Definitions */
92#define ATMEL_PMERRLOC_ELCFG 0x000 /* Error location config */
93#define PMERRLOC_ELCFG_SECTOR_512 (0 << 0)
94#define PMERRLOC_ELCFG_SECTOR_1024 (1 << 0)
95#define PMERRLOC_ELCFG_NUM_ERRORS(n) ((n) << 16)
96
97#define ATMEL_PMERRLOC_ELPRIM 0x004 /* Error location primitive */
98#define ATMEL_PMERRLOC_ELEN 0x008 /* Error location enable */
99#define ATMEL_PMERRLOC_ELDIS 0x00c /* Error location disable */
100#define PMERRLOC_DISABLE (1 << 0)
101
102#define ATMEL_PMERRLOC_ELSR 0x010 /* Error location status */
103#define PMERRLOC_ELSR_BUSY (1 << 0)
104#define ATMEL_PMERRLOC_ELIER 0x014 /* Error location int enable */
105#define ATMEL_PMERRLOC_ELIDR 0x018 /* Error location int disable */
106#define ATMEL_PMERRLOC_ELIMR 0x01c /* Error location int mask */
107#define ATMEL_PMERRLOC_ELISR 0x020 /* Error location int status */
108#define PMERRLOC_ERR_NUM_MASK (0x1f << 8)
109#define PMERRLOC_CALC_DONE (1 << 0)
110#define ATMEL_PMERRLOC_SIGMAx 0x028 /* Error location SIGMA x */
111#define ATMEL_PMERRLOC_ELx 0x08c /* Error location x */
112
113/* Register access macros for PMECC */
114#define pmecc_readl_relaxed(addr, reg) \
115 readl_relaxed((addr) + ATMEL_PMECC_##reg)
116
117#define pmecc_writel(addr, reg, value) \
118 writel((value), (addr) + ATMEL_PMECC_##reg)
119
120#define pmecc_readb_ecc_relaxed(addr, sector, n) \
121 readb_relaxed((addr) + ATMEL_PMECC_ECCx + ((sector) * 0x40) + (n))
122
123#define pmecc_readl_rem_relaxed(addr, sector, n) \
124 readl_relaxed((addr) + ATMEL_PMECC_REMx + ((sector) * 0x40) + ((n) * 4))
125
126#define pmerrloc_readl_relaxed(addr, reg) \
127 readl_relaxed((addr) + ATMEL_PMERRLOC_##reg)
128
129#define pmerrloc_writel(addr, reg, value) \
130 writel((value), (addr) + ATMEL_PMERRLOC_##reg)
131
132#define pmerrloc_writel_sigma_relaxed(addr, n, value) \
133 writel_relaxed((value), (addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
134
135#define pmerrloc_readl_sigma_relaxed(addr, n) \
136 readl_relaxed((addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
137
138#define pmerrloc_readl_el_relaxed(addr, n) \
139 readl_relaxed((addr) + ATMEL_PMERRLOC_ELx + ((n) * 4))
140
141/* Galois field dimension */
142#define PMECC_GF_DIMENSION_13 13
143#define PMECC_GF_DIMENSION_14 14
144
145#define PMECC_LOOKUP_TABLE_SIZE_512 0x2000
146#define PMECC_LOOKUP_TABLE_SIZE_1024 0x4000
147
148/* Time out value for reading PMECC status register */
149#define PMECC_MAX_TIMEOUT_MS 100
150
39#endif 151#endif
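The per-sector ECC readout helper added above, pmecc_readb_ecc_relaxed(), addresses ECC byte n of sector s at ATMEL_PMECC_ECCx + s * 0x40 + n from the PMECC base. A standalone illustration of that offset arithmetic (not driver code):

#include <stdio.h>

#define ATMEL_PMECC_ECCx	0x040

static unsigned int pmecc_ecc_byte_offset(unsigned int sector, unsigned int n)
{
	return ATMEL_PMECC_ECCx + sector * 0x40 + n;
}

int main(void)
{
	/* third sector (index 2), fourth ECC byte (index 3) -> 0x40 + 0x80 + 3 */
	printf("offset = 0x%03x\n", pmecc_ecc_byte_offset(2, 3));	/* 0x0c3 */
	return 0;
}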
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 9f609d2dcf62..5c47b200045a 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -141,28 +141,6 @@ static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
141} 141}
142 142
143/** 143/**
144 * au_verify_buf - Verify chip data against buffer
145 * @mtd: MTD device structure
146 * @buf: buffer containing the data to compare
147 * @len: number of bytes to compare
148 *
149 * verify function for 8bit buswidth
150 */
151static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
152{
153 int i;
154 struct nand_chip *this = mtd->priv;
155
156 for (i = 0; i < len; i++) {
157 if (buf[i] != readb(this->IO_ADDR_R))
158 return -EFAULT;
159 au_sync();
160 }
161
162 return 0;
163}
164
165/**
166 * au_write_buf16 - write buffer to chip 144 * au_write_buf16 - write buffer to chip
167 * @mtd: MTD device structure 145 * @mtd: MTD device structure
168 * @buf: data buffer 146 * @buf: data buffer
@@ -205,29 +183,6 @@ static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
205 } 183 }
206} 184}
207 185
208/**
209 * au_verify_buf16 - Verify chip data against buffer
210 * @mtd: MTD device structure
211 * @buf: buffer containing the data to compare
212 * @len: number of bytes to compare
213 *
214 * verify function for 16bit buswidth
215 */
216static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len)
217{
218 int i;
219 struct nand_chip *this = mtd->priv;
220 u16 *p = (u16 *) buf;
221 len >>= 1;
222
223 for (i = 0; i < len; i++) {
224 if (p[i] != readw(this->IO_ADDR_R))
225 return -EFAULT;
226 au_sync();
227 }
228 return 0;
229}
230
231/* Select the chip by setting nCE to low */ 186/* Select the chip by setting nCE to low */
232#define NAND_CTL_SETNCE 1 187#define NAND_CTL_SETNCE 1
233/* Deselect the chip by setting nCE to high */ 188/* Deselect the chip by setting nCE to high */
@@ -516,7 +471,6 @@ static int __devinit au1550nd_probe(struct platform_device *pdev)
516 this->read_word = au_read_word; 471 this->read_word = au_read_word;
517 this->write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf; 472 this->write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf;
518 this->read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf; 473 this->read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf;
519 this->verify_buf = (pd->devwidth) ? au_verify_buf16 : au_verify_buf;
520 474
521 ret = nand_scan(&ctx->info, 1); 475 ret = nand_scan(&ctx->info, 1);
522 if (ret) { 476 if (ret) {
diff --git a/drivers/mtd/nand/bcm_umi_bch.c b/drivers/mtd/nand/bcm_umi_bch.c
deleted file mode 100644
index 5914bb32e001..000000000000
--- a/drivers/mtd/nand/bcm_umi_bch.c
+++ /dev/null
@@ -1,217 +0,0 @@
1/*****************************************************************************
2* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
3*
4* Unless you and Broadcom execute a separate written software license
5* agreement governing use of this software, this software is licensed to you
6* under the terms of the GNU General Public License version 2, available at
7* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8*
9* Notwithstanding the above, under no circumstances may you combine this
10* software in any way with any other Broadcom software provided under a
11* license other than the GPL, without Broadcom's express prior written
12* consent.
13*****************************************************************************/
14
15/* ---- Include Files ---------------------------------------------------- */
16#include "nand_bcm_umi.h"
17
18/* ---- External Variable Declarations ----------------------------------- */
19/* ---- External Function Prototypes ------------------------------------- */
20/* ---- Public Variables ------------------------------------------------- */
21/* ---- Private Constants and Types -------------------------------------- */
22
23/* ---- Private Function Prototypes -------------------------------------- */
24static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
25 struct nand_chip *chip, uint8_t *buf, int oob_required, int page);
26static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
27 struct nand_chip *chip, const uint8_t *buf, int oob_required);
28
29/* ---- Private Variables ------------------------------------------------ */
30
31/*
32** nand_hw_eccoob
33** New oob placement block for use with hardware ecc generation.
34*/
35static struct nand_ecclayout nand_hw_eccoob_512 = {
36 /* Reserve 5 for BI indicator */
37 .oobfree = {
38#if (NAND_ECC_NUM_BYTES > 3)
39 {.offset = 0, .length = 2}
40#else
41 {.offset = 0, .length = 5},
42 {.offset = 6, .length = 7}
43#endif
44 }
45};
46
47/*
48** We treat the OOB for a 2K page as if it were 4 512 byte oobs,
49** except the BI is at byte 0.
50*/
51static struct nand_ecclayout nand_hw_eccoob_2048 = {
52 /* Reserve 0 as BI indicator */
53 .oobfree = {
54#if (NAND_ECC_NUM_BYTES > 10)
55 {.offset = 1, .length = 2},
56#elif (NAND_ECC_NUM_BYTES > 7)
57 {.offset = 1, .length = 5},
58 {.offset = 16, .length = 6},
59 {.offset = 32, .length = 6},
60 {.offset = 48, .length = 6}
61#else
62 {.offset = 1, .length = 8},
63 {.offset = 16, .length = 9},
64 {.offset = 32, .length = 9},
65 {.offset = 48, .length = 9}
66#endif
67 }
68};
69
70/* We treat the OOB for a 4K page as if it were 8 512 byte oobs,
71 * except the BI is at byte 0. */
72static struct nand_ecclayout nand_hw_eccoob_4096 = {
73 /* Reserve 0 as BI indicator */
74 .oobfree = {
75#if (NAND_ECC_NUM_BYTES > 10)
76 {.offset = 1, .length = 2},
77 {.offset = 16, .length = 3},
78 {.offset = 32, .length = 3},
79 {.offset = 48, .length = 3},
80 {.offset = 64, .length = 3},
81 {.offset = 80, .length = 3},
82 {.offset = 96, .length = 3},
83 {.offset = 112, .length = 3}
84#else
85 {.offset = 1, .length = 5},
86 {.offset = 16, .length = 6},
87 {.offset = 32, .length = 6},
88 {.offset = 48, .length = 6},
89 {.offset = 64, .length = 6},
90 {.offset = 80, .length = 6},
91 {.offset = 96, .length = 6},
92 {.offset = 112, .length = 6}
93#endif
94 }
95};
96
97/* ---- Private Functions ------------------------------------------------ */
98/* ==== Public Functions ================================================= */
99
100/****************************************************************************
101*
102* bcm_umi_bch_read_page_hwecc - hardware ecc based page read function
103* @mtd: mtd info structure
104* @chip: nand chip info structure
105* @buf: buffer to store read data
106* @oob_required: caller expects OOB data read to chip->oob_poi
107*
108***************************************************************************/
109static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
110 struct nand_chip *chip, uint8_t * buf,
111 int oob_required, int page)
112{
113 int sectorIdx = 0;
114 int eccsize = chip->ecc.size;
115 int eccsteps = chip->ecc.steps;
116 uint8_t *datap = buf;
117 uint8_t eccCalc[NAND_ECC_NUM_BYTES];
118 int sectorOobSize = mtd->oobsize / eccsteps;
119 int stat;
120 unsigned int max_bitflips = 0;
121
122 for (sectorIdx = 0; sectorIdx < eccsteps;
123 sectorIdx++, datap += eccsize) {
124 if (sectorIdx > 0) {
125 /* Seek to page location within sector */
126 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, sectorIdx * eccsize,
127 -1);
128 }
129
130 /* Enable hardware ECC before reading the buf */
131 nand_bcm_umi_bch_enable_read_hwecc();
132
133 /* Read in data */
134 bcm_umi_nand_read_buf(mtd, datap, eccsize);
135
136 /* Pause hardware ECC after reading the buf */
137 nand_bcm_umi_bch_pause_read_ecc_calc();
138
139 /* Read the OOB ECC */
140 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
141 mtd->writesize + sectorIdx * sectorOobSize, -1);
142 nand_bcm_umi_bch_read_oobEcc(mtd->writesize, eccCalc,
143 NAND_ECC_NUM_BYTES,
144 chip->oob_poi +
145 sectorIdx * sectorOobSize);
146
147 /* Correct any ECC detected errors */
148 stat =
149 nand_bcm_umi_bch_correct_page(datap, eccCalc,
150 NAND_ECC_NUM_BYTES);
151
152 /* Update Stats */
153 if (stat < 0) {
154#if defined(NAND_BCM_UMI_DEBUG)
155 printk(KERN_WARNING "%s uncorr_err sectorIdx=%d\n",
156 __func__, sectorIdx);
157 printk(KERN_WARNING
158 "%s data %02x %02x %02x %02x "
159 "%02x %02x %02x %02x\n",
160 __func__, datap[0], datap[1], datap[2], datap[3],
161 datap[4], datap[5], datap[6], datap[7]);
162 printk(KERN_WARNING
163 "%s ecc %02x %02x %02x %02x "
164 "%02x %02x %02x %02x %02x %02x "
165 "%02x %02x %02x\n",
166 __func__, eccCalc[0], eccCalc[1], eccCalc[2],
167 eccCalc[3], eccCalc[4], eccCalc[5], eccCalc[6],
168 eccCalc[7], eccCalc[8], eccCalc[9], eccCalc[10],
169 eccCalc[11], eccCalc[12]);
170 BUG();
171#endif
172 mtd->ecc_stats.failed++;
173 } else {
174#if defined(NAND_BCM_UMI_DEBUG)
175 if (stat > 0) {
176 printk(KERN_INFO
177 "%s %d correctable_errors detected\n",
178 __func__, stat);
179 }
180#endif
181 mtd->ecc_stats.corrected += stat;
182 max_bitflips = max_t(unsigned int, max_bitflips, stat);
183 }
184 }
185 return max_bitflips;
186}
187
188/****************************************************************************
189*
190* bcm_umi_bch_write_page_hwecc - hardware ecc based page write function
191* @mtd: mtd info structure
192* @chip: nand chip info structure
193* @buf: data buffer
194* @oob_required: must write chip->oob_poi to OOB
195*
196***************************************************************************/
197static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
198 struct nand_chip *chip, const uint8_t *buf, int oob_required)
199{
200 int sectorIdx = 0;
201 int eccsize = chip->ecc.size;
202 int eccsteps = chip->ecc.steps;
203 const uint8_t *datap = buf;
204 uint8_t *oobp = chip->oob_poi;
205 int sectorOobSize = mtd->oobsize / eccsteps;
206
207 for (sectorIdx = 0; sectorIdx < eccsteps;
208 sectorIdx++, datap += eccsize, oobp += sectorOobSize) {
209 /* Enable hardware ECC before writing the buf */
210 nand_bcm_umi_bch_enable_write_hwecc();
211 bcm_umi_nand_write_buf(mtd, datap, eccsize);
212 nand_bcm_umi_bch_write_oobEcc(mtd->writesize, oobp,
213 NAND_ECC_NUM_BYTES);
214 }
215
216 bcm_umi_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
217}
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
deleted file mode 100644
index d0d1bd4d0e7d..000000000000
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ /dev/null
@@ -1,555 +0,0 @@
1/*****************************************************************************
2* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
3*
4* Unless you and Broadcom execute a separate written software license
5* agreement governing use of this software, this software is licensed to you
6* under the terms of the GNU General Public License version 2, available at
7* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8*
9* Notwithstanding the above, under no circumstances may you combine this
10* software in any way with any other Broadcom software provided under a
11* license other than the GPL, without Broadcom's express prior written
12* consent.
13*****************************************************************************/
14
15/* ---- Include Files ---------------------------------------------------- */
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/init.h>
19#include <linux/kernel.h>
20#include <linux/slab.h>
21#include <linux/string.h>
22#include <linux/ioport.h>
23#include <linux/device.h>
24#include <linux/delay.h>
25#include <linux/err.h>
26#include <linux/io.h>
27#include <linux/platform_device.h>
28#include <linux/mtd/mtd.h>
29#include <linux/mtd/nand.h>
30#include <linux/mtd/nand_ecc.h>
31#include <linux/mtd/partitions.h>
32
33#include <asm/mach-types.h>
34
35#include <mach/reg_nand.h>
36#include <mach/reg_umi.h>
37
38#include "nand_bcm_umi.h"
39
40#include <mach/memory_settings.h>
41
42#define USE_DMA 1
43#include <mach/dma.h>
44#include <linux/dma-mapping.h>
45#include <linux/completion.h>
46
47/* ---- External Variable Declarations ----------------------------------- */
48/* ---- External Function Prototypes ------------------------------------- */
49/* ---- Public Variables ------------------------------------------------- */
50/* ---- Private Constants and Types -------------------------------------- */
51static const __devinitconst char gBanner[] = KERN_INFO \
52 "BCM UMI MTD NAND Driver: 1.00\n";
53
54#if NAND_ECC_BCH
55static uint8_t scan_ff_pattern[] = { 0xff };
56
57static struct nand_bbt_descr largepage_bbt = {
58 .options = 0,
59 .offs = 0,
60 .len = 1,
61 .pattern = scan_ff_pattern
62};
63#endif
64
65/*
66** Preallocate a buffer to avoid having to do this every dma operation.
67** This is the size of the preallocated coherent DMA buffer.
68*/
69#if USE_DMA
70#define DMA_MIN_BUFLEN 512
71#define DMA_MAX_BUFLEN PAGE_SIZE
72#define USE_DIRECT_IO(len) (((len) < DMA_MIN_BUFLEN) || \
73 ((len) > DMA_MAX_BUFLEN))
74
75/*
76 * The current NAND data space goes from 0x80001900 to 0x80001FFF,
77 * which is only 0x700 = 1792 bytes long. This is too small for 2K, 4K page
78 * size NAND flash. Need to break the DMA down to multiple 1Ks.
79 *
80 * Need to make sure REG_NAND_DATA_PADDR + DMA_MAX_LEN < 0x80002000
81 */
82#define DMA_MAX_LEN 1024
83
84#else /* !USE_DMA */
85#define DMA_MIN_BUFLEN 0
86#define DMA_MAX_BUFLEN 0
87#define USE_DIRECT_IO(len) 1
88#endif
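The DMA window comment in this (removed) driver carries a small arithmetic argument: the window 0x80001900..0x80001FFF is 0x700 = 1792 bytes, and chunking transfers at DMA_MAX_LEN = 1024 keeps REG_NAND_DATA_PADDR + DMA_MAX_LEN below 0x80002000 provided the data port sits at the start of the window. A standalone check of those numbers (the 0x80001900 base is taken from the comment above, not from a header in this patch):

#include <stdio.h>

int main(void)
{
	unsigned long start = 0x80001900UL, end_excl = 0x80002000UL;
	unsigned long dma_max_len = 1024;

	printf("window = %lu bytes\n", 0x80001FFFUL - start + 1);	/* 1792 */
	printf("start + DMA_MAX_LEN = 0x%lx (%s 0x80002000)\n",
	       start + dma_max_len,
	       start + dma_max_len < end_excl ? "<" : ">=");
	return 0;
}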
89/* ---- Private Function Prototypes -------------------------------------- */
90static void bcm_umi_nand_read_buf(struct mtd_info *mtd, u_char * buf, int len);
91static void bcm_umi_nand_write_buf(struct mtd_info *mtd, const u_char * buf,
92 int len);
93
94/* ---- Private Variables ------------------------------------------------ */
95static struct mtd_info *board_mtd;
96static void __iomem *bcm_umi_io_base;
97static void *virtPtr;
98static dma_addr_t physPtr;
99static struct completion nand_comp;
100
101/* ---- Private Functions ------------------------------------------------ */
102#if NAND_ECC_BCH
103#include "bcm_umi_bch.c"
104#else
105#include "bcm_umi_hamming.c"
106#endif
107
108#if USE_DMA
109
110/* Handler called when the DMA finishes. */
111static void nand_dma_handler(DMA_Device_t dev, int reason, void *userData)
112{
113 complete(&nand_comp);
114}
115
116static int nand_dma_init(void)
117{
118 int rc;
119
120 rc = dma_set_device_handler(DMA_DEVICE_NAND_MEM_TO_MEM,
121 nand_dma_handler, NULL);
122 if (rc != 0) {
123 printk(KERN_ERR "dma_set_device_handler failed: %d\n", rc);
124 return rc;
125 }
126
127 virtPtr =
128 dma_alloc_coherent(NULL, DMA_MAX_BUFLEN, &physPtr, GFP_KERNEL);
129 if (virtPtr == NULL) {
130 printk(KERN_ERR "NAND - Failed to allocate memory for DMA buffer\n");
131 return -ENOMEM;
132 }
133
134 return 0;
135}
136
137static void nand_dma_term(void)
138{
139 if (virtPtr != NULL)
140 dma_free_coherent(NULL, DMA_MAX_BUFLEN, virtPtr, physPtr);
141}
142
143static void nand_dma_read(void *buf, int len)
144{
145 int offset = 0;
146 int tmp_len = 0;
147 int len_left = len;
148 DMA_Handle_t hndl;
149
150 if (virtPtr == NULL)
151 panic("nand_dma_read: virtPtr == NULL\n");
152
153 if ((void *)physPtr == NULL)
154 panic("nand_dma_read: physPtr == NULL\n");
155
156 hndl = dma_request_channel(DMA_DEVICE_NAND_MEM_TO_MEM);
157 if (hndl < 0) {
158 printk(KERN_ERR
159 "nand_dma_read: unable to allocate dma channel: %d\n",
160 (int)hndl);
161 panic("\n");
162 }
163
164 while (len_left > 0) {
165 if (len_left > DMA_MAX_LEN) {
166 tmp_len = DMA_MAX_LEN;
167 len_left -= DMA_MAX_LEN;
168 } else {
169 tmp_len = len_left;
170 len_left = 0;
171 }
172
173 init_completion(&nand_comp);
174 dma_transfer_mem_to_mem(hndl, REG_NAND_DATA_PADDR,
175 physPtr + offset, tmp_len);
176 wait_for_completion(&nand_comp);
177
178 offset += tmp_len;
179 }
180
181 dma_free_channel(hndl);
182
183 if (buf != NULL)
184 memcpy(buf, virtPtr, len);
185}
186
187static void nand_dma_write(const void *buf, int len)
188{
189 int offset = 0;
190 int tmp_len = 0;
191 int len_left = len;
192 DMA_Handle_t hndl;
193
194 if (buf == NULL)
195 panic("nand_dma_write: buf == NULL\n");
196
197 if (virtPtr == NULL)
198 panic("nand_dma_write: virtPtr == NULL\n");
199
200 if ((void *)physPtr == NULL)
201 panic("nand_dma_write: physPtr == NULL\n");
202
203 memcpy(virtPtr, buf, len);
204
205
206 hndl = dma_request_channel(DMA_DEVICE_NAND_MEM_TO_MEM);
207 if (hndl < 0) {
208 printk(KERN_ERR
209 "nand_dma_write: unable to allocate dma channel: %d\n",
210 (int)hndl);
211 panic("\n");
212 }
213
214 while (len_left > 0) {
215 if (len_left > DMA_MAX_LEN) {
216 tmp_len = DMA_MAX_LEN;
217 len_left -= DMA_MAX_LEN;
218 } else {
219 tmp_len = len_left;
220 len_left = 0;
221 }
222
223 init_completion(&nand_comp);
224 dma_transfer_mem_to_mem(hndl, physPtr + offset,
225 REG_NAND_DATA_PADDR, tmp_len);
226 wait_for_completion(&nand_comp);
227
228 offset += tmp_len;
229 }
230
231 dma_free_channel(hndl);
232}
233
234#endif
235
236static int nand_dev_ready(struct mtd_info *mtd)
237{
238 return nand_bcm_umi_dev_ready();
239}
240
241/****************************************************************************
242*
243* bcm_umi_nand_inithw
244*
245* This routine does the necessary hardware (board-specific)
246* initializations. This includes setting up the timings, etc.
247*
248***************************************************************************/
249int bcm_umi_nand_inithw(void)
250{
251 /* Configure nand timing parameters */
252 writel(readl(&REG_UMI_NAND_TCR) & ~0x7ffff, &REG_UMI_NAND_TCR);
253 writel(readl(&REG_UMI_NAND_TCR) | HW_CFG_NAND_TCR, &REG_UMI_NAND_TCR);
254
255#if !defined(CONFIG_MTD_NAND_BCM_UMI_HWCS)
256 /* enable software control of CS */
257 writel(readl(&REG_UMI_NAND_TCR) | REG_UMI_NAND_TCR_CS_SWCTRL, &REG_UMI_NAND_TCR);
258#endif
259
260 /* keep NAND chip select asserted */
261 writel(readl(&REG_UMI_NAND_RCSR) | REG_UMI_NAND_RCSR_CS_ASSERTED, &REG_UMI_NAND_RCSR);
262
263 writel(readl(&REG_UMI_NAND_TCR) & ~REG_UMI_NAND_TCR_WORD16, &REG_UMI_NAND_TCR);
264 /* enable writes to flash */
265 writel(readl(&REG_UMI_MMD_ICR) | REG_UMI_MMD_ICR_FLASH_WP, &REG_UMI_MMD_ICR);
266
267 writel(NAND_CMD_RESET, bcm_umi_io_base + REG_NAND_CMD_OFFSET);
268 nand_bcm_umi_wait_till_ready();
269
270#if NAND_ECC_BCH
271 nand_bcm_umi_bch_config_ecc(NAND_ECC_NUM_BYTES);
272#endif
273
274 return 0;
275}
276
277/* Used to turn latch the proper register for access. */
278static void bcm_umi_nand_hwcontrol(struct mtd_info *mtd, int cmd,
279 unsigned int ctrl)
280{
281 /* send command to hardware */
282 struct nand_chip *chip = mtd->priv;
283 if (ctrl & NAND_CTRL_CHANGE) {
284 if (ctrl & NAND_CLE) {
285 chip->IO_ADDR_W = bcm_umi_io_base + REG_NAND_CMD_OFFSET;
286 goto CMD;
287 }
288 if (ctrl & NAND_ALE) {
289 chip->IO_ADDR_W =
290 bcm_umi_io_base + REG_NAND_ADDR_OFFSET;
291 goto CMD;
292 }
293 chip->IO_ADDR_W = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
294 }
295
296CMD:
297 /* Send command to chip directly */
298 if (cmd != NAND_CMD_NONE)
299 writeb(cmd, chip->IO_ADDR_W);
300}
301
302static void bcm_umi_nand_write_buf(struct mtd_info *mtd, const u_char * buf,
303 int len)
304{
305 if (USE_DIRECT_IO(len)) {
306 /* Do it the old way if the buffer is small or too large.
307 * Probably quicker than starting and checking dma. */
308 int i;
309 struct nand_chip *this = mtd->priv;
310
311 for (i = 0; i < len; i++)
312 writeb(buf[i], this->IO_ADDR_W);
313 }
314#if USE_DMA
315 else
316 nand_dma_write(buf, len);
317#endif
318}
319
320static void bcm_umi_nand_read_buf(struct mtd_info *mtd, u_char * buf, int len)
321{
322 if (USE_DIRECT_IO(len)) {
323 int i;
324 struct nand_chip *this = mtd->priv;
325
326 for (i = 0; i < len; i++)
327 buf[i] = readb(this->IO_ADDR_R);
328 }
329#if USE_DMA
330 else
331 nand_dma_read(buf, len);
332#endif
333}
334
335static uint8_t readbackbuf[NAND_MAX_PAGESIZE];
336static int bcm_umi_nand_verify_buf(struct mtd_info *mtd, const u_char * buf,
337 int len)
338{
339 /*
340 * Try to readback page with ECC correction. This is necessary
341 * for MLC parts which may have permanently stuck bits.
342 */
343 struct nand_chip *chip = mtd->priv;
344 int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0, 0);
345 if (ret < 0)
346 return -EFAULT;
347 else {
348 if (memcmp(readbackbuf, buf, len) == 0)
349 return 0;
350
351 return -EFAULT;
352 }
353 return 0;
354}
355
356static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
357{
358 struct nand_chip *this;
359 struct resource *r;
360 int err = 0;
361
362 printk(gBanner);
363
364 /* Allocate memory for MTD device structure and private data */
365 board_mtd =
366 kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip),
367 GFP_KERNEL);
368 if (!board_mtd) {
369 printk(KERN_WARNING
370 "Unable to allocate NAND MTD device structure.\n");
371 return -ENOMEM;
372 }
373
374 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
375
376 if (!r) {
377 err = -ENXIO;
378 goto out_free;
379 }
380
381 /* map physical address */
382 bcm_umi_io_base = ioremap(r->start, resource_size(r));
383
384 if (!bcm_umi_io_base) {
385 printk(KERN_ERR "ioremap to access BCM UMI NAND chip failed\n");
386 err = -EIO;
387 goto out_free;
388 }
389
390 /* Get pointer to private data */
391 this = (struct nand_chip *)(&board_mtd[1]);
392
393 /* Initialize structures */
394 memset((char *)board_mtd, 0, sizeof(struct mtd_info));
395 memset((char *)this, 0, sizeof(struct nand_chip));
396
397 /* Link the private data with the MTD structure */
398 board_mtd->priv = this;
399
400 /* Initialize the NAND hardware. */
401 if (bcm_umi_nand_inithw() < 0) {
402 printk(KERN_ERR "BCM UMI NAND chip could not be initialized\n");
403 err = -EIO;
404 goto out_unmap;
405 }
406
407 /* Set address of NAND IO lines */
408 this->IO_ADDR_W = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
409 this->IO_ADDR_R = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
410
411 /* Set command delay time, see datasheet for correct value */
412 this->chip_delay = 0;
413 /* Assign the device ready function, if available */
414 this->dev_ready = nand_dev_ready;
415 this->options = 0;
416
417 this->write_buf = bcm_umi_nand_write_buf;
418 this->read_buf = bcm_umi_nand_read_buf;
419 this->verify_buf = bcm_umi_nand_verify_buf;
420
421 this->cmd_ctrl = bcm_umi_nand_hwcontrol;
422 this->ecc.mode = NAND_ECC_HW;
423 this->ecc.size = 512;
424 this->ecc.bytes = NAND_ECC_NUM_BYTES;
425#if NAND_ECC_BCH
426 this->ecc.read_page = bcm_umi_bch_read_page_hwecc;
427 this->ecc.write_page = bcm_umi_bch_write_page_hwecc;
428#else
429 this->ecc.correct = nand_correct_data512;
430 this->ecc.calculate = bcm_umi_hamming_get_hw_ecc;
431 this->ecc.hwctl = bcm_umi_hamming_enable_hwecc;
432#endif
433
434#if USE_DMA
435 err = nand_dma_init();
436 if (err != 0)
437 goto out_unmap;
438#endif
439
440 /* Figure out the size of the device that we have.
441 * We need to do this to figure out which ECC
442 * layout we'll be using.
443 */
444
445 err = nand_scan_ident(board_mtd, 1, NULL);
446 if (err) {
447 printk(KERN_ERR "nand_scan failed: %d\n", err);
448 goto out_unmap;
449 }
450
451 /* Now that we know the nand size, we can setup the ECC layout */
452
453 switch (board_mtd->writesize) { /* writesize is the pagesize */
454 case 4096:
455 this->ecc.layout = &nand_hw_eccoob_4096;
456 break;
457 case 2048:
458 this->ecc.layout = &nand_hw_eccoob_2048;
459 break;
460 case 512:
461 this->ecc.layout = &nand_hw_eccoob_512;
462 break;
463 default:
464 {
465 printk(KERN_ERR "NAND - Unrecognized pagesize: %d\n",
466 board_mtd->writesize);
467 err = -EINVAL;
468 goto out_unmap;
469 }
470 }
471
472#if NAND_ECC_BCH
473 if (board_mtd->writesize > 512) {
474 if (this->bbt_options & NAND_BBT_USE_FLASH)
475 largepage_bbt.options = NAND_BBT_SCAN2NDPAGE;
476 this->badblock_pattern = &largepage_bbt;
477 }
478
479 this->ecc.strength = 8;
480
481#endif
482
483 /* Now finish off the scan, now that ecc.layout has been initialized. */
484
485 err = nand_scan_tail(board_mtd);
486 if (err) {
487 printk(KERN_ERR "nand_scan failed: %d\n", err);
488 goto out_unmap;
489 }
490
491 /* Register the partitions */
492 board_mtd->name = "bcm_umi-nand";
493 mtd_device_parse_register(board_mtd, NULL, NULL, NULL, 0);
494
495 /* Return happy */
496 return 0;
497out_unmap:
498 iounmap(bcm_umi_io_base);
499out_free:
500 kfree(board_mtd);
501 return err;
502}
503
504static int bcm_umi_nand_remove(struct platform_device *pdev)
505{
506#if USE_DMA
507 nand_dma_term();
508#endif
509
510 /* Release resources, unregister device */
511 nand_release(board_mtd);
512
513 /* unmap physical address */
514 iounmap(bcm_umi_io_base);
515
516 /* Free the MTD device structure */
517 kfree(board_mtd);
518
519 return 0;
520}
521
522#ifdef CONFIG_PM
523static int bcm_umi_nand_suspend(struct platform_device *pdev,
524 pm_message_t state)
525{
526 printk(KERN_ERR "MTD NAND suspend is being called\n");
527 return 0;
528}
529
530static int bcm_umi_nand_resume(struct platform_device *pdev)
531{
532 printk(KERN_ERR "MTD NAND resume is being called\n");
533 return 0;
534}
535#else
536#define bcm_umi_nand_suspend NULL
537#define bcm_umi_nand_resume NULL
538#endif
539
540static struct platform_driver nand_driver = {
541 .driver = {
542 .name = "bcm-nand",
543 .owner = THIS_MODULE,
544 },
545 .probe = bcm_umi_nand_probe,
546 .remove = bcm_umi_nand_remove,
547 .suspend = bcm_umi_nand_suspend,
548 .resume = bcm_umi_nand_resume,
549};
550
551module_platform_driver(nand_driver);
552
553MODULE_LICENSE("GPL");
554MODULE_AUTHOR("Broadcom");
555MODULE_DESCRIPTION("BCM UMI MTD NAND driver");
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 3f1c18599cbd..ab0caa74eb43 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -566,11 +566,13 @@ static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip
566 return 0; 566 return 0;
567} 567}
568 568
569static void bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
570 const uint8_t *buf, int oob_required)
 569static int bf5xx_nand_write_page_raw(struct mtd_info *mtd,
 570 struct nand_chip *chip, const uint8_t *buf, int oob_required)
571{ 571{
572 bf5xx_nand_write_buf(mtd, buf, mtd->writesize); 572 bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
573 bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize); 573 bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
574
575 return 0;
574} 576}
575 577
576/* 578/*
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index f3f6cfedd69e..2bb7170502c2 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -377,7 +377,7 @@ static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
377 * @buf: buffer to store read data 377 * @buf: buffer to store read data
378 * @oob_required: caller expects OOB data read to chip->oob_poi 378 * @oob_required: caller expects OOB data read to chip->oob_poi
379 * 379 *
380 * The hw generator calculates the error syndrome automatically. Therefor
 380 * The hw generator calculates the error syndrome automatically. Therefore
381 * we need a special oob layout and handling. 381 * we need a special oob layout and handling.
382 */ 382 */
383static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, 383static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -520,7 +520,7 @@ static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
520}; 520};
521 521
522 522
523static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
 523static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
524 struct nand_chip *chip, 524 struct nand_chip *chip,
525 const uint8_t *buf, int oob_required) 525 const uint8_t *buf, int oob_required)
526{ 526{
@@ -531,6 +531,8 @@ static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
531 531
532 /* Set up ECC autogeneration */ 532 /* Set up ECC autogeneration */
533 cafe->ctl2 |= (1<<30); 533 cafe->ctl2 |= (1<<30);
534
535 return 0;
534} 536}
535 537
536static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, 538static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -542,9 +544,12 @@ static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
542 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); 544 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
543 545
544 if (unlikely(raw)) 546 if (unlikely(raw))
545 chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
 547 status = chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
546 else 548 else
547 chip->ecc.write_page(mtd, chip, buf, oob_required);
 549 status = chip->ecc.write_page(mtd, chip, buf, oob_required);
550
551 if (status < 0)
552 return status;
548 553
549 /* 554 /*
550 * Cached progamming disabled for now, Not sure if its worth the 555 * Cached progamming disabled for now, Not sure if its worth the
@@ -571,13 +576,6 @@ static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
571 status = chip->waitfunc(mtd, chip); 576 status = chip->waitfunc(mtd, chip);
572 } 577 }
573 578
574#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
575 /* Send command to read back the data */
576 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
577
578 if (chip->verify_buf(mtd, buf, mtd->writesize))
579 return -EIO;
580#endif
581 return 0; 579 return 0;
582} 580}
583 581
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 1024bfc05c86..39b2ef848811 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -76,18 +76,6 @@ static void cmx270_read_buf(struct mtd_info *mtd, u_char *buf, int len)
76 *buf++ = readl(this->IO_ADDR_R) >> 16; 76 *buf++ = readl(this->IO_ADDR_R) >> 16;
77} 77}
78 78
79static int cmx270_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
80{
81 int i;
82 struct nand_chip *this = mtd->priv;
83
84 for (i=0; i<len; i++)
85 if (buf[i] != (u_char)(readl(this->IO_ADDR_R) >> 16))
86 return -EFAULT;
87
88 return 0;
89}
90
91static inline void nand_cs_on(void) 79static inline void nand_cs_on(void)
92{ 80{
93 gpio_set_value(GPIO_NAND_CS, 0); 81 gpio_set_value(GPIO_NAND_CS, 0);
@@ -209,7 +197,6 @@ static int __init cmx270_init(void)
209 this->read_byte = cmx270_read_byte; 197 this->read_byte = cmx270_read_byte;
210 this->read_buf = cmx270_read_buf; 198 this->read_buf = cmx270_read_buf;
211 this->write_buf = cmx270_write_buf; 199 this->write_buf = cmx270_write_buf;
212 this->verify_buf = cmx270_verify_buf;
213 200
214 /* Scan to find existence of the device */ 201 /* Scan to find existence of the device */
215 if (nand_scan (cmx270_nand_mtd, 1)) { 202 if (nand_scan (cmx270_nand_mtd, 1)) {
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index f1deb1ee2c95..945047ad0952 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -33,6 +33,7 @@
33#include <linux/mtd/nand.h> 33#include <linux/mtd/nand.h>
34#include <linux/mtd/partitions.h> 34#include <linux/mtd/partitions.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/of_device.h>
36 37
37#include <linux/platform_data/mtd-davinci.h> 38#include <linux/platform_data/mtd-davinci.h>
38#include <linux/platform_data/mtd-davinci-aemif.h> 39#include <linux/platform_data/mtd-davinci-aemif.h>
@@ -518,9 +519,75 @@ static struct nand_ecclayout hwecc4_2048 __initconst = {
518 }, 519 },
519}; 520};
520 521
522#if defined(CONFIG_OF)
523static const struct of_device_id davinci_nand_of_match[] = {
524 {.compatible = "ti,davinci-nand", },
525 {},
526};
527MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
528
529static struct davinci_nand_pdata
530 *nand_davinci_get_pdata(struct platform_device *pdev)
531{
532 if (!pdev->dev.platform_data && pdev->dev.of_node) {
533 struct davinci_nand_pdata *pdata;
534 const char *mode;
535 u32 prop;
536 int len;
537
538 pdata = devm_kzalloc(&pdev->dev,
539 sizeof(struct davinci_nand_pdata),
540 GFP_KERNEL);
541 pdev->dev.platform_data = pdata;
542 if (!pdata)
543 return NULL;
544 if (!of_property_read_u32(pdev->dev.of_node,
545 "ti,davinci-chipselect", &prop))
546 pdev->id = prop;
547 if (!of_property_read_u32(pdev->dev.of_node,
548 "ti,davinci-mask-ale", &prop))
549 pdata->mask_ale = prop;
550 if (!of_property_read_u32(pdev->dev.of_node,
551 "ti,davinci-mask-cle", &prop))
552 pdata->mask_cle = prop;
553 if (!of_property_read_u32(pdev->dev.of_node,
554 "ti,davinci-mask-chipsel", &prop))
555 pdata->mask_chipsel = prop;
556 if (!of_property_read_string(pdev->dev.of_node,
557 "ti,davinci-ecc-mode", &mode)) {
558 if (!strncmp("none", mode, 4))
559 pdata->ecc_mode = NAND_ECC_NONE;
560 if (!strncmp("soft", mode, 4))
561 pdata->ecc_mode = NAND_ECC_SOFT;
562 if (!strncmp("hw", mode, 2))
563 pdata->ecc_mode = NAND_ECC_HW;
564 }
565 if (!of_property_read_u32(pdev->dev.of_node,
566 "ti,davinci-ecc-bits", &prop))
567 pdata->ecc_bits = prop;
568 if (!of_property_read_u32(pdev->dev.of_node,
569 "ti,davinci-nand-buswidth", &prop))
570 if (prop == 16)
571 pdata->options |= NAND_BUSWIDTH_16;
572 if (of_find_property(pdev->dev.of_node,
573 "ti,davinci-nand-use-bbt", &len))
574 pdata->bbt_options = NAND_BBT_USE_FLASH;
575 }
576
577 return pdev->dev.platform_data;
578}
579#else
580#define davinci_nand_of_match NULL
581static struct davinci_nand_pdata
582 *nand_davinci_get_pdata(struct platform_device *pdev)
583{
584 return pdev->dev.platform_data;
585}
586#endif
587
521static int __init nand_davinci_probe(struct platform_device *pdev) 588static int __init nand_davinci_probe(struct platform_device *pdev)
522{ 589{
523 struct davinci_nand_pdata *pdata = pdev->dev.platform_data;
 590 struct davinci_nand_pdata *pdata;
524 struct davinci_nand_info *info; 591 struct davinci_nand_info *info;
525 struct resource *res1; 592 struct resource *res1;
526 struct resource *res2; 593 struct resource *res2;
@@ -530,6 +597,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
530 uint32_t val; 597 uint32_t val;
531 nand_ecc_modes_t ecc_mode; 598 nand_ecc_modes_t ecc_mode;
532 599
600 pdata = nand_davinci_get_pdata(pdev);
533 /* insist on board-specific configuration */ 601 /* insist on board-specific configuration */
534 if (!pdata) 602 if (!pdata)
535 return -ENODEV; 603 return -ENODEV;
@@ -656,7 +724,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
656 goto err_clk; 724 goto err_clk;
657 } 725 }
658 726
659 ret = clk_enable(info->clk);
 727 ret = clk_prepare_enable(info->clk);
660 if (ret < 0) { 728 if (ret < 0) {
661 dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n", 729 dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
662 ret); 730 ret);
@@ -767,7 +835,7 @@ syndrome_done:
767 835
768err_scan: 836err_scan:
769err_timing: 837err_timing:
770 clk_disable(info->clk);
 838 clk_disable_unprepare(info->clk);
771 839
772err_clk_enable: 840err_clk_enable:
773 clk_put(info->clk); 841 clk_put(info->clk);
@@ -804,7 +872,7 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
804 872
805 nand_release(&info->mtd); 873 nand_release(&info->mtd);
806 874
807 clk_disable(info->clk);
 875 clk_disable_unprepare(info->clk);
808 clk_put(info->clk); 876 clk_put(info->clk);
809 877
810 kfree(info); 878 kfree(info);
@@ -816,6 +884,8 @@ static struct platform_driver nand_davinci_driver = {
816 .remove = __exit_p(nand_davinci_remove), 884 .remove = __exit_p(nand_davinci_remove),
817 .driver = { 885 .driver = {
818 .name = "davinci_nand", 886 .name = "davinci_nand",
887 .owner = THIS_MODULE,
888 .of_match_table = davinci_nand_of_match,
819 }, 889 },
820}; 890};
821MODULE_ALIAS("platform:davinci_nand"); 891MODULE_ALIAS("platform:davinci_nand");
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 0650aafa0dd2..e706a237170f 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1028,7 +1028,7 @@ static void denali_setup_dma(struct denali_nand_info *denali, int op)
1028 1028
1029/* writes a page. user specifies type, and this function handles the 1029/* writes a page. user specifies type, and this function handles the
1030 * configuration details. */ 1030 * configuration details. */
1031static void write_page(struct mtd_info *mtd, struct nand_chip *chip, 1031static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
1032 const uint8_t *buf, bool raw_xfer) 1032 const uint8_t *buf, bool raw_xfer)
1033{ 1033{
1034 struct denali_nand_info *denali = mtd_to_denali(mtd); 1034 struct denali_nand_info *denali = mtd_to_denali(mtd);
@@ -1078,6 +1078,8 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
1078 1078
1079 denali_enable_dma(denali, false); 1079 denali_enable_dma(denali, false);
1080 dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE); 1080 dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
1081
1082 return 0;
1081} 1083}
1082 1084
1083/* NAND core entry points */ 1085/* NAND core entry points */
@@ -1086,24 +1088,24 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
1086 * writing a page with ECC or without is similar, all the work is done 1088 * writing a page with ECC or without is similar, all the work is done
1087 * by write_page above. 1089 * by write_page above.
1088 * */ 1090 * */
1089static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, 1091static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1090 const uint8_t *buf, int oob_required) 1092 const uint8_t *buf, int oob_required)
1091{ 1093{
1092 /* for regular page writes, we let HW handle all the ECC 1094 /* for regular page writes, we let HW handle all the ECC
1093 * data written to the device. */ 1095 * data written to the device. */
1094 write_page(mtd, chip, buf, false); 1096 return write_page(mtd, chip, buf, false);
1095} 1097}
1096 1098
1097/* This is the callback that the NAND core calls to write a page without ECC. 1099/* This is the callback that the NAND core calls to write a page without ECC.
1098 * raw access is similar to ECC page writes, so all the work is done in the 1100 * raw access is similar to ECC page writes, so all the work is done in the
1099 * write_page() function above. 1101 * write_page() function above.
1100 */ 1102 */
1101static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1103static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1102 const uint8_t *buf, int oob_required) 1104 const uint8_t *buf, int oob_required)
1103{ 1105{
1104 /* for raw page writes, we want to disable ECC and simply write 1106 /* for raw page writes, we want to disable ECC and simply write
1105 whatever data is in the buffer. */ 1107 whatever data is in the buffer. */
1106 write_page(mtd, chip, buf, true); 1108 return write_page(mtd, chip, buf, true);
1107} 1109}
1108 1110
1109static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip, 1111static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
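The ecc.write_page/write_page_raw callbacks converted above (and in several drivers below) now return int so that a controller-level failure can be propagated instead of silently dropped. A minimal sketch of the new callback shape for a hypothetical driver:

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

/* Return 0 on success, or a negative errno if the data could not be
 * handed to the controller; the chip's program status is still
 * collected later via chip->waitfunc(). */
static int exdrv_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			    const uint8_t *buf, int oob_required)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	if (oob_required)
		chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}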
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index e2ca067631cf..256eb30f6180 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -376,19 +376,6 @@ static void doc2000_readbuf_dword(struct mtd_info *mtd, u_char *buf, int len)
376 } 376 }
377} 377}
378 378
379static int doc2000_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
380{
381 struct nand_chip *this = mtd->priv;
382 struct doc_priv *doc = this->priv;
383 void __iomem *docptr = doc->virtadr;
384 int i;
385
386 for (i = 0; i < len; i++)
387 if (buf[i] != ReadDOC(docptr, 2k_CDSN_IO))
388 return -EFAULT;
389 return 0;
390}
391
392static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr) 379static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
393{ 380{
394 struct nand_chip *this = mtd->priv; 381 struct nand_chip *this = mtd->priv;
@@ -526,26 +513,6 @@ static void doc2001_readbuf(struct mtd_info *mtd, u_char *buf, int len)
526 buf[i] = ReadDOC(docptr, LastDataRead); 513 buf[i] = ReadDOC(docptr, LastDataRead);
527} 514}
528 515
529static int doc2001_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
530{
531 struct nand_chip *this = mtd->priv;
532 struct doc_priv *doc = this->priv;
533 void __iomem *docptr = doc->virtadr;
534 int i;
535
536 /* Start read pipeline */
537 ReadDOC(docptr, ReadPipeInit);
538
539 for (i = 0; i < len - 1; i++)
540 if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
541 ReadDOC(docptr, LastDataRead);
542 return i;
543 }
544 if (buf[i] != ReadDOC(docptr, LastDataRead))
545 return i;
546 return 0;
547}
548
549static u_char doc2001plus_read_byte(struct mtd_info *mtd) 516static u_char doc2001plus_read_byte(struct mtd_info *mtd)
550{ 517{
551 struct nand_chip *this = mtd->priv; 518 struct nand_chip *this = mtd->priv;
@@ -610,33 +577,6 @@ static void doc2001plus_readbuf(struct mtd_info *mtd, u_char *buf, int len)
610 printk("\n"); 577 printk("\n");
611} 578}
612 579
613static int doc2001plus_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
614{
615 struct nand_chip *this = mtd->priv;
616 struct doc_priv *doc = this->priv;
617 void __iomem *docptr = doc->virtadr;
618 int i;
619
620 if (debug)
621 printk("verifybuf of %d bytes: ", len);
622
623 /* Start read pipeline */
624 ReadDOC(docptr, Mplus_ReadPipeInit);
625 ReadDOC(docptr, Mplus_ReadPipeInit);
626
627 for (i = 0; i < len - 2; i++)
628 if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
629 ReadDOC(docptr, Mplus_LastDataRead);
630 ReadDOC(docptr, Mplus_LastDataRead);
631 return i;
632 }
633 if (buf[len - 2] != ReadDOC(docptr, Mplus_LastDataRead))
634 return len - 2;
635 if (buf[len - 1] != ReadDOC(docptr, Mplus_LastDataRead))
636 return len - 1;
637 return 0;
638}
639
640static void doc2001plus_select_chip(struct mtd_info *mtd, int chip) 580static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
641{ 581{
642 struct nand_chip *this = mtd->priv; 582 struct nand_chip *this = mtd->priv;
@@ -1432,7 +1372,6 @@ static inline int __init doc2000_init(struct mtd_info *mtd)
1432 this->read_byte = doc2000_read_byte; 1372 this->read_byte = doc2000_read_byte;
1433 this->write_buf = doc2000_writebuf; 1373 this->write_buf = doc2000_writebuf;
1434 this->read_buf = doc2000_readbuf; 1374 this->read_buf = doc2000_readbuf;
1435 this->verify_buf = doc2000_verifybuf;
1436 this->scan_bbt = nftl_scan_bbt; 1375 this->scan_bbt = nftl_scan_bbt;
1437 1376
1438 doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO; 1377 doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
@@ -1449,7 +1388,6 @@ static inline int __init doc2001_init(struct mtd_info *mtd)
1449 this->read_byte = doc2001_read_byte; 1388 this->read_byte = doc2001_read_byte;
1450 this->write_buf = doc2001_writebuf; 1389 this->write_buf = doc2001_writebuf;
1451 this->read_buf = doc2001_readbuf; 1390 this->read_buf = doc2001_readbuf;
1452 this->verify_buf = doc2001_verifybuf;
1453 1391
1454 ReadDOC(doc->virtadr, ChipID); 1392 ReadDOC(doc->virtadr, ChipID);
1455 ReadDOC(doc->virtadr, ChipID); 1393 ReadDOC(doc->virtadr, ChipID);
@@ -1480,7 +1418,6 @@ static inline int __init doc2001plus_init(struct mtd_info *mtd)
1480 this->read_byte = doc2001plus_read_byte; 1418 this->read_byte = doc2001plus_read_byte;
1481 this->write_buf = doc2001plus_writebuf; 1419 this->write_buf = doc2001plus_writebuf;
1482 this->read_buf = doc2001plus_readbuf; 1420 this->read_buf = doc2001plus_readbuf;
1483 this->verify_buf = doc2001plus_verifybuf;
1484 this->scan_bbt = inftl_scan_bbt; 1421 this->scan_bbt = inftl_scan_bbt;
1485 this->cmd_ctrl = NULL; 1422 this->cmd_ctrl = NULL;
1486 this->select_chip = doc2001plus_select_chip; 1423 this->select_chip = doc2001plus_select_chip;
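The deleted doc2000/doc2001/doc2001plus verify_buf() helpers (and the similar ones removed from the eLBC, IFC and GPIO drivers below) become dead code once the NAND core stops calling chip->verify_buf after writes. A caller that still wants verification can read the data back through the regular MTD API; a hedged sketch of such a check, not part of the patch:

#include <linux/mtd/mtd.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Illustrative read-back verify: 'tmp' must hold at least 'len' bytes. */
static int example_verify_write(struct mtd_info *mtd, loff_t ofs,
				const u_char *buf, size_t len, u_char *tmp)
{
	size_t retlen;
	int err;

	err = mtd_read(mtd, ofs, len, &retlen, tmp);
	if (err && err != -EUCLEAN)	/* -EUCLEAN means corrected bitflips */
		return err;
	if (retlen != len || memcmp(buf, tmp, len))
		return -EIO;
	return 0;
}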
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index a225e49a5623..799da5d1c857 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -378,9 +378,9 @@ static int correct_data(struct mtd_info *mtd, uint8_t *buf, int page)
378 * bit flips(s) are not reported in stats. 378 * bit flips(s) are not reported in stats.
379 */ 379 */
380 380
381 if (doc->oob_buf[15]) { 381 if (nand->oob_poi[15]) {
382 int bit, numsetbits = 0; 382 int bit, numsetbits = 0;
383 unsigned long written_flag = doc->oob_buf[15]; 383 unsigned long written_flag = nand->oob_poi[15];
384 for_each_set_bit(bit, &written_flag, 8) 384 for_each_set_bit(bit, &written_flag, 8)
385 numsetbits++; 385 numsetbits++;
386 if (numsetbits > 4) { /* assume blank */ 386 if (numsetbits > 4) { /* assume blank */
@@ -428,7 +428,7 @@ static int correct_data(struct mtd_info *mtd, uint8_t *buf, int page)
 428 /* if error within oob area preceding ecc bytes... */ 428 /* if error within oob area preceding ecc bytes... */
429 if (errpos[i] > DOCG4_PAGE_SIZE * 8) 429 if (errpos[i] > DOCG4_PAGE_SIZE * 8)
430 change_bit(errpos[i] - DOCG4_PAGE_SIZE * 8, 430 change_bit(errpos[i] - DOCG4_PAGE_SIZE * 8,
431 (unsigned long *)doc->oob_buf); 431 (unsigned long *)nand->oob_poi);
432 432
433 else /* error in page data */ 433 else /* error in page data */
434 change_bit(errpos[i], (unsigned long *)buf); 434 change_bit(errpos[i], (unsigned long *)buf);
@@ -748,18 +748,12 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
748 748
749 docg4_read_buf(mtd, buf, DOCG4_PAGE_SIZE); /* read the page data */ 749 docg4_read_buf(mtd, buf, DOCG4_PAGE_SIZE); /* read the page data */
750 750
751 /* 751 /* this device always reads oob after page data */
752 * Diskonchips read oob immediately after a page read. Mtd
753 * infrastructure issues a separate command for reading oob after the
754 * page is read. So we save the oob bytes in a local buffer and just
755 * copy it if the next command reads oob from the same page.
756 */
757
758 /* first 14 oob bytes read from I/O reg */ 752 /* first 14 oob bytes read from I/O reg */
759 docg4_read_buf(mtd, doc->oob_buf, 14); 753 docg4_read_buf(mtd, nand->oob_poi, 14);
760 754
761 /* last 2 read from another reg */ 755 /* last 2 read from another reg */
762 buf16 = (uint16_t *)(doc->oob_buf + 14); 756 buf16 = (uint16_t *)(nand->oob_poi + 14);
763 *buf16 = readw(docptr + DOCG4_MYSTERY_REG); 757 *buf16 = readw(docptr + DOCG4_MYSTERY_REG);
764 758
765 write_nop(docptr); 759 write_nop(docptr);
@@ -782,6 +776,8 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
782 } 776 }
783 777
784 writew(0, docptr + DOC_DATAEND); 778 writew(0, docptr + DOC_DATAEND);
779 if (bits_corrected == -EBADMSG) /* uncorrectable errors */
780 return 0;
785 return bits_corrected; 781 return bits_corrected;
786} 782}
787 783
@@ -807,21 +803,6 @@ static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
807 803
808 dev_dbg(doc->dev, "%s: page %x\n", __func__, page); 804 dev_dbg(doc->dev, "%s: page %x\n", __func__, page);
809 805
810 /*
811 * Oob bytes are read as part of a normal page read. If the previous
812 * nand command was a read of the page whose oob is now being read, just
813 * copy the oob bytes that we saved in a local buffer and avoid a
814 * separate oob read.
815 */
816 if (doc->last_command.command == NAND_CMD_READ0 &&
817 doc->last_command.page == page) {
818 memcpy(nand->oob_poi, doc->oob_buf, 16);
819 return 0;
820 }
821
822 /*
823 * Separate read of oob data only.
824 */
825 docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page); 806 docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page);
826 807
827 writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0); 808 writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0);
@@ -898,7 +879,7 @@ static void docg4_erase_block(struct mtd_info *mtd, int page)
898 write_nop(docptr); 879 write_nop(docptr);
899} 880}
900 881
901static void write_page(struct mtd_info *mtd, struct nand_chip *nand, 882static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
902 const uint8_t *buf, bool use_ecc) 883 const uint8_t *buf, bool use_ecc)
903{ 884{
904 struct docg4_priv *doc = nand->priv; 885 struct docg4_priv *doc = nand->priv;
@@ -950,15 +931,17 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *nand,
950 write_nop(docptr); 931 write_nop(docptr);
951 writew(0, docptr + DOC_DATAEND); 932 writew(0, docptr + DOC_DATAEND);
952 write_nop(docptr); 933 write_nop(docptr);
934
935 return 0;
953} 936}
954 937
955static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand, 938static int docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
956 const uint8_t *buf, int oob_required) 939 const uint8_t *buf, int oob_required)
957{ 940{
958 return write_page(mtd, nand, buf, false); 941 return write_page(mtd, nand, buf, false);
959} 942}
960 943
961static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand, 944static int docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
962 const uint8_t *buf, int oob_required) 945 const uint8_t *buf, int oob_required)
963{ 946{
964 return write_page(mtd, nand, buf, true); 947 return write_page(mtd, nand, buf, true);
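With the hunks above, the read path returns 0 for an uncorrectable page instead of a negative value: the NAND core expects ecc.read_page() to report the maximum number of corrected bitflips, while uncorrectable pages are flagged by incrementing mtd->ecc_stats.failed (which correct_data() already does). A rough sketch of that contract, with a hypothetical driver and helper:

/* exdrv_hw_read_and_correct() is a hypothetical helper returning the
 * number of corrected bitflips, or a negative value when the ECC engine
 * could not correct the page. */
static int exdrv_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			   uint8_t *buf, int oob_required, int page)
{
	int bitflips = exdrv_hw_read_and_correct(mtd, buf);

	if (bitflips < 0) {
		mtd->ecc_stats.failed++;	/* uncorrectable */
		return 0;			/* not a negative errno */
	}
	return bitflips;			/* max corrected bitflips */
}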
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 784293806110..cc1480a5e4c1 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -614,41 +614,6 @@ static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
614 len, avail); 614 len, avail);
615} 615}
616 616
617/*
618 * Verify buffer against the FCM Controller Data Buffer
619 */
620static int fsl_elbc_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
621{
622 struct nand_chip *chip = mtd->priv;
623 struct fsl_elbc_mtd *priv = chip->priv;
624 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
625 int i;
626
627 if (len < 0) {
628 dev_err(priv->dev, "write_buf of %d bytes", len);
629 return -EINVAL;
630 }
631
632 if ((unsigned int)len >
633 elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index) {
634 dev_err(priv->dev,
635 "verify_buf beyond end of buffer "
636 "(%d requested, %u available)\n",
637 len, elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index);
638
639 elbc_fcm_ctrl->index = elbc_fcm_ctrl->read_bytes;
640 return -EINVAL;
641 }
642
643 for (i = 0; i < len; i++)
644 if (in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index + i])
645 != buf[i])
646 break;
647
648 elbc_fcm_ctrl->index += len;
649 return i == len && elbc_fcm_ctrl->status == LTESR_CC ? 0 : -EIO;
650}
651
652/* This function is called after Program and Erase Operations to 617/* This function is called after Program and Erase Operations to
653 * check for success or failure. 618 * check for success or failure.
654 */ 619 */
@@ -766,11 +731,13 @@ static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
766/* ECC will be calculated automatically, and errors will be detected in 731/* ECC will be calculated automatically, and errors will be detected in
767 * waitfunc. 732 * waitfunc.
768 */ 733 */
769static void fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip, 734static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
770 const uint8_t *buf, int oob_required) 735 const uint8_t *buf, int oob_required)
771{ 736{
772 fsl_elbc_write_buf(mtd, buf, mtd->writesize); 737 fsl_elbc_write_buf(mtd, buf, mtd->writesize);
773 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); 738 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
739
740 return 0;
774} 741}
775 742
776static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) 743static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
@@ -796,7 +763,6 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
796 chip->read_byte = fsl_elbc_read_byte; 763 chip->read_byte = fsl_elbc_read_byte;
797 chip->write_buf = fsl_elbc_write_buf; 764 chip->write_buf = fsl_elbc_write_buf;
798 chip->read_buf = fsl_elbc_read_buf; 765 chip->read_buf = fsl_elbc_read_buf;
799 chip->verify_buf = fsl_elbc_verify_buf;
800 chip->select_chip = fsl_elbc_select_chip; 766 chip->select_chip = fsl_elbc_select_chip;
801 chip->cmdfunc = fsl_elbc_cmdfunc; 767 chip->cmdfunc = fsl_elbc_cmdfunc;
802 chip->waitfunc = fsl_elbc_wait; 768 chip->waitfunc = fsl_elbc_wait;
@@ -805,7 +771,6 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
805 chip->bbt_md = &bbt_mirror_descr; 771 chip->bbt_md = &bbt_mirror_descr;
806 772
807 /* set up nand options */ 773 /* set up nand options */
808 chip->options = NAND_NO_READRDY;
809 chip->bbt_options = NAND_BBT_USE_FLASH; 774 chip->bbt_options = NAND_BBT_USE_FLASH;
810 775
811 chip->controller = &elbc_fcm_ctrl->controller; 776 chip->controller = &elbc_fcm_ctrl->controller;
@@ -916,7 +881,8 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
916 elbc_fcm_ctrl->chips[bank] = priv; 881 elbc_fcm_ctrl->chips[bank] = priv;
917 priv->bank = bank; 882 priv->bank = bank;
918 priv->ctrl = fsl_lbc_ctrl_dev; 883 priv->ctrl = fsl_lbc_ctrl_dev;
919 priv->dev = dev; 884 priv->dev = &pdev->dev;
885 dev_set_drvdata(priv->dev, priv);
920 886
921 priv->vbase = ioremap(res.start, resource_size(&res)); 887 priv->vbase = ioremap(res.start, resource_size(&res));
922 if (!priv->vbase) { 888 if (!priv->vbase) {
@@ -963,11 +929,10 @@ err:
963 929
964static int fsl_elbc_nand_remove(struct platform_device *pdev) 930static int fsl_elbc_nand_remove(struct platform_device *pdev)
965{ 931{
966 int i;
967 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand; 932 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
968 for (i = 0; i < MAX_BANKS; i++) 933 struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev);
969 if (elbc_fcm_ctrl->chips[i]) 934
970 fsl_elbc_chip_remove(elbc_fcm_ctrl->chips[i]); 935 fsl_elbc_chip_remove(priv);
971 936
972 mutex_lock(&fsl_elbc_nand_mutex); 937 mutex_lock(&fsl_elbc_nand_mutex);
973 elbc_fcm_ctrl->counter--; 938 elbc_fcm_ctrl->counter--;
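probe() now stores the per-bank private data with dev_set_drvdata(), so remove() can fetch exactly the chip it registered rather than walking every bank. The generic drvdata pattern, sketched for a hypothetical driver:

#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_priv {
	struct device *dev;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	priv->dev = &pdev->dev;
	dev_set_drvdata(&pdev->dev, priv);	/* retrievable in remove() */
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	struct example_priv *priv = dev_get_drvdata(&pdev->dev);

	kfree(priv);
	return 0;
}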
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 01e2f2e87d8c..3551a99076ba 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -194,7 +194,7 @@ static int is_blank(struct mtd_info *mtd, unsigned int bufnum)
194 struct nand_chip *chip = mtd->priv; 194 struct nand_chip *chip = mtd->priv;
195 struct fsl_ifc_mtd *priv = chip->priv; 195 struct fsl_ifc_mtd *priv = chip->priv;
196 u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2); 196 u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2);
197 u32 __iomem *mainarea = (u32 *)addr; 197 u32 __iomem *mainarea = (u32 __iomem *)addr;
198 u8 __iomem *oob = addr + mtd->writesize; 198 u8 __iomem *oob = addr + mtd->writesize;
199 int i; 199 int i;
200 200
@@ -592,8 +592,8 @@ static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd)
592 * next byte. 592 * next byte.
593 */ 593 */
594 if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) { 594 if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
595 data = in_be16((uint16_t *)&ifc_nand_ctrl-> 595 data = in_be16((uint16_t __iomem *)&ifc_nand_ctrl->
596 addr[ifc_nand_ctrl->index]); 596 addr[ifc_nand_ctrl->index]);
597 ifc_nand_ctrl->index += 2; 597 ifc_nand_ctrl->index += 2;
598 return (uint8_t) data; 598 return (uint8_t) data;
599 } 599 }
@@ -628,46 +628,6 @@ static void fsl_ifc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
628} 628}
629 629
630/* 630/*
631 * Verify buffer against the IFC Controller Data Buffer
632 */
633static int fsl_ifc_verify_buf(struct mtd_info *mtd,
634 const u_char *buf, int len)
635{
636 struct nand_chip *chip = mtd->priv;
637 struct fsl_ifc_mtd *priv = chip->priv;
638 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
639 struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
640 int i;
641
642 if (len < 0) {
643 dev_err(priv->dev, "%s: write_buf of %d bytes", __func__, len);
644 return -EINVAL;
645 }
646
647 if ((unsigned int)len > nctrl->read_bytes - nctrl->index) {
648 dev_err(priv->dev,
649 "%s: beyond end of buffer (%d requested, %u available)\n",
650 __func__, len, nctrl->read_bytes - nctrl->index);
651
652 nctrl->index = nctrl->read_bytes;
653 return -EINVAL;
654 }
655
656 for (i = 0; i < len; i++)
657 if (in_8(&nctrl->addr[nctrl->index + i]) != buf[i])
658 break;
659
660 nctrl->index += len;
661
662 if (i != len)
663 return -EIO;
664 if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
665 return -EIO;
666
667 return 0;
668}
669
670/*
671 * This function is called after Program and Erase Operations to 631 * This function is called after Program and Erase Operations to
672 * check for success or failure. 632 * check for success or failure.
673 */ 633 */
@@ -722,11 +682,13 @@ static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
722/* ECC will be calculated automatically, and errors will be detected in 682/* ECC will be calculated automatically, and errors will be detected in
723 * waitfunc. 683 * waitfunc.
724 */ 684 */
725static void fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip, 685static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
726 const uint8_t *buf, int oob_required) 686 const uint8_t *buf, int oob_required)
727{ 687{
728 fsl_ifc_write_buf(mtd, buf, mtd->writesize); 688 fsl_ifc_write_buf(mtd, buf, mtd->writesize);
729 fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize); 689 fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
690
691 return 0;
730} 692}
731 693
732static int fsl_ifc_chip_init_tail(struct mtd_info *mtd) 694static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
@@ -844,7 +806,6 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
844 806
845 chip->write_buf = fsl_ifc_write_buf; 807 chip->write_buf = fsl_ifc_write_buf;
846 chip->read_buf = fsl_ifc_read_buf; 808 chip->read_buf = fsl_ifc_read_buf;
847 chip->verify_buf = fsl_ifc_verify_buf;
848 chip->select_chip = fsl_ifc_select_chip; 809 chip->select_chip = fsl_ifc_select_chip;
849 chip->cmdfunc = fsl_ifc_cmdfunc; 810 chip->cmdfunc = fsl_ifc_cmdfunc;
850 chip->waitfunc = fsl_ifc_wait; 811 chip->waitfunc = fsl_ifc_wait;
@@ -855,7 +816,6 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
855 out_be32(&ifc->ifc_nand.ncfgr, 0x0); 816 out_be32(&ifc->ifc_nand.ncfgr, 0x0);
856 817
857 /* set up nand options */ 818 /* set up nand options */
858 chip->options = NAND_NO_READRDY;
859 chip->bbt_options = NAND_BBT_USE_FLASH; 819 chip->bbt_options = NAND_BBT_USE_FLASH;
860 820
861 821
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 27000a5f5f47..bc73bc5f2713 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -100,23 +100,6 @@ static void gpio_nand_readbuf(struct mtd_info *mtd, u_char *buf, int len)
100 readsb(this->IO_ADDR_R, buf, len); 100 readsb(this->IO_ADDR_R, buf, len);
101} 101}
102 102
103static int gpio_nand_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
104{
105 struct nand_chip *this = mtd->priv;
106 unsigned char read, *p = (unsigned char *) buf;
107 int i, err = 0;
108
109 for (i = 0; i < len; i++) {
110 read = readb(this->IO_ADDR_R);
111 if (read != p[i]) {
112 pr_debug("%s: err at %d (read %04x vs %04x)\n",
113 __func__, i, read, p[i]);
114 err = -EFAULT;
115 }
116 }
117 return err;
118}
119
120static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf, 103static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf,
121 int len) 104 int len)
122{ 105{
@@ -148,26 +131,6 @@ static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len)
148 } 131 }
149} 132}
150 133
151static int gpio_nand_verifybuf16(struct mtd_info *mtd, const u_char *buf,
152 int len)
153{
154 struct nand_chip *this = mtd->priv;
155 unsigned short read, *p = (unsigned short *) buf;
156 int i, err = 0;
157 len >>= 1;
158
159 for (i = 0; i < len; i++) {
160 read = readw(this->IO_ADDR_R);
161 if (read != p[i]) {
162 pr_debug("%s: err at %d (read %04x vs %04x)\n",
163 __func__, i, read, p[i]);
164 err = -EFAULT;
165 }
166 }
167 return err;
168}
169
170
171static int gpio_nand_devready(struct mtd_info *mtd) 134static int gpio_nand_devready(struct mtd_info *mtd)
172{ 135{
173 struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd); 136 struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
@@ -391,11 +354,9 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
391 if (this->options & NAND_BUSWIDTH_16) { 354 if (this->options & NAND_BUSWIDTH_16) {
392 this->read_buf = gpio_nand_readbuf16; 355 this->read_buf = gpio_nand_readbuf16;
393 this->write_buf = gpio_nand_writebuf16; 356 this->write_buf = gpio_nand_writebuf16;
394 this->verify_buf = gpio_nand_verifybuf16;
395 } else { 357 } else {
396 this->read_buf = gpio_nand_readbuf; 358 this->read_buf = gpio_nand_readbuf;
397 this->write_buf = gpio_nand_writebuf; 359 this->write_buf = gpio_nand_writebuf;
398 this->verify_buf = gpio_nand_verifybuf;
399 } 360 }
400 361
401 /* set the mtd private data for the nand driver */ 362 /* set the mtd private data for the nand driver */
@@ -456,20 +417,7 @@ static struct platform_driver gpio_nand_driver = {
456 }, 417 },
457}; 418};
458 419
459static int __init gpio_nand_init(void) 420module_platform_driver(gpio_nand_driver);
460{
461 printk(KERN_INFO "GPIO NAND driver, © 2004 Simtec Electronics\n");
462
463 return platform_driver_register(&gpio_nand_driver);
464}
465
466static void __exit gpio_nand_exit(void)
467{
468 platform_driver_unregister(&gpio_nand_driver);
469}
470
471module_init(gpio_nand_init);
472module_exit(gpio_nand_exit);
473 421
474MODULE_LICENSE("GPL"); 422MODULE_LICENSE("GPL");
475MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); 423MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
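module_platform_driver() used above hides exactly the init/exit boilerplate this hunk deletes. What the macro expands to, roughly (names follow the macro's <driver>_init/_exit convention):

/* Rough expansion of module_platform_driver(gpio_nand_driver). */
static int __init gpio_nand_driver_init(void)
{
	return platform_driver_register(&gpio_nand_driver);
}
module_init(gpio_nand_driver_init);

static void __exit gpio_nand_driver_exit(void)
{
	platform_driver_unregister(&gpio_nand_driver);
}
module_exit(gpio_nand_driver_exit);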
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index a1f43329ad43..3502accd4bc3 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -26,7 +26,7 @@
26#include "gpmi-regs.h" 26#include "gpmi-regs.h"
27#include "bch-regs.h" 27#include "bch-regs.h"
28 28
29struct timing_threshod timing_default_threshold = { 29static struct timing_threshod timing_default_threshold = {
30 .max_data_setup_cycles = (BM_GPMI_TIMING0_DATA_SETUP >> 30 .max_data_setup_cycles = (BM_GPMI_TIMING0_DATA_SETUP >>
31 BP_GPMI_TIMING0_DATA_SETUP), 31 BP_GPMI_TIMING0_DATA_SETUP),
32 .internal_data_setup_in_ns = 0, 32 .internal_data_setup_in_ns = 0,
@@ -124,12 +124,42 @@ error:
124 return -ETIMEDOUT; 124 return -ETIMEDOUT;
125} 125}
126 126
127static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
128{
129 struct clk *clk;
130 int ret;
131 int i;
132
133 for (i = 0; i < GPMI_CLK_MAX; i++) {
134 clk = this->resources.clock[i];
135 if (!clk)
136 break;
137
138 if (v) {
139 ret = clk_prepare_enable(clk);
140 if (ret)
141 goto err_clk;
142 } else {
143 clk_disable_unprepare(clk);
144 }
145 }
146 return 0;
147
148err_clk:
149 for (; i > 0; i--)
150 clk_disable_unprepare(this->resources.clock[i - 1]);
151 return ret;
152}
153
154#define gpmi_enable_clk(x) __gpmi_enable_clk(x, true)
155#define gpmi_disable_clk(x) __gpmi_enable_clk(x, false)
156
127int gpmi_init(struct gpmi_nand_data *this) 157int gpmi_init(struct gpmi_nand_data *this)
128{ 158{
129 struct resources *r = &this->resources; 159 struct resources *r = &this->resources;
130 int ret; 160 int ret;
131 161
132 ret = clk_prepare_enable(r->clock); 162 ret = gpmi_enable_clk(this);
133 if (ret) 163 if (ret)
134 goto err_out; 164 goto err_out;
135 ret = gpmi_reset_block(r->gpmi_regs, false); 165 ret = gpmi_reset_block(r->gpmi_regs, false);
@@ -149,7 +179,7 @@ int gpmi_init(struct gpmi_nand_data *this)
149 /* Select BCH ECC. */ 179 /* Select BCH ECC. */
150 writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET); 180 writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
151 181
152 clk_disable_unprepare(r->clock); 182 gpmi_disable_clk(this);
153 return 0; 183 return 0;
154err_out: 184err_out:
155 return ret; 185 return ret;
@@ -205,7 +235,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
205 ecc_strength = bch_geo->ecc_strength >> 1; 235 ecc_strength = bch_geo->ecc_strength >> 1;
206 page_size = bch_geo->page_size; 236 page_size = bch_geo->page_size;
207 237
208 ret = clk_prepare_enable(r->clock); 238 ret = gpmi_enable_clk(this);
209 if (ret) 239 if (ret)
210 goto err_out; 240 goto err_out;
211 241
@@ -240,7 +270,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
240 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, 270 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
241 r->bch_regs + HW_BCH_CTRL_SET); 271 r->bch_regs + HW_BCH_CTRL_SET);
242 272
243 clk_disable_unprepare(r->clock); 273 gpmi_disable_clk(this);
244 return 0; 274 return 0;
245err_out: 275err_out:
246 return ret; 276 return ret;
@@ -263,6 +293,7 @@ static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
263 struct gpmi_nfc_hardware_timing *hw) 293 struct gpmi_nfc_hardware_timing *hw)
264{ 294{
265 struct timing_threshod *nfc = &timing_default_threshold; 295 struct timing_threshod *nfc = &timing_default_threshold;
296 struct resources *r = &this->resources;
266 struct nand_chip *nand = &this->nand; 297 struct nand_chip *nand = &this->nand;
267 struct nand_timing target = this->timing; 298 struct nand_timing target = this->timing;
268 bool improved_timing_is_available; 299 bool improved_timing_is_available;
@@ -302,8 +333,9 @@ static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
302 (target.tRHOH_in_ns >= 0) ; 333 (target.tRHOH_in_ns >= 0) ;
303 334
304 /* Inspect the clock. */ 335 /* Inspect the clock. */
336 nfc->clock_frequency_in_hz = clk_get_rate(r->clock[0]);
305 clock_frequency_in_hz = nfc->clock_frequency_in_hz; 337 clock_frequency_in_hz = nfc->clock_frequency_in_hz;
306 clock_period_in_ns = 1000000000 / clock_frequency_in_hz; 338 clock_period_in_ns = NSEC_PER_SEC / clock_frequency_in_hz;
307 339
308 /* 340 /*
309 * The NFC quantizes setup and hold parameters in terms of clock cycles. 341 * The NFC quantizes setup and hold parameters in terms of clock cycles.
@@ -698,17 +730,230 @@ return_results:
698 hw->address_setup_in_cycles = address_setup_in_cycles; 730 hw->address_setup_in_cycles = address_setup_in_cycles;
699 hw->use_half_periods = dll_use_half_periods; 731 hw->use_half_periods = dll_use_half_periods;
700 hw->sample_delay_factor = sample_delay_factor; 732 hw->sample_delay_factor = sample_delay_factor;
733 hw->device_busy_timeout = GPMI_DEFAULT_BUSY_TIMEOUT;
734 hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
701 735
702 /* Return success. */ 736 /* Return success. */
703 return 0; 737 return 0;
704} 738}
705 739
740/*
 741 * <1> First, we should know what the GPMI-clock is.
 742 * The GPMI-clock is the internal clock in the gpmi nand controller.
 743 * If you run the gpmi nand controller at 100MHz, the GPMI-clock's period
 744 * is 10ns. Call the GPMI-clock's period the GPMI-clock-period.
745 *
 746 * <2> Second, we should know what the frequency on the nand chip pins is.
747 * The frequency on the nand chip pins is derived from the GPMI-clock.
748 * We can get it from the following equation:
749 *
750 * F = G / (DS + DH)
751 *
752 * F : the frequency on the nand chip pins.
753 * G : the GPMI clock, such as 100MHz.
754 * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
755 * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
756 *
 757 * <3> Third, when the frequency on the nand chip pins is above 33MHz,
 758 * the nand EDO (Extended Data Out) timing can be applied.
759 * The GPMI implements a feedback read strobe to sample the read data.
760 * The feedback read strobe can be delayed to support the nand EDO timing
 761 * where the read strobe may deassert before the read data is valid, and
 762 * the read data stays valid for some time after the read strobe.
763 *
764 * The following figure illustrates some aspects of a NAND Flash read:
765 *
766 * |<---tREA---->|
767 * | |
768 * | | |
769 * |<--tRP-->| |
770 * | | |
771 * __ ___|__________________________________
772 * RDN \________/ |
773 * |
774 * /---------\
775 * Read Data --------------< >---------
776 * \---------/
777 * | |
778 * |<-D->|
779 * FeedbackRDN ________ ____________
780 * \___________/
781 *
782 * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
783 *
784 *
785 * <4> Now, we begin to describe how to compute the right RDN_DELAY.
786 *
787 * 4.1) From the aspect of the nand chip pins:
788 * Delay = (tREA + C - tRP) {1}
789 *
790 * tREA : the maximum read access time. From the ONFI nand standards,
 791 * we know that tREA is 16ns in mode 5 and 20ns in mode 4.
 792 * Please check it at www.onfi.org.
 793 * C : a constant to adjust the delay; the default is 4.
794 * tRP : the read pulse width.
795 * Specified by the HW_GPMI_TIMING0:DATA_SETUP:
796 * tRP = (GPMI-clock-period) * DATA_SETUP
797 *
798 * 4.2) From the aspect of the GPMI nand controller:
799 * Delay = RDN_DELAY * 0.125 * RP {2}
800 *
801 * RP : the DLL reference period.
802 * if (GPMI-clock-period > DLL_THRETHOLD)
803 * RP = GPMI-clock-period / 2;
804 * else
805 * RP = GPMI-clock-period;
806 *
807 * Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period
 808 * is greater than DLL_THRESHOLD. On other SoCs, the DLL_THRESHOLD
 809 * is 16ns, but on mx6q we use 12ns.
810 *
811 * 4.3) since {1} equals {2}, we get:
812 *
813 * (tREA + 4 - tRP) * 8
814 * RDN_DELAY = --------------------- {3}
815 * RP
816 *
817 * 4.4) We only support the fastest asynchronous mode of ONFI nand.
818 * For some ONFI nand, the mode 4 is the fastest mode;
819 * while for some ONFI nand, the mode 5 is the fastest mode.
 820 * So we only support mode 4 and mode 5; there is no need to
 821 * support other modes.
822 */
823static void gpmi_compute_edo_timing(struct gpmi_nand_data *this,
824 struct gpmi_nfc_hardware_timing *hw)
825{
826 struct resources *r = &this->resources;
827 unsigned long rate = clk_get_rate(r->clock[0]);
828 int mode = this->timing_mode;
829 int dll_threshold = 16; /* in ns */
830 unsigned long delay;
831 unsigned long clk_period;
832 int t_rea;
833 int c = 4;
834 int t_rp;
835 int rp;
836
837 /*
838 * [1] for GPMI_HW_GPMI_TIMING0:
839 * The async mode requires 40MHz for mode 4, 50MHz for mode 5.
840 * The GPMI can support 100MHz at most. So if we want to
841 * get the 40MHz or 50MHz, we have to set DS=1, DH=1.
842 * Set the ADDRESS_SETUP to 0 in mode 4.
843 */
844 hw->data_setup_in_cycles = 1;
845 hw->data_hold_in_cycles = 1;
846 hw->address_setup_in_cycles = ((mode == 5) ? 1 : 0);
847
848 /* [2] for GPMI_HW_GPMI_TIMING1 */
849 hw->device_busy_timeout = 0x9000;
850
851 /* [3] for GPMI_HW_GPMI_CTRL1 */
852 hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
853
854 if (GPMI_IS_MX6Q(this))
855 dll_threshold = 12;
856
857 /*
 858 * Scale the numerator and denominator in {3} by 10.
 859 * This gives us a more accurate result.
860 */
861 clk_period = NSEC_PER_SEC / (rate / 10);
862 dll_threshold *= 10;
863 t_rea = ((mode == 5) ? 16 : 20) * 10;
864 c *= 10;
865
866 t_rp = clk_period * 1; /* DATA_SETUP is 1 */
867
868 if (clk_period > dll_threshold) {
869 hw->use_half_periods = 1;
870 rp = clk_period / 2;
871 } else {
872 hw->use_half_periods = 0;
873 rp = clk_period;
874 }
875
876 /*
 877 * Multiplying the numerator by 10 lets us do a round off:
 878 * 7.8 rounds up to 8; 7.4 rounds down to 7.
879 */
880 delay = (((t_rea + c - t_rp) * 8) * 10) / rp;
881 delay = (delay + 5) / 10;
882
883 hw->sample_delay_factor = delay;
884}
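Working equation {3} through for the common case: mode 5 at a 100MHz GPMI clock gives a 10ns clock period; DATA_SETUP = 1 so tRP = 10ns; tREA = 16ns; the period is below the 12ns mx6q threshold, so RP = 10ns and HALF_PERIOD stays clear; hence RDN_DELAY = (16 + 4 - 10) * 8 / 10 = 8. A stand-alone user-space sketch of the same arithmetic, illustrative only and not driver code:

#include <stdio.h>

int main(void)
{
	int t_rea = 16;			/* ns, ONFI timing mode 5 */
	int c = 4;			/* fixed adjustment constant */
	int clk_period = 10;		/* ns, 100MHz GPMI clock */
	int t_rp = clk_period * 1;	/* DATA_SETUP = 1 */
	int dll_threshold = 12;		/* ns on mx6q */
	int half_period = clk_period > dll_threshold;
	int rp = half_period ? clk_period / 2 : clk_period;
	/* rounded form of {3}: RDN_DELAY = (tREA + C - tRP) * 8 / RP */
	int rdn_delay = ((t_rea + c - t_rp) * 8 + rp / 2) / rp;

	printf("RDN_DELAY = %d, HALF_PERIOD = %d\n", rdn_delay, half_period);
	return 0;	/* prints: RDN_DELAY = 8, HALF_PERIOD = 0 */
}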
885
886static int enable_edo_mode(struct gpmi_nand_data *this, int mode)
887{
888 struct resources *r = &this->resources;
889 struct nand_chip *nand = &this->nand;
890 struct mtd_info *mtd = &this->mtd;
891 uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {};
892 unsigned long rate;
893 int ret;
894
895 nand->select_chip(mtd, 0);
896
 897 /* [1] send SET FEATURE command to NAND */
898 feature[0] = mode;
899 ret = nand->onfi_set_features(mtd, nand,
900 ONFI_FEATURE_ADDR_TIMING_MODE, feature);
901 if (ret)
902 goto err_out;
903
904 /* [2] send GET FEATURE command to double-check the timing mode */
905 memset(feature, 0, ONFI_SUBFEATURE_PARAM_LEN);
906 ret = nand->onfi_get_features(mtd, nand,
907 ONFI_FEATURE_ADDR_TIMING_MODE, feature);
908 if (ret || feature[0] != mode)
909 goto err_out;
910
911 nand->select_chip(mtd, -1);
912
913 /* [3] set the main IO clock, 100MHz for mode 5, 80MHz for mode 4. */
914 rate = (mode == 5) ? 100000000 : 80000000;
915 clk_set_rate(r->clock[0], rate);
916
 917 /* Let gpmi_begin() re-compute the timing. */
918 this->flags &= ~GPMI_TIMING_INIT_OK;
919
920 this->flags |= GPMI_ASYNC_EDO_ENABLED;
921 this->timing_mode = mode;
922 dev_info(this->dev, "enable the asynchronous EDO mode %d\n", mode);
923 return 0;
924
925err_out:
926 nand->select_chip(mtd, -1);
927 dev_err(this->dev, "mode:%d ,failed in set feature.\n", mode);
928 return -EINVAL;
929}
930
931int gpmi_extra_init(struct gpmi_nand_data *this)
932{
933 struct nand_chip *chip = &this->nand;
934
935 /* Enable the asynchronous EDO feature. */
936 if (GPMI_IS_MX6Q(this) && chip->onfi_version) {
937 int mode = onfi_get_async_timing_mode(chip);
938
939 /* We only support the timing mode 4 and mode 5. */
940 if (mode & ONFI_TIMING_MODE_5)
941 mode = 5;
942 else if (mode & ONFI_TIMING_MODE_4)
943 mode = 4;
944 else
945 return 0;
946
947 return enable_edo_mode(this, mode);
948 }
949 return 0;
950}
951
706/* Begin the I/O */ 952/* Begin the I/O */
707void gpmi_begin(struct gpmi_nand_data *this) 953void gpmi_begin(struct gpmi_nand_data *this)
708{ 954{
709 struct resources *r = &this->resources; 955 struct resources *r = &this->resources;
710 struct timing_threshod *nfc = &timing_default_threshold; 956 void __iomem *gpmi_regs = r->gpmi_regs;
711 unsigned char *gpmi_regs = r->gpmi_regs;
712 unsigned int clock_period_in_ns; 957 unsigned int clock_period_in_ns;
713 uint32_t reg; 958 uint32_t reg;
714 unsigned int dll_wait_time_in_us; 959 unsigned int dll_wait_time_in_us;
@@ -716,60 +961,66 @@ void gpmi_begin(struct gpmi_nand_data *this)
716 int ret; 961 int ret;
717 962
718 /* Enable the clock. */ 963 /* Enable the clock. */
719 ret = clk_prepare_enable(r->clock); 964 ret = gpmi_enable_clk(this);
720 if (ret) { 965 if (ret) {
721 pr_err("We failed in enable the clk\n"); 966 pr_err("We failed in enable the clk\n");
722 goto err_out; 967 goto err_out;
723 } 968 }
724 969
725 /* set ready/busy timeout */ 970 /* Only initialize the timing once */
726 writel(0x500 << BP_GPMI_TIMING1_BUSY_TIMEOUT, 971 if (this->flags & GPMI_TIMING_INIT_OK)
727 gpmi_regs + HW_GPMI_TIMING1); 972 return;
728 973 this->flags |= GPMI_TIMING_INIT_OK;
729 /* Get the timing information we need. */
730 nfc->clock_frequency_in_hz = clk_get_rate(r->clock);
731 clock_period_in_ns = 1000000000 / nfc->clock_frequency_in_hz;
732 974
733 gpmi_nfc_compute_hardware_timing(this, &hw); 975 if (this->flags & GPMI_ASYNC_EDO_ENABLED)
976 gpmi_compute_edo_timing(this, &hw);
977 else
978 gpmi_nfc_compute_hardware_timing(this, &hw);
734 979
735 /* Set up all the simple timing parameters. */ 980 /* [1] Set HW_GPMI_TIMING0 */
736 reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) | 981 reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |
737 BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles) | 982 BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles) |
738 BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles) ; 983 BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles) ;
739 984
740 writel(reg, gpmi_regs + HW_GPMI_TIMING0); 985 writel(reg, gpmi_regs + HW_GPMI_TIMING0);
741 986
742 /* 987 /* [2] Set HW_GPMI_TIMING1 */
743 * DLL_ENABLE must be set to 0 when setting RDN_DELAY or HALF_PERIOD. 988 writel(BF_GPMI_TIMING1_BUSY_TIMEOUT(hw.device_busy_timeout),
744 */ 989 gpmi_regs + HW_GPMI_TIMING1);
990
991 /* [3] The following code is to set the HW_GPMI_CTRL1. */
992
993 /* Set the WRN_DLY_SEL */
994 writel(BM_GPMI_CTRL1_WRN_DLY_SEL, gpmi_regs + HW_GPMI_CTRL1_CLR);
995 writel(BF_GPMI_CTRL1_WRN_DLY_SEL(hw.wrn_dly_sel),
996 gpmi_regs + HW_GPMI_CTRL1_SET);
997
998 /* DLL_ENABLE must be set to 0 when setting RDN_DELAY or HALF_PERIOD. */
745 writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR); 999 writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR);
746 1000
747 /* Clear out the DLL control fields. */ 1001 /* Clear out the DLL control fields. */
748 writel(BM_GPMI_CTRL1_RDN_DELAY, gpmi_regs + HW_GPMI_CTRL1_CLR); 1002 reg = BM_GPMI_CTRL1_RDN_DELAY | BM_GPMI_CTRL1_HALF_PERIOD;
749 writel(BM_GPMI_CTRL1_HALF_PERIOD, gpmi_regs + HW_GPMI_CTRL1_CLR); 1003 writel(reg, gpmi_regs + HW_GPMI_CTRL1_CLR);
750 1004
751 /* If no sample delay is called for, return immediately. */ 1005 /* If no sample delay is called for, return immediately. */
752 if (!hw.sample_delay_factor) 1006 if (!hw.sample_delay_factor)
753 return; 1007 return;
754 1008
755 /* Configure the HALF_PERIOD flag. */ 1009 /* Set RDN_DELAY or HALF_PERIOD. */
756 if (hw.use_half_periods) 1010 reg = ((hw.use_half_periods) ? BM_GPMI_CTRL1_HALF_PERIOD : 0)
757 writel(BM_GPMI_CTRL1_HALF_PERIOD, 1011 | BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor);
758 gpmi_regs + HW_GPMI_CTRL1_SET);
759 1012
760 /* Set the delay factor. */ 1013 writel(reg, gpmi_regs + HW_GPMI_CTRL1_SET);
761 writel(BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor),
762 gpmi_regs + HW_GPMI_CTRL1_SET);
763 1014
764 /* Enable the DLL. */ 1015 /* At last, we enable the DLL. */
765 writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET); 1016 writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET);
766 1017
767 /* 1018 /*
768 * After we enable the GPMI DLL, we have to wait 64 clock cycles before 1019 * After we enable the GPMI DLL, we have to wait 64 clock cycles before
769 * we can use the GPMI. 1020 * we can use the GPMI. Calculate the amount of time we need to wait,
770 * 1021 * in microseconds.
771 * Calculate the amount of time we need to wait, in microseconds.
772 */ 1022 */
1023 clock_period_in_ns = NSEC_PER_SEC / clk_get_rate(r->clock[0]);
773 dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000; 1024 dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000;
774 1025
775 if (!dll_wait_time_in_us) 1026 if (!dll_wait_time_in_us)
@@ -784,8 +1035,7 @@ err_out:
784 1035
785void gpmi_end(struct gpmi_nand_data *this) 1036void gpmi_end(struct gpmi_nand_data *this)
786{ 1037{
787 struct resources *r = &this->resources; 1038 gpmi_disable_clk(this);
788 clk_disable_unprepare(r->clock);
789} 1039}
790 1040
791/* Clears a BCH interrupt. */ 1041/* Clears a BCH interrupt. */
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index a6cad5caba78..d79696b2f19b 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -18,6 +18,9 @@
18 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */ 20 */
21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
21#include <linux/clk.h> 24#include <linux/clk.h>
22#include <linux/slab.h> 25#include <linux/slab.h>
23#include <linux/interrupt.h> 26#include <linux/interrupt.h>
@@ -27,6 +30,7 @@
27#include <linux/pinctrl/consumer.h> 30#include <linux/pinctrl/consumer.h>
28#include <linux/of.h> 31#include <linux/of.h>
29#include <linux/of_device.h> 32#include <linux/of_device.h>
33#include <linux/of_mtd.h>
30#include "gpmi-nand.h" 34#include "gpmi-nand.h"
31 35
32/* add our owner bbt descriptor */ 36/* add our owner bbt descriptor */
@@ -113,7 +117,7 @@ int common_nfc_set_geometry(struct gpmi_nand_data *this)
113 /* We use the same ECC strength for all chunks. */ 117 /* We use the same ECC strength for all chunks. */
114 geo->ecc_strength = get_ecc_strength(this); 118 geo->ecc_strength = get_ecc_strength(this);
115 if (!geo->ecc_strength) { 119 if (!geo->ecc_strength) {
116 pr_err("We get a wrong ECC strength.\n"); 120 pr_err("wrong ECC strength.\n");
117 return -EINVAL; 121 return -EINVAL;
118 } 122 }
119 123
@@ -316,7 +320,7 @@ acquire_register_block(struct gpmi_nand_data *this, const char *res_name)
316 struct platform_device *pdev = this->pdev; 320 struct platform_device *pdev = this->pdev;
317 struct resources *res = &this->resources; 321 struct resources *res = &this->resources;
318 struct resource *r; 322 struct resource *r;
319 void *p; 323 void __iomem *p;
320 324
321 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name); 325 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
322 if (!r) { 326 if (!r) {
@@ -423,8 +427,8 @@ static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
423 struct platform_device *pdev = this->pdev; 427 struct platform_device *pdev = this->pdev;
424 struct resource *r_dma; 428 struct resource *r_dma;
425 struct device_node *dn; 429 struct device_node *dn;
426 int dma_channel; 430 u32 dma_channel;
427 unsigned int ret; 431 int ret;
428 struct dma_chan *dma_chan; 432 struct dma_chan *dma_chan;
429 dma_cap_mask_t mask; 433 dma_cap_mask_t mask;
430 434
@@ -464,9 +468,73 @@ acquire_err:
464 return -EINVAL; 468 return -EINVAL;
465} 469}
466 470
471static void gpmi_put_clks(struct gpmi_nand_data *this)
472{
473 struct resources *r = &this->resources;
474 struct clk *clk;
475 int i;
476
477 for (i = 0; i < GPMI_CLK_MAX; i++) {
478 clk = r->clock[i];
479 if (clk) {
480 clk_put(clk);
481 r->clock[i] = NULL;
482 }
483 }
484}
485
486static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = {
487 "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
488};
489
490static int __devinit gpmi_get_clks(struct gpmi_nand_data *this)
491{
492 struct resources *r = &this->resources;
493 char **extra_clks = NULL;
494 struct clk *clk;
495 int i;
496
497 /* The main clock is stored in the first. */
498 r->clock[0] = clk_get(this->dev, "gpmi_io");
499 if (IS_ERR(r->clock[0]))
500 goto err_clock;
501
502 /* Get extra clocks */
503 if (GPMI_IS_MX6Q(this))
504 extra_clks = extra_clks_for_mx6q;
505 if (!extra_clks)
506 return 0;
507
508 for (i = 1; i < GPMI_CLK_MAX; i++) {
509 if (extra_clks[i - 1] == NULL)
510 break;
511
512 clk = clk_get(this->dev, extra_clks[i - 1]);
513 if (IS_ERR(clk))
514 goto err_clock;
515
516 r->clock[i] = clk;
517 }
518
519 if (GPMI_IS_MX6Q(this))
520 /*
521 * Set the default value for the gpmi clock in mx6q:
522 *
 523 * If you want to use an ONFI nand that runs in
 524 * Synchronous Mode, change the clock as needed.
525 */
526 clk_set_rate(r->clock[0], 22000000);
527
528 return 0;
529
530err_clock:
531 dev_dbg(this->dev, "failed in finding the clocks.\n");
532 gpmi_put_clks(this);
533 return -ENOMEM;
534}
535
467static int __devinit acquire_resources(struct gpmi_nand_data *this) 536static int __devinit acquire_resources(struct gpmi_nand_data *this)
468{ 537{
469 struct resources *res = &this->resources;
470 struct pinctrl *pinctrl; 538 struct pinctrl *pinctrl;
471 int ret; 539 int ret;
472 540
@@ -492,12 +560,9 @@ static int __devinit acquire_resources(struct gpmi_nand_data *this)
492 goto exit_pin; 560 goto exit_pin;
493 } 561 }
494 562
495 res->clock = clk_get(&this->pdev->dev, NULL); 563 ret = gpmi_get_clks(this);
496 if (IS_ERR(res->clock)) { 564 if (ret)
497 pr_err("can not get the clock\n");
498 ret = -ENOENT;
499 goto exit_clock; 565 goto exit_clock;
500 }
501 return 0; 566 return 0;
502 567
503exit_clock: 568exit_clock:
@@ -512,9 +577,7 @@ exit_regs:
512 577
513static void release_resources(struct gpmi_nand_data *this) 578static void release_resources(struct gpmi_nand_data *this)
514{ 579{
515 struct resources *r = &this->resources; 580 gpmi_put_clks(this);
516
517 clk_put(r->clock);
518 release_register_block(this); 581 release_register_block(this);
519 release_bch_irq(this); 582 release_bch_irq(this);
520 release_dma_channels(this); 583 release_dma_channels(this);
@@ -667,12 +730,12 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
667 struct device *dev = this->dev; 730 struct device *dev = this->dev;
668 731
669 /* [1] Allocate a command buffer. PAGE_SIZE is enough. */ 732 /* [1] Allocate a command buffer. PAGE_SIZE is enough. */
670 this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA); 733 this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
671 if (this->cmd_buffer == NULL) 734 if (this->cmd_buffer == NULL)
672 goto error_alloc; 735 goto error_alloc;
673 736
674 /* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */ 737 /* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */
675 this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA); 738 this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
676 if (this->data_buffer_dma == NULL) 739 if (this->data_buffer_dma == NULL)
677 goto error_alloc; 740 goto error_alloc;
678 741
@@ -930,7 +993,7 @@ exit_nfc:
930 return ret; 993 return ret;
931} 994}
932 995
933static void gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip, 996static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
934 const uint8_t *buf, int oob_required) 997 const uint8_t *buf, int oob_required)
935{ 998{
936 struct gpmi_nand_data *this = chip->priv; 999 struct gpmi_nand_data *this = chip->priv;
@@ -972,7 +1035,7 @@ static void gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
972 &payload_virt, &payload_phys); 1035 &payload_virt, &payload_phys);
973 if (ret) { 1036 if (ret) {
974 pr_err("Inadequate payload DMA buffer\n"); 1037 pr_err("Inadequate payload DMA buffer\n");
975 return; 1038 return 0;
976 } 1039 }
977 1040
978 ret = send_page_prepare(this, 1041 ret = send_page_prepare(this,
@@ -1002,6 +1065,8 @@ exit_auxiliary:
1002 nfc_geo->payload_size, 1065 nfc_geo->payload_size,
1003 payload_virt, payload_phys); 1066 payload_virt, payload_phys);
1004 } 1067 }
1068
1069 return 0;
1005} 1070}
1006 1071
1007/* 1072/*
@@ -1064,6 +1129,9 @@ exit_auxiliary:
1064 * ECC-based or raw view of the page is implicit in which function it calls 1129 * ECC-based or raw view of the page is implicit in which function it calls
1065 * (there is a similar pair of ECC-based/raw functions for writing). 1130 * (there is a similar pair of ECC-based/raw functions for writing).
1066 * 1131 *
1132 * FIXME: The following paragraph is incorrect, now that there exist
1133 * ecc.read_oob_raw and ecc.write_oob_raw functions.
1134 *
1067 * Since MTD assumes the OOB is not covered by ECC, there is no pair of 1135 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
 1068 * ECC-based/raw functions for reading or writing the OOB. The fact that the 1136 * ECC-based/raw functions for reading or writing the OOB. The fact that the
1069 * caller wants an ECC-based or raw view of the page is not propagated down to 1137 * caller wants an ECC-based or raw view of the page is not propagated down to
@@ -1190,7 +1258,6 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1190 unsigned int search_area_size_in_strides; 1258 unsigned int search_area_size_in_strides;
1191 unsigned int stride; 1259 unsigned int stride;
1192 unsigned int page; 1260 unsigned int page;
1193 loff_t byte;
1194 uint8_t *buffer = chip->buffers->databuf; 1261 uint8_t *buffer = chip->buffers->databuf;
1195 int saved_chip_number; 1262 int saved_chip_number;
1196 int found_an_ncb_fingerprint = false; 1263 int found_an_ncb_fingerprint = false;
@@ -1207,9 +1274,8 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1207 dev_dbg(dev, "Scanning for an NCB fingerprint...\n"); 1274 dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
1208 1275
1209 for (stride = 0; stride < search_area_size_in_strides; stride++) { 1276 for (stride = 0; stride < search_area_size_in_strides; stride++) {
1210 /* Compute the page and byte addresses. */ 1277 /* Compute the page addresses. */
1211 page = stride * rom_geo->stride_size_in_pages; 1278 page = stride * rom_geo->stride_size_in_pages;
1212 byte = page * mtd->writesize;
1213 1279
1214 dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page); 1280 dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
1215 1281
@@ -1251,7 +1317,6 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1251 unsigned int block; 1317 unsigned int block;
1252 unsigned int stride; 1318 unsigned int stride;
1253 unsigned int page; 1319 unsigned int page;
1254 loff_t byte;
1255 uint8_t *buffer = chip->buffers->databuf; 1320 uint8_t *buffer = chip->buffers->databuf;
1256 int saved_chip_number; 1321 int saved_chip_number;
1257 int status; 1322 int status;
@@ -1300,9 +1365,8 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1300 /* Loop through the first search area, writing NCB fingerprints. */ 1365 /* Loop through the first search area, writing NCB fingerprints. */
1301 dev_dbg(dev, "Writing NCB fingerprints...\n"); 1366 dev_dbg(dev, "Writing NCB fingerprints...\n");
1302 for (stride = 0; stride < search_area_size_in_strides; stride++) { 1367 for (stride = 0; stride < search_area_size_in_strides; stride++) {
1303 /* Compute the page and byte addresses. */ 1368 /* Compute the page addresses. */
1304 page = stride * rom_geo->stride_size_in_pages; 1369 page = stride * rom_geo->stride_size_in_pages;
1305 byte = page * mtd->writesize;
1306 1370
1307 /* Write the first page of the current stride. */ 1371 /* Write the first page of the current stride. */
1308 dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page); 1372 dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
@@ -1436,6 +1500,7 @@ static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this)
1436 /* Adjust the ECC strength according to the chip. */ 1500 /* Adjust the ECC strength according to the chip. */
1437 this->nand.ecc.strength = this->bch_geometry.ecc_strength; 1501 this->nand.ecc.strength = this->bch_geometry.ecc_strength;
1438 this->mtd.ecc_strength = this->bch_geometry.ecc_strength; 1502 this->mtd.ecc_strength = this->bch_geometry.ecc_strength;
1503 this->mtd.bitflip_threshold = this->bch_geometry.ecc_strength;
1439 1504
1440 /* NAND boot init, depends on the gpmi_set_geometry(). */ 1505 /* NAND boot init, depends on the gpmi_set_geometry(). */
1441 return nand_boot_init(this); 1506 return nand_boot_init(this);
@@ -1452,11 +1517,19 @@ static int gpmi_scan_bbt(struct mtd_info *mtd)
1452 if (ret) 1517 if (ret)
1453 return ret; 1518 return ret;
1454 1519
1520 /*
 1521 * Try to enable the extra features, such as EDO or Sync mode.
 1522 *
 1523 * We do not check the return value here. That means if we fail to
 1524 * enable the extra features, we can still run in the normal way.
1525 */
1526 gpmi_extra_init(this);
1527
1455 /* use the default BBT implementation */ 1528 /* use the default BBT implementation */
1456 return nand_default_bbt(mtd); 1529 return nand_default_bbt(mtd);
1457} 1530}
1458 1531
1459void gpmi_nfc_exit(struct gpmi_nand_data *this) 1532static void gpmi_nfc_exit(struct gpmi_nand_data *this)
1460{ 1533{
1461 nand_release(&this->mtd); 1534 nand_release(&this->mtd);
1462 gpmi_free_dma_buffer(this); 1535 gpmi_free_dma_buffer(this);
@@ -1497,6 +1570,8 @@ static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
1497 chip->ecc.size = 1; 1570 chip->ecc.size = 1;
1498 chip->ecc.strength = 8; 1571 chip->ecc.strength = 8;
1499 chip->ecc.layout = &gpmi_hw_ecclayout; 1572 chip->ecc.layout = &gpmi_hw_ecclayout;
1573 if (of_get_nand_on_flash_bbt(this->dev->of_node))
1574 chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
1500 1575
1501 /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */ 1576 /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */
1502 this->bch_geometry.payload_size = 1024; 1577 this->bch_geometry.payload_size = 1024;
@@ -1579,6 +1654,8 @@ static int __devinit gpmi_nand_probe(struct platform_device *pdev)
1579 if (ret) 1654 if (ret)
1580 goto exit_nfc_init; 1655 goto exit_nfc_init;
1581 1656
1657 dev_info(this->dev, "driver registered.\n");
1658
1582 return 0; 1659 return 0;
1583 1660
1584exit_nfc_init: 1661exit_nfc_init:
@@ -1586,10 +1663,12 @@ exit_nfc_init:
1586exit_acquire_resources: 1663exit_acquire_resources:
1587 platform_set_drvdata(pdev, NULL); 1664 platform_set_drvdata(pdev, NULL);
1588 kfree(this); 1665 kfree(this);
1666 dev_err(this->dev, "driver registration failed: %d\n", ret);
1667
1589 return ret; 1668 return ret;
1590} 1669}
1591 1670
1592static int __exit gpmi_nand_remove(struct platform_device *pdev) 1671static int __devexit gpmi_nand_remove(struct platform_device *pdev)
1593{ 1672{
1594 struct gpmi_nand_data *this = platform_get_drvdata(pdev); 1673 struct gpmi_nand_data *this = platform_get_drvdata(pdev);
1595 1674
@@ -1606,29 +1685,10 @@ static struct platform_driver gpmi_nand_driver = {
1606 .of_match_table = gpmi_nand_id_table, 1685 .of_match_table = gpmi_nand_id_table,
1607 }, 1686 },
1608 .probe = gpmi_nand_probe, 1687 .probe = gpmi_nand_probe,
1609 .remove = __exit_p(gpmi_nand_remove), 1688 .remove = __devexit_p(gpmi_nand_remove),
1610 .id_table = gpmi_ids, 1689 .id_table = gpmi_ids,
1611}; 1690};
1612 1691module_platform_driver(gpmi_nand_driver);
1613static int __init gpmi_nand_init(void)
1614{
1615 int err;
1616
1617 err = platform_driver_register(&gpmi_nand_driver);
1618 if (err == 0)
1619 printk(KERN_INFO "GPMI NAND driver registered. (IMX)\n");
1620 else
1621 pr_err("i.MX GPMI NAND driver registration failed\n");
1622 return err;
1623}
1624
1625static void __exit gpmi_nand_exit(void)
1626{
1627 platform_driver_unregister(&gpmi_nand_driver);
1628}
1629
1630module_init(gpmi_nand_init);
1631module_exit(gpmi_nand_exit);
1632 1692
1633MODULE_AUTHOR("Freescale Semiconductor, Inc."); 1693MODULE_AUTHOR("Freescale Semiconductor, Inc.");
1634MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver"); 1694MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
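
For reference on the conversion above: module_platform_driver() generates the registration boilerplate that the removed gpmi_nand_init()/gpmi_nand_exit() pair provided by hand. A minimal sketch of what the macro expands to, assuming the generic helper from <linux/platform_device.h> (illustration only, not part of the patch):

static int __init gpmi_nand_driver_init(void)
{
	/* Register the platform driver; errors propagate to the module loader. */
	return platform_driver_register(&gpmi_nand_driver);
}
module_init(gpmi_nand_driver_init);

static void __exit gpmi_nand_driver_exit(void)
{
	platform_driver_unregister(&gpmi_nand_driver);
}
module_exit(gpmi_nand_driver_exit);

The "driver registered." message that the old init routine printed is now emitted from gpmi_nand_probe() instead.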
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index ce5daa160920..7ac25c1e58f9 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -22,14 +22,15 @@
22#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
23#include <linux/fsl/mxs-dma.h> 23#include <linux/fsl/mxs-dma.h>
24 24
25#define GPMI_CLK_MAX 5 /* MX6Q needs five clocks */
25struct resources { 26struct resources {
26 void *gpmi_regs; 27 void __iomem *gpmi_regs;
27 void *bch_regs; 28 void __iomem *bch_regs;
28 unsigned int bch_low_interrupt; 29 unsigned int bch_low_interrupt;
29 unsigned int bch_high_interrupt; 30 unsigned int bch_high_interrupt;
30 unsigned int dma_low_channel; 31 unsigned int dma_low_channel;
31 unsigned int dma_high_channel; 32 unsigned int dma_high_channel;
32 struct clk *clock; 33 struct clk *clock[GPMI_CLK_MAX];
33}; 34};
34 35
35/** 36/**
@@ -121,6 +122,11 @@ struct nand_timing {
121}; 122};
122 123
123struct gpmi_nand_data { 124struct gpmi_nand_data {
125 /* flags */
126#define GPMI_ASYNC_EDO_ENABLED (1 << 0)
127#define GPMI_TIMING_INIT_OK (1 << 1)
128 int flags;
129
124 /* System Interface */ 130 /* System Interface */
125 struct device *dev; 131 struct device *dev;
126 struct platform_device *pdev; 132 struct platform_device *pdev;
@@ -131,6 +137,7 @@ struct gpmi_nand_data {
131 137
132 /* Flash Hardware */ 138 /* Flash Hardware */
133 struct nand_timing timing; 139 struct nand_timing timing;
140 int timing_mode;
134 141
135 /* BCH */ 142 /* BCH */
136 struct bch_geometry bch_geometry; 143 struct bch_geometry bch_geometry;
@@ -188,16 +195,28 @@ struct gpmi_nand_data {
188 * @data_setup_in_cycles: The data setup time, in cycles. 195 * @data_setup_in_cycles: The data setup time, in cycles.
189 * @data_hold_in_cycles: The data hold time, in cycles. 196 * @data_hold_in_cycles: The data hold time, in cycles.
190 * @address_setup_in_cycles: The address setup time, in cycles. 197 * @address_setup_in_cycles: The address setup time, in cycles.
	198 * @device_busy_timeout: The timeout waiting for NAND Ready/Busy,
	199 *                       expressed in units of 4096 GPMI clock
	200 *                       cycles.
191 * @use_half_periods: Indicates the clock is running slowly, so the 201 * @use_half_periods: Indicates the clock is running slowly, so the
192 * NFC DLL should use half-periods. 202 * NFC DLL should use half-periods.
193 * @sample_delay_factor: The sample delay factor. 203 * @sample_delay_factor: The sample delay factor.
204 * @wrn_dly_sel: The delay on the GPMI write strobe.
194 */ 205 */
195struct gpmi_nfc_hardware_timing { 206struct gpmi_nfc_hardware_timing {
207 /* for HW_GPMI_TIMING0 */
196 uint8_t data_setup_in_cycles; 208 uint8_t data_setup_in_cycles;
197 uint8_t data_hold_in_cycles; 209 uint8_t data_hold_in_cycles;
198 uint8_t address_setup_in_cycles; 210 uint8_t address_setup_in_cycles;
211
212 /* for HW_GPMI_TIMING1 */
213 uint16_t device_busy_timeout;
214#define GPMI_DEFAULT_BUSY_TIMEOUT 0x500 /* default busy timeout value.*/
215
216 /* for HW_GPMI_CTRL1 */
199 bool use_half_periods; 217 bool use_half_periods;
200 uint8_t sample_delay_factor; 218 uint8_t sample_delay_factor;
219 uint8_t wrn_dly_sel;
201}; 220};
202 221
203/** 222/**
@@ -246,6 +265,7 @@ extern int start_dma_with_bch_irq(struct gpmi_nand_data *,
246 265
247/* GPMI-NAND helper function library */ 266/* GPMI-NAND helper function library */
248extern int gpmi_init(struct gpmi_nand_data *); 267extern int gpmi_init(struct gpmi_nand_data *);
268extern int gpmi_extra_init(struct gpmi_nand_data *);
249extern void gpmi_clear_bch(struct gpmi_nand_data *); 269extern void gpmi_clear_bch(struct gpmi_nand_data *);
250extern void gpmi_dump_info(struct gpmi_nand_data *); 270extern void gpmi_dump_info(struct gpmi_nand_data *);
251extern int bch_set_geometry(struct gpmi_nand_data *); 271extern int bch_set_geometry(struct gpmi_nand_data *);
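
The new device_busy_timeout field ends up in HW_GPMI_TIMING1, where it counts in units of 4096 GPMI clock cycles. A small worked example of that conversion, assuming the fixed-width types from <linux/types.h> and a 100 MHz GPMI clock (the clock rate and the helper below are illustrative assumptions, not part of the driver):

/* Convert a timeout in nanoseconds into DEVICE_BUSY_TIMEOUT units
 * (4096 GPMI clock cycles each), rounding up. Sketch only.
 */
static uint16_t busy_timeout_to_field(uint64_t timeout_ns, uint64_t clk_hz)
{
	uint64_t cycles = (timeout_ns * clk_hz) / 1000000000ULL;

	return (uint16_t)((cycles + 4095) / 4096);
}

With a 100 MHz clock, the default field value GPMI_DEFAULT_BUSY_TIMEOUT (0x500, i.e. 1280) corresponds to 1280 * 4096 = 5242880 cycles, or roughly 52 ms.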
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
index 83431240e2f2..53397cc290fc 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
@@ -108,6 +108,15 @@
108#define HW_GPMI_CTRL1_CLR 0x00000068 108#define HW_GPMI_CTRL1_CLR 0x00000068
109#define HW_GPMI_CTRL1_TOG 0x0000006c 109#define HW_GPMI_CTRL1_TOG 0x0000006c
110 110
111#define BP_GPMI_CTRL1_WRN_DLY_SEL 22
112#define BM_GPMI_CTRL1_WRN_DLY_SEL (0x3 << BP_GPMI_CTRL1_WRN_DLY_SEL)
113#define BF_GPMI_CTRL1_WRN_DLY_SEL(v) \
114 (((v) << BP_GPMI_CTRL1_WRN_DLY_SEL) & BM_GPMI_CTRL1_WRN_DLY_SEL)
115#define BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS 0x0
116#define BV_GPMI_CTRL1_WRN_DLY_SEL_6_TO_10NS 0x1
117#define BV_GPMI_CTRL1_WRN_DLY_SEL_7_TO_12NS 0x2
118#define BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY 0x3
119
111#define BM_GPMI_CTRL1_BCH_MODE (1 << 18) 120#define BM_GPMI_CTRL1_BCH_MODE (1 << 18)
112 121
113#define BP_GPMI_CTRL1_DLL_ENABLE 17 122#define BP_GPMI_CTRL1_DLL_ENABLE 17
@@ -154,6 +163,9 @@
154 163
155#define HW_GPMI_TIMING1 0x00000080 164#define HW_GPMI_TIMING1 0x00000080
156#define BP_GPMI_TIMING1_BUSY_TIMEOUT 16 165#define BP_GPMI_TIMING1_BUSY_TIMEOUT 16
166#define BM_GPMI_TIMING1_BUSY_TIMEOUT (0xffff << BP_GPMI_TIMING1_BUSY_TIMEOUT)
167#define BF_GPMI_TIMING1_BUSY_TIMEOUT(v) \
168 (((v) << BP_GPMI_TIMING1_BUSY_TIMEOUT) & BM_GPMI_TIMING1_BUSY_TIMEOUT)
157 169
158#define HW_GPMI_TIMING2 0x00000090 170#define HW_GPMI_TIMING2 0x00000090
159#define HW_GPMI_DATA 0x000000a0 171#define HW_GPMI_DATA 0x000000a0
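
The BP_/BM_/BF_ additions above follow the usual mxs register idiom: BP_* gives the bit position, BM_* the mask, and BF_*(v) shifts and masks a value into the field. A hedged sketch of how they compose a register write (gpmi_regs stands for the ioremapped GPMI register base; HW_GPMI_CTRL1 is the CTRL1 offset from the same header and GPMI_DEFAULT_BUSY_TIMEOUT comes from gpmi-nand.h):

static void gpmi_timing_example(void __iomem *gpmi_regs)
{
	u32 ctrl1;

	/* Select the "no extra delay" write-strobe option in CTRL1[23:22]. */
	ctrl1 = readl(gpmi_regs + HW_GPMI_CTRL1);
	ctrl1 &= ~BM_GPMI_CTRL1_WRN_DLY_SEL;
	ctrl1 |= BF_GPMI_CTRL1_WRN_DLY_SEL(BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY);
	writel(ctrl1, gpmi_regs + HW_GPMI_CTRL1);

	/* Program the busy timeout, in units of 4096 GPMI clock cycles. */
	writel(BF_GPMI_TIMING1_BUSY_TIMEOUT(GPMI_DEFAULT_BUSY_TIMEOUT),
	       gpmi_regs + HW_GPMI_TIMING1);
}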
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
new file mode 100644
index 000000000000..c29b7ac1f6af
--- /dev/null
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -0,0 +1,924 @@
1/*
2 * Driver for NAND MLC Controller in LPC32xx
3 *
4 * Author: Roland Stigge <stigge@antcom.de>
5 *
6 * Copyright © 2011 WORK Microwave GmbH
7 * Copyright © 2011, 2012 Roland Stigge
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 *
20 * NAND Flash Controller Operation:
21 * - Read: Auto Decode
22 * - Write: Auto Encode
23 * - Tested Page Sizes: 2048, 4096
24 */
25
26#include <linux/slab.h>
27#include <linux/module.h>
28#include <linux/platform_device.h>
29#include <linux/mtd/mtd.h>
30#include <linux/mtd/nand.h>
31#include <linux/mtd/partitions.h>
32#include <linux/clk.h>
33#include <linux/err.h>
34#include <linux/delay.h>
35#include <linux/completion.h>
36#include <linux/interrupt.h>
37#include <linux/of.h>
38#include <linux/of_mtd.h>
39#include <linux/of_gpio.h>
40#include <linux/mtd/lpc32xx_mlc.h>
41#include <linux/io.h>
42#include <linux/mm.h>
43#include <linux/dma-mapping.h>
44#include <linux/dmaengine.h>
45#include <linux/mtd/nand_ecc.h>
46
47#define DRV_NAME "lpc32xx_mlc"
48
49/**********************************************************************
50* MLC NAND controller register offsets
51**********************************************************************/
52
53#define MLC_BUFF(x) (x + 0x00000)
54#define MLC_DATA(x) (x + 0x08000)
55#define MLC_CMD(x) (x + 0x10000)
56#define MLC_ADDR(x) (x + 0x10004)
57#define MLC_ECC_ENC_REG(x) (x + 0x10008)
58#define MLC_ECC_DEC_REG(x) (x + 0x1000C)
59#define MLC_ECC_AUTO_ENC_REG(x) (x + 0x10010)
60#define MLC_ECC_AUTO_DEC_REG(x) (x + 0x10014)
61#define MLC_RPR(x) (x + 0x10018)
62#define MLC_WPR(x) (x + 0x1001C)
63#define MLC_RUBP(x) (x + 0x10020)
64#define MLC_ROBP(x) (x + 0x10024)
65#define MLC_SW_WP_ADD_LOW(x) (x + 0x10028)
66#define MLC_SW_WP_ADD_HIG(x) (x + 0x1002C)
67#define MLC_ICR(x) (x + 0x10030)
68#define MLC_TIME_REG(x) (x + 0x10034)
69#define MLC_IRQ_MR(x) (x + 0x10038)
70#define MLC_IRQ_SR(x) (x + 0x1003C)
71#define MLC_LOCK_PR(x) (x + 0x10044)
72#define MLC_ISR(x) (x + 0x10048)
73#define MLC_CEH(x) (x + 0x1004C)
74
75/**********************************************************************
76* MLC_CMD bit definitions
77**********************************************************************/
78#define MLCCMD_RESET 0xFF
79
80/**********************************************************************
81* MLC_ICR bit definitions
82**********************************************************************/
83#define MLCICR_WPROT (1 << 3)
84#define MLCICR_LARGEBLOCK (1 << 2)
85#define MLCICR_LONGADDR (1 << 1)
86#define MLCICR_16BIT (1 << 0) /* unsupported by LPC32x0! */
87
88/**********************************************************************
89* MLC_TIME_REG bit definitions
90**********************************************************************/
91#define MLCTIMEREG_TCEA_DELAY(n) (((n) & 0x03) << 24)
92#define MLCTIMEREG_BUSY_DELAY(n) (((n) & 0x1F) << 19)
93#define MLCTIMEREG_NAND_TA(n) (((n) & 0x07) << 16)
94#define MLCTIMEREG_RD_HIGH(n) (((n) & 0x0F) << 12)
95#define MLCTIMEREG_RD_LOW(n) (((n) & 0x0F) << 8)
96#define MLCTIMEREG_WR_HIGH(n) (((n) & 0x0F) << 4)
97#define MLCTIMEREG_WR_LOW(n) (((n) & 0x0F) << 0)
98
99/**********************************************************************
100* MLC_IRQ_MR and MLC_IRQ_SR bit definitions
101**********************************************************************/
102#define MLCIRQ_NAND_READY (1 << 5)
103#define MLCIRQ_CONTROLLER_READY (1 << 4)
104#define MLCIRQ_DECODE_FAILURE (1 << 3)
105#define MLCIRQ_DECODE_ERROR (1 << 2)
106#define MLCIRQ_ECC_READY (1 << 1)
107#define MLCIRQ_WRPROT_FAULT (1 << 0)
108
109/**********************************************************************
110* MLC_LOCK_PR bit definitions
111**********************************************************************/
112#define MLCLOCKPR_MAGIC 0xA25E
113
114/**********************************************************************
115* MLC_ISR bit definitions
116**********************************************************************/
117#define MLCISR_DECODER_FAILURE (1 << 6)
118#define MLCISR_ERRORS ((1 << 4) | (1 << 5))
119#define MLCISR_ERRORS_DETECTED (1 << 3)
120#define MLCISR_ECC_READY (1 << 2)
121#define MLCISR_CONTROLLER_READY (1 << 1)
122#define MLCISR_NAND_READY (1 << 0)
123
124/**********************************************************************
125* MLC_CEH bit definitions
126**********************************************************************/
127#define MLCCEH_NORMAL (1 << 0)
128
129struct lpc32xx_nand_cfg_mlc {
130 uint32_t tcea_delay;
131 uint32_t busy_delay;
132 uint32_t nand_ta;
133 uint32_t rd_high;
134 uint32_t rd_low;
135 uint32_t wr_high;
136 uint32_t wr_low;
137 int wp_gpio;
138 struct mtd_partition *parts;
139 unsigned num_parts;
140};
141
142static struct nand_ecclayout lpc32xx_nand_oob = {
143 .eccbytes = 40,
144 .eccpos = { 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
145 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
146 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
147 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
148 .oobfree = {
149 { .offset = 0,
150 .length = 6, },
151 { .offset = 16,
152 .length = 6, },
153 { .offset = 32,
154 .length = 6, },
155 { .offset = 48,
156 .length = 6, },
157 },
158};
159
160static struct nand_bbt_descr lpc32xx_nand_bbt = {
161 .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
162 NAND_BBT_WRITE,
163 .pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
164};
165
166static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
167 .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
168 NAND_BBT_WRITE,
169 .pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
170};
171
172struct lpc32xx_nand_host {
173 struct nand_chip nand_chip;
174 struct lpc32xx_mlc_platform_data *pdata;
175 struct clk *clk;
176 struct mtd_info mtd;
177 void __iomem *io_base;
178 int irq;
179 struct lpc32xx_nand_cfg_mlc *ncfg;
180 struct completion comp_nand;
181 struct completion comp_controller;
182 uint32_t llptr;
183 /*
184 * Physical addresses of ECC buffer, DMA data buffers, OOB data buffer
185 */
186 dma_addr_t oob_buf_phy;
187 /*
188 * Virtual addresses of ECC buffer, DMA data buffers, OOB data buffer
189 */
190 uint8_t *oob_buf;
191 /* Physical address of DMA base address */
192 dma_addr_t io_base_phy;
193
194 struct completion comp_dma;
195 struct dma_chan *dma_chan;
196 struct dma_slave_config dma_slave_config;
197 struct scatterlist sgl;
198 uint8_t *dma_buf;
199 uint8_t *dummy_buf;
	200	int mlcsubpages; /* number of 512-byte subpages */
201};
202
203/*
204 * Activate/Deactivate DMA Operation:
205 *
206 * Using the PL080 DMA Controller for transferring the 512 byte subpages
207 * instead of doing readl() / writel() in a loop slows it down significantly.
208 * Measurements via getnstimeofday() upon 512 byte subpage reads reveal:
209 *
210 * - readl() of 128 x 32 bits in a loop: ~20us
211 * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us
212 * - DMA read of 512 bytes (32 bit, no bursts): ~100us
213 *
214 * This applies to the transfer itself. In the DMA case: only the
215 * wait_for_completion() (DMA setup _not_ included).
216 *
217 * Note that the 512 bytes subpage transfer is done directly from/to a
218 * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a
219 * 2048 bytes page) is spent waiting for the NAND IRQ, anyway. (The NAND
220 * controller transferring data between its internal buffer to/from the NAND
221 * chip.)
222 *
223 * Therefore, using the PL080 DMA is disabled by default, for now.
224 *
225 */
226static int use_dma;
227
228static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
229{
230 uint32_t clkrate, tmp;
231
232 /* Reset MLC controller */
233 writel(MLCCMD_RESET, MLC_CMD(host->io_base));
234 udelay(1000);
235
236 /* Get base clock for MLC block */
237 clkrate = clk_get_rate(host->clk);
238 if (clkrate == 0)
239 clkrate = 104000000;
240
241 /* Unlock MLC_ICR
242 * (among others, will be locked again automatically) */
243 writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));
244
245 /* Configure MLC Controller: Large Block, 5 Byte Address */
246 tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
247 writel(tmp, MLC_ICR(host->io_base));
248
249 /* Unlock MLC_TIME_REG
250 * (among others, will be locked again automatically) */
251 writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));
252
253 /* Compute clock setup values, see LPC and NAND manual */
254 tmp = 0;
255 tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
256 tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
257 tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
258 tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
259 tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
260 tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
261 tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
262 writel(tmp, MLC_TIME_REG(host->io_base));
263
264 /* Enable IRQ for CONTROLLER_READY and NAND_READY */
265 writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
266 MLC_IRQ_MR(host->io_base));
267
268 /* Normal nCE operation: nCE controlled by controller */
269 writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
270}
271
272/*
273 * Hardware specific access to control lines
274 */
275static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
276 unsigned int ctrl)
277{
278 struct nand_chip *nand_chip = mtd->priv;
279 struct lpc32xx_nand_host *host = nand_chip->priv;
280
281 if (cmd != NAND_CMD_NONE) {
282 if (ctrl & NAND_CLE)
283 writel(cmd, MLC_CMD(host->io_base));
284 else
285 writel(cmd, MLC_ADDR(host->io_base));
286 }
287}
288
289/*
290 * Read Device Ready (NAND device _and_ controller ready)
291 */
292static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
293{
294 struct nand_chip *nand_chip = mtd->priv;
295 struct lpc32xx_nand_host *host = nand_chip->priv;
296
297 if ((readb(MLC_ISR(host->io_base)) &
298 (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
299 (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
300 return 1;
301
302 return 0;
303}
304
305static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
306{
307 uint8_t sr;
308
309 /* Clear interrupt flag by reading status */
310 sr = readb(MLC_IRQ_SR(host->io_base));
311 if (sr & MLCIRQ_NAND_READY)
312 complete(&host->comp_nand);
313 if (sr & MLCIRQ_CONTROLLER_READY)
314 complete(&host->comp_controller);
315
316 return IRQ_HANDLED;
317}
318
319static int lpc32xx_waitfunc_nand(struct mtd_info *mtd, struct nand_chip *chip)
320{
321 struct lpc32xx_nand_host *host = chip->priv;
322
323 if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
324 goto exit;
325
326 wait_for_completion(&host->comp_nand);
327
328 while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
329 /* Seems to be delayed sometimes by controller */
330 dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
331 cpu_relax();
332 }
333
334exit:
335 return NAND_STATUS_READY;
336}
337
338static int lpc32xx_waitfunc_controller(struct mtd_info *mtd,
339 struct nand_chip *chip)
340{
341 struct lpc32xx_nand_host *host = chip->priv;
342
343 if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
344 goto exit;
345
346 wait_for_completion(&host->comp_controller);
347
348 while (!(readb(MLC_ISR(host->io_base)) &
349 MLCISR_CONTROLLER_READY)) {
350 dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
351 cpu_relax();
352 }
353
354exit:
355 return NAND_STATUS_READY;
356}
357
358static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
359{
360 lpc32xx_waitfunc_nand(mtd, chip);
361 lpc32xx_waitfunc_controller(mtd, chip);
362
363 return NAND_STATUS_READY;
364}
365
366/*
367 * Enable NAND write protect
368 */
369static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
370{
371 if (gpio_is_valid(host->ncfg->wp_gpio))
372 gpio_set_value(host->ncfg->wp_gpio, 0);
373}
374
375/*
376 * Disable NAND write protect
377 */
378static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
379{
380 if (gpio_is_valid(host->ncfg->wp_gpio))
381 gpio_set_value(host->ncfg->wp_gpio, 1);
382}
383
384static void lpc32xx_dma_complete_func(void *completion)
385{
386 complete(completion);
387}
388
389static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
390 enum dma_transfer_direction dir)
391{
392 struct nand_chip *chip = mtd->priv;
393 struct lpc32xx_nand_host *host = chip->priv;
394 struct dma_async_tx_descriptor *desc;
395 int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
396 int res;
397
398 sg_init_one(&host->sgl, mem, len);
399
400 res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
401 DMA_BIDIRECTIONAL);
402 if (res != 1) {
403 dev_err(mtd->dev.parent, "Failed to map sg list\n");
404 return -ENXIO;
405 }
406 desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
407 flags);
408 if (!desc) {
409 dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
410 goto out1;
411 }
412
413 init_completion(&host->comp_dma);
414 desc->callback = lpc32xx_dma_complete_func;
415 desc->callback_param = &host->comp_dma;
416
417 dmaengine_submit(desc);
418 dma_async_issue_pending(host->dma_chan);
419
420 wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));
421
422 dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
423 DMA_BIDIRECTIONAL);
424 return 0;
425out1:
426 dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
427 DMA_BIDIRECTIONAL);
428 return -ENXIO;
429}
430
431static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
432 uint8_t *buf, int oob_required, int page)
433{
434 struct lpc32xx_nand_host *host = chip->priv;
435 int i, j;
436 uint8_t *oobbuf = chip->oob_poi;
437 uint32_t mlc_isr;
438 int res;
439 uint8_t *dma_buf;
440 bool dma_mapped;
441
442 if ((void *)buf <= high_memory) {
443 dma_buf = buf;
444 dma_mapped = true;
445 } else {
446 dma_buf = host->dma_buf;
447 dma_mapped = false;
448 }
449
450 /* Writing Command and Address */
451 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
452
453 /* For all sub-pages */
454 for (i = 0; i < host->mlcsubpages; i++) {
455 /* Start Auto Decode Command */
456 writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));
457
458 /* Wait for Controller Ready */
459 lpc32xx_waitfunc_controller(mtd, chip);
460
461 /* Check ECC Error status */
462 mlc_isr = readl(MLC_ISR(host->io_base));
463 if (mlc_isr & MLCISR_DECODER_FAILURE) {
464 mtd->ecc_stats.failed++;
465 dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
466 } else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
467 mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
468 }
469
470 /* Read 512 + 16 Bytes */
471 if (use_dma) {
472 res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
473 DMA_DEV_TO_MEM);
474 if (res)
475 return res;
476 } else {
477 for (j = 0; j < (512 >> 2); j++) {
478 *((uint32_t *)(buf)) =
479 readl(MLC_BUFF(host->io_base));
480 buf += 4;
481 }
482 }
483 for (j = 0; j < (16 >> 2); j++) {
484 *((uint32_t *)(oobbuf)) =
485 readl(MLC_BUFF(host->io_base));
486 oobbuf += 4;
487 }
488 }
489
490 if (use_dma && !dma_mapped)
491 memcpy(buf, dma_buf, mtd->writesize);
492
493 return 0;
494}
495
496static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
497 struct nand_chip *chip,
498 const uint8_t *buf, int oob_required)
499{
500 struct lpc32xx_nand_host *host = chip->priv;
501 const uint8_t *oobbuf = chip->oob_poi;
502 uint8_t *dma_buf = (uint8_t *)buf;
503 int res;
504 int i, j;
505
506 if (use_dma && (void *)buf >= high_memory) {
507 dma_buf = host->dma_buf;
508 memcpy(dma_buf, buf, mtd->writesize);
509 }
510
511 for (i = 0; i < host->mlcsubpages; i++) {
512 /* Start Encode */
513 writeb(0x00, MLC_ECC_ENC_REG(host->io_base));
514
515 /* Write 512 + 6 Bytes to Buffer */
516 if (use_dma) {
517 res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
518 DMA_MEM_TO_DEV);
519 if (res)
520 return res;
521 } else {
522 for (j = 0; j < (512 >> 2); j++) {
523 writel(*((uint32_t *)(buf)),
524 MLC_BUFF(host->io_base));
525 buf += 4;
526 }
527 }
528 writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
529 oobbuf += 4;
530 writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
531 oobbuf += 12;
532
533 /* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
534 writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));
535
536 /* Wait for Controller Ready */
537 lpc32xx_waitfunc_controller(mtd, chip);
538 }
539 return 0;
540}
541
542static int lpc32xx_write_page(struct mtd_info *mtd, struct nand_chip *chip,
543 const uint8_t *buf, int oob_required, int page,
544 int cached, int raw)
545{
546 int res;
547
548 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
549 res = lpc32xx_write_page_lowlevel(mtd, chip, buf, oob_required);
550 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
551 lpc32xx_waitfunc(mtd, chip);
552
553 return res;
554}
555
556static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
557 int page)
558{
559 struct lpc32xx_nand_host *host = chip->priv;
560
561 /* Read whole page - necessary with MLC controller! */
562 lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page);
563
564 return 0;
565}
566
567static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
568 int page)
569{
570 /* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */
571 return 0;
572}
573
574/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
575static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode)
576{
577 /* Always enabled! */
578}
579
580static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
581{
582 struct mtd_info *mtd = &host->mtd;
583 dma_cap_mask_t mask;
584
585 if (!host->pdata || !host->pdata->dma_filter) {
586 dev_err(mtd->dev.parent, "no DMA platform data\n");
587 return -ENOENT;
588 }
589
590 dma_cap_zero(mask);
591 dma_cap_set(DMA_SLAVE, mask);
592 host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
593 "nand-mlc");
594 if (!host->dma_chan) {
595 dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
596 return -EBUSY;
597 }
598
599 /*
600 * Set direction to a sensible value even if the dmaengine driver
601 * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
602 * driver criticizes it as "alien transfer direction".
603 */
604 host->dma_slave_config.direction = DMA_DEV_TO_MEM;
605 host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
606 host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
607 host->dma_slave_config.src_maxburst = 128;
608 host->dma_slave_config.dst_maxburst = 128;
609 /* DMA controller does flow control: */
610 host->dma_slave_config.device_fc = false;
611 host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
612 host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
613 if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
614 dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
615 goto out1;
616 }
617
618 return 0;
619out1:
620 dma_release_channel(host->dma_chan);
621 return -ENXIO;
622}
623
624static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
625{
626 struct lpc32xx_nand_cfg_mlc *ncfg;
627 struct device_node *np = dev->of_node;
628
629 ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
630 if (!ncfg) {
631 dev_err(dev, "could not allocate memory for platform data\n");
632 return NULL;
633 }
634
635 of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
636 of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
637 of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
638 of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
639 of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
640 of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
641 of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);
642
643 if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
644 !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
645 !ncfg->wr_low) {
646 dev_err(dev, "chip parameters not specified correctly\n");
647 return NULL;
648 }
649
650 ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
651
652 return ncfg;
653}
654
655/*
656 * Probe for NAND controller
657 */
658static int __devinit lpc32xx_nand_probe(struct platform_device *pdev)
659{
660 struct lpc32xx_nand_host *host;
661 struct mtd_info *mtd;
662 struct nand_chip *nand_chip;
663 struct resource *rc;
664 int res;
665 struct mtd_part_parser_data ppdata = {};
666
667 /* Allocate memory for the device structure (and zero it) */
668 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
669 if (!host) {
670 dev_err(&pdev->dev, "failed to allocate device structure.\n");
671 return -ENOMEM;
672 }
673
674 rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
675 if (rc == NULL) {
	676	dev_err(&pdev->dev, "No memory resource found for device!\n");
677 return -ENXIO;
678 }
679
680 host->io_base = devm_request_and_ioremap(&pdev->dev, rc);
681 if (host->io_base == NULL) {
682 dev_err(&pdev->dev, "ioremap failed\n");
683 return -EIO;
684 }
685 host->io_base_phy = rc->start;
686
687 mtd = &host->mtd;
688 nand_chip = &host->nand_chip;
689 if (pdev->dev.of_node)
690 host->ncfg = lpc32xx_parse_dt(&pdev->dev);
691 if (!host->ncfg) {
692 dev_err(&pdev->dev,
693 "Missing or bad NAND config from device tree\n");
694 return -ENOENT;
695 }
696 if (host->ncfg->wp_gpio == -EPROBE_DEFER)
697 return -EPROBE_DEFER;
698 if (gpio_is_valid(host->ncfg->wp_gpio) &&
699 gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
700 dev_err(&pdev->dev, "GPIO not available\n");
701 return -EBUSY;
702 }
703 lpc32xx_wp_disable(host);
704
705 host->pdata = pdev->dev.platform_data;
706
707 nand_chip->priv = host; /* link the private data structures */
708 mtd->priv = nand_chip;
709 mtd->owner = THIS_MODULE;
710 mtd->dev.parent = &pdev->dev;
711
712 /* Get NAND clock */
713 host->clk = clk_get(&pdev->dev, NULL);
714 if (IS_ERR(host->clk)) {
715 dev_err(&pdev->dev, "Clock initialization failure\n");
716 res = -ENOENT;
717 goto err_exit1;
718 }
719 clk_enable(host->clk);
720
721 nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
722 nand_chip->dev_ready = lpc32xx_nand_device_ready;
723 nand_chip->chip_delay = 25; /* us */
724 nand_chip->IO_ADDR_R = MLC_DATA(host->io_base);
725 nand_chip->IO_ADDR_W = MLC_DATA(host->io_base);
726
727 /* Init NAND controller */
728 lpc32xx_nand_setup(host);
729
730 platform_set_drvdata(pdev, host);
731
732 /* Initialize function pointers */
733 nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
734 nand_chip->ecc.read_page_raw = lpc32xx_read_page;
735 nand_chip->ecc.read_page = lpc32xx_read_page;
736 nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
737 nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel;
738 nand_chip->ecc.write_oob = lpc32xx_write_oob;
739 nand_chip->ecc.read_oob = lpc32xx_read_oob;
740 nand_chip->ecc.strength = 4;
741 nand_chip->write_page = lpc32xx_write_page;
742 nand_chip->waitfunc = lpc32xx_waitfunc;
743
744 nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
745 nand_chip->bbt_td = &lpc32xx_nand_bbt;
746 nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;
747
748 /* bitflip_threshold's default is defined as ecc_strength anyway.
	749	 * Unfortunately, it is set only later, at add_mtd_device(). While it
	750	 * is still 0, it causes bad block table scanning errors in
	751	 * nand_scan_tail(), so prepare it here. */
752 mtd->bitflip_threshold = nand_chip->ecc.strength;
753
754 if (use_dma) {
755 res = lpc32xx_dma_setup(host);
756 if (res) {
757 res = -EIO;
758 goto err_exit2;
759 }
760 }
761
762 /*
	763	 * Scan to find the existence of the device and to get the
	764	 * type of NAND device: SMALL block or LARGE block
765 */
766 if (nand_scan_ident(mtd, 1, NULL)) {
767 res = -ENXIO;
768 goto err_exit3;
769 }
770
771 host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
772 if (!host->dma_buf) {
773 dev_err(&pdev->dev, "Error allocating dma_buf memory\n");
774 res = -ENOMEM;
775 goto err_exit3;
776 }
777
778 host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
779 if (!host->dummy_buf) {
780 dev_err(&pdev->dev, "Error allocating dummy_buf memory\n");
781 res = -ENOMEM;
782 goto err_exit3;
783 }
784
785 nand_chip->ecc.mode = NAND_ECC_HW;
786 nand_chip->ecc.size = mtd->writesize;
787 nand_chip->ecc.layout = &lpc32xx_nand_oob;
788 host->mlcsubpages = mtd->writesize / 512;
789
790 /* initially clear interrupt status */
791 readb(MLC_IRQ_SR(host->io_base));
792
793 init_completion(&host->comp_nand);
794 init_completion(&host->comp_controller);
795
796 host->irq = platform_get_irq(pdev, 0);
797 if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
798 dev_err(&pdev->dev, "failed to get platform irq\n");
799 res = -EINVAL;
800 goto err_exit3;
801 }
802
803 if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
804 IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
805 dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
806 res = -ENXIO;
807 goto err_exit3;
808 }
809
810 /*
811 * Fills out all the uninitialized function pointers with the defaults
812 * And scans for a bad block table if appropriate.
813 */
814 if (nand_scan_tail(mtd)) {
815 res = -ENXIO;
816 goto err_exit4;
817 }
818
819 mtd->name = DRV_NAME;
820
821 ppdata.of_node = pdev->dev.of_node;
822 res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
823 host->ncfg->num_parts);
824 if (!res)
825 return res;
826
827 nand_release(mtd);
828
829err_exit4:
830 free_irq(host->irq, host);
831err_exit3:
832 if (use_dma)
833 dma_release_channel(host->dma_chan);
834err_exit2:
835 clk_disable(host->clk);
836 clk_put(host->clk);
837 platform_set_drvdata(pdev, NULL);
838err_exit1:
839 lpc32xx_wp_enable(host);
840 gpio_free(host->ncfg->wp_gpio);
841
842 return res;
843}
844
845/*
846 * Remove NAND device
847 */
848static int __devexit lpc32xx_nand_remove(struct platform_device *pdev)
849{
850 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
851 struct mtd_info *mtd = &host->mtd;
852
853 nand_release(mtd);
854 free_irq(host->irq, host);
855 if (use_dma)
856 dma_release_channel(host->dma_chan);
857
858 clk_disable(host->clk);
859 clk_put(host->clk);
860 platform_set_drvdata(pdev, NULL);
861
862 lpc32xx_wp_enable(host);
863 gpio_free(host->ncfg->wp_gpio);
864
865 return 0;
866}
867
868#ifdef CONFIG_PM
869static int lpc32xx_nand_resume(struct platform_device *pdev)
870{
871 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
872
873 /* Re-enable NAND clock */
874 clk_enable(host->clk);
875
876 /* Fresh init of NAND controller */
877 lpc32xx_nand_setup(host);
878
879 /* Disable write protect */
880 lpc32xx_wp_disable(host);
881
882 return 0;
883}
884
885static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
886{
887 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
888
889 /* Enable write protect for safety */
890 lpc32xx_wp_enable(host);
891
892 /* Disable clock */
893 clk_disable(host->clk);
894 return 0;
895}
896
897#else
898#define lpc32xx_nand_resume NULL
899#define lpc32xx_nand_suspend NULL
900#endif
901
902static const struct of_device_id lpc32xx_nand_match[] = {
903 { .compatible = "nxp,lpc3220-mlc" },
904 { /* sentinel */ },
905};
906MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
907
908static struct platform_driver lpc32xx_nand_driver = {
909 .probe = lpc32xx_nand_probe,
910 .remove = __devexit_p(lpc32xx_nand_remove),
911 .resume = lpc32xx_nand_resume,
912 .suspend = lpc32xx_nand_suspend,
913 .driver = {
914 .name = DRV_NAME,
915 .owner = THIS_MODULE,
916 .of_match_table = of_match_ptr(lpc32xx_nand_match),
917 },
918};
919
920module_platform_driver(lpc32xx_nand_driver);
921
922MODULE_LICENSE("GPL");
923MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
924MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");
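
A note on lpc32xx_nand_setup() above: the nxp,* timing properties from the device tree are used as divisors of the MLC block clock, so each MLC_TIME_REG field ends up holding a small cycle count. A worked sketch of that arithmetic (the 26000000 property value is an assumed figure for illustration; 104 MHz is the fallback clock rate used by the driver):

/* Cycle count as computed in lpc32xx_nand_setup(): clkrate / dt_value,
 * plus one for the fields that add an extra cycle. Sketch only.
 */
static unsigned int mlc_timing_cycles(unsigned long clkrate,
				      unsigned long dt_value,
				      unsigned int extra)
{
	return clkrate / dt_value + extra;
}

For example, mlc_timing_cycles(104000000, 26000000, 1) == 5, which fits in the 4-bit field written by MLCTIMEREG_RD_HIGH().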
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
new file mode 100644
index 000000000000..32409c45d479
--- /dev/null
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -0,0 +1,1039 @@
1/*
2 * NXP LPC32XX NAND SLC driver
3 *
4 * Authors:
5 * Kevin Wells <kevin.wells@nxp.com>
6 * Roland Stigge <stigge@antcom.de>
7 *
8 * Copyright © 2011 NXP Semiconductors
9 * Copyright © 2012 Roland Stigge
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 */
21
22#include <linux/slab.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25#include <linux/mtd/mtd.h>
26#include <linux/mtd/nand.h>
27#include <linux/mtd/partitions.h>
28#include <linux/clk.h>
29#include <linux/err.h>
30#include <linux/delay.h>
31#include <linux/io.h>
32#include <linux/mm.h>
33#include <linux/dma-mapping.h>
34#include <linux/dmaengine.h>
35#include <linux/mtd/nand_ecc.h>
36#include <linux/gpio.h>
37#include <linux/of.h>
38#include <linux/of_mtd.h>
39#include <linux/of_gpio.h>
40#include <linux/mtd/lpc32xx_slc.h>
41
42#define LPC32XX_MODNAME "lpc32xx-nand"
43
44/**********************************************************************
45* SLC NAND controller register offsets
46**********************************************************************/
47
48#define SLC_DATA(x) (x + 0x000)
49#define SLC_ADDR(x) (x + 0x004)
50#define SLC_CMD(x) (x + 0x008)
51#define SLC_STOP(x) (x + 0x00C)
52#define SLC_CTRL(x) (x + 0x010)
53#define SLC_CFG(x) (x + 0x014)
54#define SLC_STAT(x) (x + 0x018)
55#define SLC_INT_STAT(x) (x + 0x01C)
56#define SLC_IEN(x) (x + 0x020)
57#define SLC_ISR(x) (x + 0x024)
58#define SLC_ICR(x) (x + 0x028)
59#define SLC_TAC(x) (x + 0x02C)
60#define SLC_TC(x) (x + 0x030)
61#define SLC_ECC(x) (x + 0x034)
62#define SLC_DMA_DATA(x) (x + 0x038)
63
64/**********************************************************************
65* slc_ctrl register definitions
66**********************************************************************/
67#define SLCCTRL_SW_RESET (1 << 2) /* Reset the NAND controller bit */
68#define SLCCTRL_ECC_CLEAR (1 << 1) /* Reset ECC bit */
69#define SLCCTRL_DMA_START (1 << 0) /* Start DMA channel bit */
70
71/**********************************************************************
72* slc_cfg register definitions
73**********************************************************************/
74#define SLCCFG_CE_LOW (1 << 5) /* Force CE low bit */
75#define SLCCFG_DMA_ECC (1 << 4) /* Enable DMA ECC bit */
76#define SLCCFG_ECC_EN (1 << 3) /* ECC enable bit */
77#define SLCCFG_DMA_BURST (1 << 2) /* DMA burst bit */
78#define SLCCFG_DMA_DIR (1 << 1) /* DMA write(0)/read(1) bit */
79#define SLCCFG_WIDTH (1 << 0) /* External device width, 0=8bit */
80
81/**********************************************************************
82* slc_stat register definitions
83**********************************************************************/
84#define SLCSTAT_DMA_FIFO (1 << 2) /* DMA FIFO has data bit */
85#define SLCSTAT_SLC_FIFO (1 << 1) /* SLC FIFO has data bit */
86#define SLCSTAT_NAND_READY (1 << 0) /* NAND device is ready bit */
87
88/**********************************************************************
89* slc_int_stat, slc_ien, slc_isr, and slc_icr register definitions
90**********************************************************************/
91#define SLCSTAT_INT_TC (1 << 1) /* Transfer count bit */
92#define SLCSTAT_INT_RDY_EN (1 << 0) /* Ready interrupt bit */
93
94/**********************************************************************
95* slc_tac register definitions
96**********************************************************************/
97/* Clock setting for RDY write sample wait time in 2*n clocks */
98#define SLCTAC_WDR(n) (((n) & 0xF) << 28)
99/* Write pulse width in clock cycles, 1 to 16 clocks */
100#define SLCTAC_WWIDTH(n) (((n) & 0xF) << 24)
101/* Write hold time of control and data signals, 1 to 16 clocks */
102#define SLCTAC_WHOLD(n) (((n) & 0xF) << 20)
103/* Write setup time of control and data signals, 1 to 16 clocks */
104#define SLCTAC_WSETUP(n) (((n) & 0xF) << 16)
105/* Clock setting for RDY read sample wait time in 2*n clocks */
106#define SLCTAC_RDR(n) (((n) & 0xF) << 12)
107/* Read pulse width in clock cycles, 1 to 16 clocks */
108#define SLCTAC_RWIDTH(n) (((n) & 0xF) << 8)
109/* Read hold time of control and data signals, 1 to 16 clocks */
110#define SLCTAC_RHOLD(n) (((n) & 0xF) << 4)
111/* Read setup time of control and data signals, 1 to 16 clocks */
112#define SLCTAC_RSETUP(n) (((n) & 0xF) << 0)
113
114/**********************************************************************
115* slc_ecc register definitions
116**********************************************************************/
	117/* ECC line parity fetch macro */
118#define SLCECC_TO_LINEPAR(n) (((n) >> 6) & 0x7FFF)
119#define SLCECC_TO_COLPAR(n) ((n) & 0x3F)
120
121/*
122 * DMA requires storage space for the DMA local buffer and the hardware ECC
123 * storage area. The DMA local buffer is only used if DMA mapping fails
124 * during runtime.
125 */
126#define LPC32XX_DMA_DATA_SIZE 4096
127#define LPC32XX_ECC_SAVE_SIZE ((4096 / 256) * 4)
128
129/* Number of bytes used for ECC stored in NAND per 256 bytes */
130#define LPC32XX_SLC_DEV_ECC_BYTES 3
131
132/*
133 * If the NAND base clock frequency can't be fetched, this frequency will be
134 * used instead as the base. This rate is used to setup the timing registers
135 * used for NAND accesses.
136 */
137#define LPC32XX_DEF_BUS_RATE 133250000
138
139/* Milliseconds for DMA FIFO timeout (unlikely anyway) */
140#define LPC32XX_DMA_TIMEOUT 100
141
142/*
143 * NAND ECC Layout for small page NAND devices
144 * Note: For large and huge page devices, the default layouts are used
145 */
146static struct nand_ecclayout lpc32xx_nand_oob_16 = {
147 .eccbytes = 6,
148 .eccpos = {10, 11, 12, 13, 14, 15},
149 .oobfree = {
150 { .offset = 0, .length = 4 },
151 { .offset = 6, .length = 4 },
152 },
153};
154
155static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
156static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
157
158/*
159 * Small page FLASH BBT descriptors, marker at offset 0, version at offset 6
	160 * Note: Large page devices use the default layout
161 */
162static struct nand_bbt_descr bbt_smallpage_main_descr = {
163 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
164 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
165 .offs = 0,
166 .len = 4,
167 .veroffs = 6,
168 .maxblocks = 4,
169 .pattern = bbt_pattern
170};
171
172static struct nand_bbt_descr bbt_smallpage_mirror_descr = {
173 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
174 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
175 .offs = 0,
176 .len = 4,
177 .veroffs = 6,
178 .maxblocks = 4,
179 .pattern = mirror_pattern
180};
181
182/*
183 * NAND platform configuration structure
184 */
185struct lpc32xx_nand_cfg_slc {
186 uint32_t wdr_clks;
187 uint32_t wwidth;
188 uint32_t whold;
189 uint32_t wsetup;
190 uint32_t rdr_clks;
191 uint32_t rwidth;
192 uint32_t rhold;
193 uint32_t rsetup;
194 bool use_bbt;
195 int wp_gpio;
196 struct mtd_partition *parts;
197 unsigned num_parts;
198};
199
200struct lpc32xx_nand_host {
201 struct nand_chip nand_chip;
202 struct lpc32xx_slc_platform_data *pdata;
203 struct clk *clk;
204 struct mtd_info mtd;
205 void __iomem *io_base;
206 struct lpc32xx_nand_cfg_slc *ncfg;
207
208 struct completion comp;
209 struct dma_chan *dma_chan;
210 uint32_t dma_buf_len;
211 struct dma_slave_config dma_slave_config;
212 struct scatterlist sgl;
213
214 /*
215 * DMA and CPU addresses of ECC work area and data buffer
216 */
217 uint32_t *ecc_buf;
218 uint8_t *data_buf;
219 dma_addr_t io_base_dma;
220};
221
222static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
223{
224 uint32_t clkrate, tmp;
225
226 /* Reset SLC controller */
227 writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base));
228 udelay(1000);
229
230 /* Basic setup */
231 writel(0, SLC_CFG(host->io_base));
232 writel(0, SLC_IEN(host->io_base));
233 writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN),
234 SLC_ICR(host->io_base));
235
236 /* Get base clock for SLC block */
237 clkrate = clk_get_rate(host->clk);
238 if (clkrate == 0)
239 clkrate = LPC32XX_DEF_BUS_RATE;
240
241 /* Compute clock setup values */
242 tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
243 SLCTAC_WWIDTH(1 + (clkrate / host->ncfg->wwidth)) |
244 SLCTAC_WHOLD(1 + (clkrate / host->ncfg->whold)) |
245 SLCTAC_WSETUP(1 + (clkrate / host->ncfg->wsetup)) |
246 SLCTAC_RDR(host->ncfg->rdr_clks) |
247 SLCTAC_RWIDTH(1 + (clkrate / host->ncfg->rwidth)) |
248 SLCTAC_RHOLD(1 + (clkrate / host->ncfg->rhold)) |
249 SLCTAC_RSETUP(1 + (clkrate / host->ncfg->rsetup));
250 writel(tmp, SLC_TAC(host->io_base));
251}
252
253/*
254 * Hardware specific access to control lines
255 */
256static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
257 unsigned int ctrl)
258{
259 uint32_t tmp;
260 struct nand_chip *chip = mtd->priv;
261 struct lpc32xx_nand_host *host = chip->priv;
262
263 /* Does CE state need to be changed? */
264 tmp = readl(SLC_CFG(host->io_base));
265 if (ctrl & NAND_NCE)
266 tmp |= SLCCFG_CE_LOW;
267 else
268 tmp &= ~SLCCFG_CE_LOW;
269 writel(tmp, SLC_CFG(host->io_base));
270
271 if (cmd != NAND_CMD_NONE) {
272 if (ctrl & NAND_CLE)
273 writel(cmd, SLC_CMD(host->io_base));
274 else
275 writel(cmd, SLC_ADDR(host->io_base));
276 }
277}
278
279/*
280 * Read the Device Ready pin
281 */
282static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
283{
284 struct nand_chip *chip = mtd->priv;
285 struct lpc32xx_nand_host *host = chip->priv;
286 int rdy = 0;
287
288 if ((readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0)
289 rdy = 1;
290
291 return rdy;
292}
293
294/*
295 * Enable NAND write protect
296 */
297static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
298{
299 if (gpio_is_valid(host->ncfg->wp_gpio))
300 gpio_set_value(host->ncfg->wp_gpio, 0);
301}
302
303/*
304 * Disable NAND write protect
305 */
306static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
307{
308 if (gpio_is_valid(host->ncfg->wp_gpio))
309 gpio_set_value(host->ncfg->wp_gpio, 1);
310}
311
312/*
313 * Prepares SLC for transfers with H/W ECC enabled
314 */
315static void lpc32xx_nand_ecc_enable(struct mtd_info *mtd, int mode)
316{
317 /* Hardware ECC is enabled automatically in hardware as needed */
318}
319
320/*
321 * Calculates the ECC for the data
322 */
323static int lpc32xx_nand_ecc_calculate(struct mtd_info *mtd,
324 const unsigned char *buf,
325 unsigned char *code)
326{
327 /*
328 * ECC is calculated automatically in hardware during syndrome read
329 * and write operations, so it doesn't need to be calculated here.
330 */
331 return 0;
332}
333
334/*
335 * Read a single byte from NAND device
336 */
337static uint8_t lpc32xx_nand_read_byte(struct mtd_info *mtd)
338{
339 struct nand_chip *chip = mtd->priv;
340 struct lpc32xx_nand_host *host = chip->priv;
341
342 return (uint8_t)readl(SLC_DATA(host->io_base));
343}
344
345/*
346 * Simple device read without ECC
347 */
348static void lpc32xx_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
349{
350 struct nand_chip *chip = mtd->priv;
351 struct lpc32xx_nand_host *host = chip->priv;
352
353 /* Direct device read with no ECC */
354 while (len-- > 0)
355 *buf++ = (uint8_t)readl(SLC_DATA(host->io_base));
356}
357
358/*
359 * Simple device write without ECC
360 */
361static void lpc32xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
362{
363 struct nand_chip *chip = mtd->priv;
364 struct lpc32xx_nand_host *host = chip->priv;
365
366 /* Direct device write with no ECC */
367 while (len-- > 0)
368 writel((uint32_t)*buf++, SLC_DATA(host->io_base));
369}
370
371/*
372 * Read the OOB data from the device without ECC using FIFO method
373 */
374static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
375 struct nand_chip *chip, int page)
376{
377 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
378 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
379
380 return 0;
381}
382
383/*
384 * Write the OOB data to the device without ECC using FIFO method
385 */
386static int lpc32xx_nand_write_oob_syndrome(struct mtd_info *mtd,
387 struct nand_chip *chip, int page)
388{
389 int status;
390
391 chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
392 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
393
394 /* Send command to program the OOB data */
395 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
396
397 status = chip->waitfunc(mtd, chip);
398
399 return status & NAND_STATUS_FAIL ? -EIO : 0;
400}
401
402/*
403 * Fills in the ECC fields in the OOB buffer with the hardware generated ECC
404 */
405static void lpc32xx_slc_ecc_copy(uint8_t *spare, const uint32_t *ecc, int count)
406{
407 int i;
408
409 for (i = 0; i < (count * 3); i += 3) {
410 uint32_t ce = ecc[i / 3];
411 ce = ~(ce << 2) & 0xFFFFFF;
412 spare[i + 2] = (uint8_t)(ce & 0xFF);
413 ce >>= 8;
414 spare[i + 1] = (uint8_t)(ce & 0xFF);
415 ce >>= 8;
416 spare[i] = (uint8_t)(ce & 0xFF);
417 }
418}
419
420static void lpc32xx_dma_complete_func(void *completion)
421{
422 complete(completion);
423}
424
425static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
426 void *mem, int len, enum dma_transfer_direction dir)
427{
428 struct nand_chip *chip = mtd->priv;
429 struct lpc32xx_nand_host *host = chip->priv;
430 struct dma_async_tx_descriptor *desc;
431 int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
432 int res;
433
434 host->dma_slave_config.direction = dir;
435 host->dma_slave_config.src_addr = dma;
436 host->dma_slave_config.dst_addr = dma;
437 host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
438 host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
439 host->dma_slave_config.src_maxburst = 4;
440 host->dma_slave_config.dst_maxburst = 4;
441 /* DMA controller does flow control: */
442 host->dma_slave_config.device_fc = false;
443 if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
444 dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
445 return -ENXIO;
446 }
447
448 sg_init_one(&host->sgl, mem, len);
449
450 res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
451 DMA_BIDIRECTIONAL);
452 if (res != 1) {
453 dev_err(mtd->dev.parent, "Failed to map sg list\n");
454 return -ENXIO;
455 }
456 desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
457 flags);
458 if (!desc) {
459 dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
460 goto out1;
461 }
462
463 init_completion(&host->comp);
464 desc->callback = lpc32xx_dma_complete_func;
465 desc->callback_param = &host->comp;
466
467 dmaengine_submit(desc);
468 dma_async_issue_pending(host->dma_chan);
469
470 wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));
471
472 dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
473 DMA_BIDIRECTIONAL);
474
475 return 0;
476out1:
477 dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
478 DMA_BIDIRECTIONAL);
479 return -ENXIO;
480}
481
482/*
483 * DMA read/write transfers with ECC support
484 */
485static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
486 int read)
487{
488 struct nand_chip *chip = mtd->priv;
489 struct lpc32xx_nand_host *host = chip->priv;
490 int i, status = 0;
491 unsigned long timeout;
492 int res;
493 enum dma_transfer_direction dir =
494 read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
495 uint8_t *dma_buf;
496 bool dma_mapped;
497
498 if ((void *)buf <= high_memory) {
499 dma_buf = buf;
500 dma_mapped = true;
501 } else {
502 dma_buf = host->data_buf;
503 dma_mapped = false;
504 if (!read)
505 memcpy(host->data_buf, buf, mtd->writesize);
506 }
507
508 if (read) {
509 writel(readl(SLC_CFG(host->io_base)) |
510 SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
511 SLCCFG_DMA_BURST, SLC_CFG(host->io_base));
512 } else {
513 writel((readl(SLC_CFG(host->io_base)) |
514 SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) &
515 ~SLCCFG_DMA_DIR,
516 SLC_CFG(host->io_base));
517 }
518
519 /* Clear initial ECC */
520 writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));
521
522 /* Transfer size is data area only */
523 writel(mtd->writesize, SLC_TC(host->io_base));
524
525 /* Start transfer in the NAND controller */
526 writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
527 SLC_CTRL(host->io_base));
528
529 for (i = 0; i < chip->ecc.steps; i++) {
530 /* Data */
531 res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma),
532 dma_buf + i * chip->ecc.size,
533 mtd->writesize / chip->ecc.steps, dir);
534 if (res)
535 return res;
536
537 /* Always _read_ ECC */
538 if (i == chip->ecc.steps - 1)
539 break;
540 if (!read) /* ECC availability delayed on write */
541 udelay(10);
542 res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma),
543 &host->ecc_buf[i], 4, DMA_DEV_TO_MEM);
544 if (res)
545 return res;
546 }
547
548 /*
549 * According to NXP, the DMA can be finished here, but the NAND
550 * controller may still have buffered data. After porting to using the
551 * dmaengine DMA driver (amba-pl080), the condition (DMA_FIFO empty)
552 * appears to be always true, according to tests. Keeping the check for
553 * safety reasons for now.
554 */
555 if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) {
556 dev_warn(mtd->dev.parent, "FIFO not empty!\n");
557 timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT);
558 while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
559 time_before(jiffies, timeout))
560 cpu_relax();
561 if (!time_before(jiffies, timeout)) {
562 dev_err(mtd->dev.parent, "FIFO held data too long\n");
563 status = -EIO;
564 }
565 }
566
567 /* Read last calculated ECC value */
568 if (!read)
569 udelay(10);
570 host->ecc_buf[chip->ecc.steps - 1] =
571 readl(SLC_ECC(host->io_base));
572
573 /* Flush DMA */
574 dmaengine_terminate_all(host->dma_chan);
575
576 if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
577 readl(SLC_TC(host->io_base))) {
578 /* Something is left in the FIFO, something is wrong */
579 dev_err(mtd->dev.parent, "DMA FIFO failure\n");
580 status = -EIO;
581 }
582
583 /* Stop DMA & HW ECC */
584 writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
585 SLC_CTRL(host->io_base));
586 writel(readl(SLC_CFG(host->io_base)) &
587 ~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
588 SLCCFG_DMA_BURST), SLC_CFG(host->io_base));
589
590 if (!dma_mapped && read)
591 memcpy(buf, host->data_buf, mtd->writesize);
592
593 return status;
594}
595
596/*
597 * Read the data and OOB data from the device, use ECC correction with the
598 * data, disable ECC for the OOB data
599 */
600static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
601 struct nand_chip *chip, uint8_t *buf,
602 int oob_required, int page)
603{
604 struct lpc32xx_nand_host *host = chip->priv;
605 int stat, i, status;
606 uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];
607
608 /* Issue read command */
609 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
610
611 /* Read data and oob, calculate ECC */
612 status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);
613
614 /* Get OOB data */
615 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
616
617 /* Convert to stored ECC format */
618 lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps);
619
620 /* Pointer to ECC data retrieved from NAND spare area */
621 oobecc = chip->oob_poi + chip->ecc.layout->eccpos[0];
622
623 for (i = 0; i < chip->ecc.steps; i++) {
624 stat = chip->ecc.correct(mtd, buf, oobecc,
625 &tmpecc[i * chip->ecc.bytes]);
626 if (stat < 0)
627 mtd->ecc_stats.failed++;
628 else
629 mtd->ecc_stats.corrected += stat;
630
631 buf += chip->ecc.size;
632 oobecc += chip->ecc.bytes;
633 }
634
635 return status;
636}
637
638/*
639 * Read the data and OOB data from the device, no ECC correction with the
640 * data or OOB data
641 */
642static int lpc32xx_nand_read_page_raw_syndrome(struct mtd_info *mtd,
643 struct nand_chip *chip,
644 uint8_t *buf, int oob_required,
645 int page)
646{
647 /* Issue read command */
648 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
649
650 /* Raw reads can just use the FIFO interface */
651 chip->read_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
652 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
653
654 return 0;
655}
656
657/*
658 * Write the data and OOB data to the device, use ECC with the data,
659 * disable ECC for the OOB data
660 */
661static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
662 struct nand_chip *chip,
663 const uint8_t *buf, int oob_required)
664{
665 struct lpc32xx_nand_host *host = chip->priv;
666 uint8_t *pb = chip->oob_poi + chip->ecc.layout->eccpos[0];
667 int error;
668
669 /* Write data, calculate ECC on outbound data */
670 error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
671 if (error)
672 return error;
673
674 /*
675 * The calculated ECC needs some manual work done to it before
676 * committing it to NAND. Process the calculated ECC and place
677 * the resultant values directly into the OOB buffer. */
678 lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);
679
680 /* Write ECC data to device */
681 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
682 return 0;
683}
684
685/*
686 * Write the data and OOB data to the device, no ECC correction with the
687 * data or OOB data
688 */
689static int lpc32xx_nand_write_page_raw_syndrome(struct mtd_info *mtd,
690 struct nand_chip *chip,
691 const uint8_t *buf,
692 int oob_required)
693{
694 /* Raw writes can just use the FIFO interface */
695 chip->write_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
696 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
697 return 0;
698}
699
700static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
701{
702 struct mtd_info *mtd = &host->mtd;
703 dma_cap_mask_t mask;
704
705 if (!host->pdata || !host->pdata->dma_filter) {
706 dev_err(mtd->dev.parent, "no DMA platform data\n");
707 return -ENOENT;
708 }
709
710 dma_cap_zero(mask);
711 dma_cap_set(DMA_SLAVE, mask);
712 host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
713 "nand-slc");
714 if (!host->dma_chan) {
715 dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
716 return -EBUSY;
717 }
718
719 return 0;
720}
721
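lpc32xx_nand_dma_setup() expects the board to hand in a dmaengine filter through platform data. As a hedged illustration (the platform-data struct and header names are assumptions inferred from the host->pdata->dma_filter use above; pl08x_filter_id() is the stock name-matching filter of the PL08x dmaengine driver), a board file might wire it up roughly like this:

#include <linux/amba/pl08x.h>
#include <linux/mtd/lpc32xx_slc.h>	/* assumed platform-data header */

/* Hypothetical board support: let the generic PL08x filter claim the
 * slave channel named "nand-slc" that lpc32xx_nand_dma_setup() requests. */
static struct lpc32xx_slc_platform_data lpc32xx_slc_data = {
	.dma_filter = pl08x_filter_id,
};

/* ...then attach it before registering the SLC platform device, e.g.:
 *	lpc32xx_slc_device.dev.platform_data = &lpc32xx_slc_data;
 * (device name hypothetical)
 */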
722static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
723{
724 struct lpc32xx_nand_cfg_slc *ncfg;
725 struct device_node *np = dev->of_node;
726
727 ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
728 if (!ncfg) {
729 dev_err(dev, "could not allocate memory for NAND config\n");
730 return NULL;
731 }
732
733 of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);
734 of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth);
735 of_property_read_u32(np, "nxp,whold", &ncfg->whold);
736 of_property_read_u32(np, "nxp,wsetup", &ncfg->wsetup);
737 of_property_read_u32(np, "nxp,rdr-clks", &ncfg->rdr_clks);
738 of_property_read_u32(np, "nxp,rwidth", &ncfg->rwidth);
739 of_property_read_u32(np, "nxp,rhold", &ncfg->rhold);
740 of_property_read_u32(np, "nxp,rsetup", &ncfg->rsetup);
741
742 if (!ncfg->wdr_clks || !ncfg->wwidth || !ncfg->whold ||
743 !ncfg->wsetup || !ncfg->rdr_clks || !ncfg->rwidth ||
744 !ncfg->rhold || !ncfg->rsetup) {
745 dev_err(dev, "chip parameters not specified correctly\n");
746 return NULL;
747 }
748
749 ncfg->use_bbt = of_get_nand_on_flash_bbt(np);
750 ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
751
752 return ncfg;
753}
754
755/*
756 * Probe for NAND controller
757 */
758static int __devinit lpc32xx_nand_probe(struct platform_device *pdev)
759{
760 struct lpc32xx_nand_host *host;
761 struct mtd_info *mtd;
762 struct nand_chip *chip;
763 struct resource *rc;
764 struct mtd_part_parser_data ppdata = {};
765 int res;
766
767 rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
768 if (rc == NULL) {
769 dev_err(&pdev->dev, "No memory resource found for device\n");
770 return -EBUSY;
771 }
772
773 /* Allocate memory for the device structure (and zero it) */
774 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
775 if (!host) {
776 dev_err(&pdev->dev, "failed to allocate device structure\n");
777 return -ENOMEM;
778 }
779 host->io_base_dma = rc->start;
780
781 host->io_base = devm_request_and_ioremap(&pdev->dev, rc);
782 if (host->io_base == NULL) {
783 dev_err(&pdev->dev, "ioremap failed\n");
784 return -ENOMEM;
785 }
786
787 if (pdev->dev.of_node)
788 host->ncfg = lpc32xx_parse_dt(&pdev->dev);
789 if (!host->ncfg) {
790 dev_err(&pdev->dev,
791 "Missing or bad NAND config from device tree\n");
792 return -ENOENT;
793 }
794 if (host->ncfg->wp_gpio == -EPROBE_DEFER)
795 return -EPROBE_DEFER;
796 if (gpio_is_valid(host->ncfg->wp_gpio) &&
797 gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
798 dev_err(&pdev->dev, "GPIO not available\n");
799 return -EBUSY;
800 }
801 lpc32xx_wp_disable(host);
802
803 host->pdata = pdev->dev.platform_data;
804
805 mtd = &host->mtd;
806 chip = &host->nand_chip;
807 chip->priv = host;
808 mtd->priv = chip;
809 mtd->owner = THIS_MODULE;
810 mtd->dev.parent = &pdev->dev;
811
812 /* Get NAND clock */
813 host->clk = clk_get(&pdev->dev, NULL);
814 if (IS_ERR(host->clk)) {
815 dev_err(&pdev->dev, "Clock failure\n");
816 res = -ENOENT;
817 goto err_exit1;
818 }
819 clk_enable(host->clk);
820
821 /* Set NAND IO addresses and command/ready functions */
822 chip->IO_ADDR_R = SLC_DATA(host->io_base);
823 chip->IO_ADDR_W = SLC_DATA(host->io_base);
824 chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
825 chip->dev_ready = lpc32xx_nand_device_ready;
826 chip->chip_delay = 20; /* 20us command delay time */
827
828 /* Init NAND controller */
829 lpc32xx_nand_setup(host);
830
831 platform_set_drvdata(pdev, host);
832
833 /* NAND callbacks for LPC32xx SLC hardware */
834 chip->ecc.mode = NAND_ECC_HW_SYNDROME;
835 chip->read_byte = lpc32xx_nand_read_byte;
836 chip->read_buf = lpc32xx_nand_read_buf;
837 chip->write_buf = lpc32xx_nand_write_buf;
838 chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
839 chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
840 chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
841 chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
842 chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
843 chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
844 chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
845 chip->ecc.correct = nand_correct_data;
846 chip->ecc.strength = 1;
847 chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
848
849 /* bitflip_threshold's default is defined as ecc_strength anyway.
 850	 * Unfortunately, it is only set later, in add_mtd_device(). Until then
 851	 * it is 0, which causes bad block table scanning errors in
 852	 * nand_scan_tail(), so we set it here already. */
853 mtd->bitflip_threshold = chip->ecc.strength;
854
855 /*
856 * Allocate a large enough buffer for a single huge page plus
857 * extra space for the spare area and ECC storage area
858 */
859 host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
860 host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
861 GFP_KERNEL);
862 if (host->data_buf == NULL) {
863 dev_err(&pdev->dev, "Error allocating memory\n");
864 res = -ENOMEM;
865 goto err_exit2;
866 }
867
868 res = lpc32xx_nand_dma_setup(host);
869 if (res) {
870 res = -EIO;
871 goto err_exit2;
872 }
873
874 /* Find NAND device */
875 if (nand_scan_ident(mtd, 1, NULL)) {
876 res = -ENXIO;
877 goto err_exit3;
878 }
879
880 /* OOB and ECC CPU and DMA work areas */
881 host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
882
883 /*
884 * Small page FLASH has a unique OOB layout, but large and huge
885 * page FLASH use the standard layout. Small page FLASH uses a
886 * custom BBT marker layout.
887 */
888 if (mtd->writesize <= 512)
889 chip->ecc.layout = &lpc32xx_nand_oob_16;
890
891 /* These sizes remain the same regardless of page size */
892 chip->ecc.size = 256;
893 chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
894 chip->ecc.prepad = chip->ecc.postpad = 0;
895
896 /* Avoid extra scan if using BBT, setup BBT support */
897 if (host->ncfg->use_bbt) {
898 chip->options |= NAND_SKIP_BBTSCAN;
899 chip->bbt_options |= NAND_BBT_USE_FLASH;
900
901 /*
902 * Use a custom BBT marker setup for small page FLASH that
903 * won't interfere with the ECC layout. Large and huge page
904 * FLASH use the standard layout.
905 */
906 if (mtd->writesize <= 512) {
907 chip->bbt_td = &bbt_smallpage_main_descr;
908 chip->bbt_md = &bbt_smallpage_mirror_descr;
909 }
910 }
911
912 /*
913 * Fills out all the uninitialized function pointers with the defaults
914 */
915 if (nand_scan_tail(mtd)) {
916 res = -ENXIO;
917 goto err_exit3;
918 }
919
920 /* Standard layout in FLASH for bad block tables */
921 if (host->ncfg->use_bbt) {
922 if (nand_default_bbt(mtd) < 0)
923 dev_err(&pdev->dev,
924 "Error initializing default bad block tables\n");
925 }
926
927 mtd->name = "nxp_lpc3220_slc";
928 ppdata.of_node = pdev->dev.of_node;
929 res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
930 host->ncfg->num_parts);
931 if (!res)
932 return res;
933
934 nand_release(mtd);
935
936err_exit3:
937 dma_release_channel(host->dma_chan);
938err_exit2:
939 clk_disable(host->clk);
940 clk_put(host->clk);
941 platform_set_drvdata(pdev, NULL);
942err_exit1:
943 lpc32xx_wp_enable(host);
944 gpio_free(host->ncfg->wp_gpio);
945
946 return res;
947}
948
949/*
950 * Remove NAND device.
951 */
952static int __devexit lpc32xx_nand_remove(struct platform_device *pdev)
953{
954 uint32_t tmp;
955 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
956 struct mtd_info *mtd = &host->mtd;
957
958 nand_release(mtd);
959 dma_release_channel(host->dma_chan);
960
961 /* Force CE high */
962 tmp = readl(SLC_CTRL(host->io_base));
963 tmp &= ~SLCCFG_CE_LOW;
964 writel(tmp, SLC_CTRL(host->io_base));
965
966 clk_disable(host->clk);
967 clk_put(host->clk);
968 platform_set_drvdata(pdev, NULL);
969 lpc32xx_wp_enable(host);
970 gpio_free(host->ncfg->wp_gpio);
971
972 return 0;
973}
974
975#ifdef CONFIG_PM
976static int lpc32xx_nand_resume(struct platform_device *pdev)
977{
978 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
979
980 /* Re-enable NAND clock */
981 clk_enable(host->clk);
982
983 /* Fresh init of NAND controller */
984 lpc32xx_nand_setup(host);
985
986 /* Disable write protect */
987 lpc32xx_wp_disable(host);
988
989 return 0;
990}
991
992static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
993{
994 uint32_t tmp;
995 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
996
997 /* Force CE high */
998 tmp = readl(SLC_CTRL(host->io_base));
999 tmp &= ~SLCCFG_CE_LOW;
1000 writel(tmp, SLC_CTRL(host->io_base));
1001
1002 /* Enable write protect for safety */
1003 lpc32xx_wp_enable(host);
1004
1005 /* Disable clock */
1006 clk_disable(host->clk);
1007
1008 return 0;
1009}
1010
1011#else
1012#define lpc32xx_nand_resume NULL
1013#define lpc32xx_nand_suspend NULL
1014#endif
1015
1016static const struct of_device_id lpc32xx_nand_match[] = {
1017 { .compatible = "nxp,lpc3220-slc" },
1018 { /* sentinel */ },
1019};
1020MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
1021
1022static struct platform_driver lpc32xx_nand_driver = {
1023 .probe = lpc32xx_nand_probe,
1024 .remove = __devexit_p(lpc32xx_nand_remove),
1025 .resume = lpc32xx_nand_resume,
1026 .suspend = lpc32xx_nand_suspend,
1027 .driver = {
1028 .name = LPC32XX_MODNAME,
1029 .owner = THIS_MODULE,
1030 .of_match_table = of_match_ptr(lpc32xx_nand_match),
1031 },
1032};
1033
1034module_platform_driver(lpc32xx_nand_driver);
1035
1036MODULE_LICENSE("GPL");
1037MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
1038MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
1039MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX SLC controller");
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index c259c24d7986..f776c8577b8c 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -506,27 +506,6 @@ static void mpc5121_nfc_write_buf(struct mtd_info *mtd,
506 mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1); 506 mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1);
507} 507}
508 508
509/* Compare buffer with NAND flash */
510static int mpc5121_nfc_verify_buf(struct mtd_info *mtd,
511 const u_char *buf, int len)
512{
513 u_char tmp[256];
514 uint bsize;
515
516 while (len) {
517 bsize = min(len, 256);
518 mpc5121_nfc_read_buf(mtd, tmp, bsize);
519
520 if (memcmp(buf, tmp, bsize))
521 return 1;
522
523 buf += bsize;
524 len -= bsize;
525 }
526
527 return 0;
528}
529
530/* Read byte from NFC buffers */ 509/* Read byte from NFC buffers */
531static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd) 510static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd)
532{ 511{
@@ -732,7 +711,6 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
732 chip->read_word = mpc5121_nfc_read_word; 711 chip->read_word = mpc5121_nfc_read_word;
733 chip->read_buf = mpc5121_nfc_read_buf; 712 chip->read_buf = mpc5121_nfc_read_buf;
734 chip->write_buf = mpc5121_nfc_write_buf; 713 chip->write_buf = mpc5121_nfc_write_buf;
735 chip->verify_buf = mpc5121_nfc_verify_buf;
736 chip->select_chip = mpc5121_nfc_select_chip; 714 chip->select_chip = mpc5121_nfc_select_chip;
737 chip->bbt_options = NAND_BBT_USE_FLASH; 715 chip->bbt_options = NAND_BBT_USE_FLASH;
738 chip->ecc.mode = NAND_ECC_SOFT; 716 chip->ecc.mode = NAND_ECC_SOFT;
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 5683604967d7..72e31d86030d 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -43,8 +43,8 @@
43 43
44#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35()) 44#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35())
45#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21()) 45#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21())
46#define nfc_is_v3_2() (cpu_is_mx51() || cpu_is_mx53()) 46#define nfc_is_v3_2a() cpu_is_mx51()
47#define nfc_is_v3() nfc_is_v3_2() 47#define nfc_is_v3_2b() cpu_is_mx53()
48 48
49/* Addresses for NFC registers */ 49/* Addresses for NFC registers */
50#define NFC_V1_V2_BUF_SIZE (host->regs + 0x00) 50#define NFC_V1_V2_BUF_SIZE (host->regs + 0x00)
@@ -122,7 +122,7 @@
122#define NFC_V3_CONFIG2_2CMD_PHASES (1 << 4) 122#define NFC_V3_CONFIG2_2CMD_PHASES (1 << 4)
123#define NFC_V3_CONFIG2_NUM_ADDR_PHASE0 (1 << 5) 123#define NFC_V3_CONFIG2_NUM_ADDR_PHASE0 (1 << 5)
124#define NFC_V3_CONFIG2_ECC_MODE_8 (1 << 6) 124#define NFC_V3_CONFIG2_ECC_MODE_8 (1 << 6)
125#define NFC_V3_CONFIG2_PPB(x) (((x) & 0x3) << 7) 125#define NFC_V3_CONFIG2_PPB(x, shift) (((x) & 0x3) << shift)
126#define NFC_V3_CONFIG2_NUM_ADDR_PHASE1(x) (((x) & 0x3) << 12) 126#define NFC_V3_CONFIG2_NUM_ADDR_PHASE1(x) (((x) & 0x3) << 12)
127#define NFC_V3_CONFIG2_INT_MSK (1 << 15) 127#define NFC_V3_CONFIG2_INT_MSK (1 << 15)
128#define NFC_V3_CONFIG2_ST_CMD(x) (((x) & 0xff) << 24) 128#define NFC_V3_CONFIG2_ST_CMD(x) (((x) & 0xff) << 24)
@@ -174,6 +174,7 @@ struct mxc_nand_devtype_data {
174 int spare_len; 174 int spare_len;
175 int eccbytes; 175 int eccbytes;
176 int eccsize; 176 int eccsize;
177 int ppb_shift;
177}; 178};
178 179
179struct mxc_nand_host { 180struct mxc_nand_host {
@@ -745,14 +746,6 @@ static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
745 host->buf_start += n; 746 host->buf_start += n;
746} 747}
747 748
748/* Used by the upper layer to verify the data in NAND Flash
749 * with the data in the buf. */
750static int mxc_nand_verify_buf(struct mtd_info *mtd,
751 const u_char *buf, int len)
752{
753 return -EFAULT;
754}
755
756/* This function is used by upper layer for select and 749/* This function is used by upper layer for select and
757 * deselect of the NAND chip */ 750 * deselect of the NAND chip */
758static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip) 751static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip)
@@ -784,7 +777,7 @@ static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip)
784 if (chip == -1) { 777 if (chip == -1) {
785 /* Disable the NFC clock */ 778 /* Disable the NFC clock */
786 if (host->clk_act) { 779 if (host->clk_act) {
787 clk_disable(host->clk); 780 clk_disable_unprepare(host->clk);
788 host->clk_act = 0; 781 host->clk_act = 0;
789 } 782 }
790 return; 783 return;
@@ -792,7 +785,7 @@ static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip)
792 785
793 if (!host->clk_act) { 786 if (!host->clk_act) {
794 /* Enable the NFC clock */ 787 /* Enable the NFC clock */
795 clk_enable(host->clk); 788 clk_prepare_enable(host->clk);
796 host->clk_act = 1; 789 host->clk_act = 1;
797 } 790 }
798 791
@@ -1021,7 +1014,9 @@ static void preset_v3(struct mtd_info *mtd)
1021 } 1014 }
1022 1015
1023 if (mtd->writesize) { 1016 if (mtd->writesize) {
1024 config2 |= NFC_V3_CONFIG2_PPB(ffs(mtd->erasesize / mtd->writesize) - 6); 1017 config2 |= NFC_V3_CONFIG2_PPB(
1018 ffs(mtd->erasesize / mtd->writesize) - 6,
1019 host->devtype_data->ppb_shift);
1025 host->eccsize = get_eccsize(mtd); 1020 host->eccsize = get_eccsize(mtd);
1026 if (host->eccsize == 8) 1021 if (host->eccsize == 8)
1027 config2 |= NFC_V3_CONFIG2_ECC_MODE_8; 1022 config2 |= NFC_V3_CONFIG2_ECC_MODE_8;
@@ -1234,7 +1229,7 @@ static const struct mxc_nand_devtype_data imx25_nand_devtype_data = {
1234 .eccsize = 0, 1229 .eccsize = 0,
1235}; 1230};
1236 1231
1237/* v3: i.MX51, i.MX53 */ 1232/* v3.2a: i.MX51 */
1238static const struct mxc_nand_devtype_data imx51_nand_devtype_data = { 1233static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
1239 .preset = preset_v3, 1234 .preset = preset_v3,
1240 .send_cmd = send_cmd_v3, 1235 .send_cmd = send_cmd_v3,
@@ -1258,6 +1253,34 @@ static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
1258 .spare_len = 64, 1253 .spare_len = 64,
1259 .eccbytes = 0, 1254 .eccbytes = 0,
1260 .eccsize = 0, 1255 .eccsize = 0,
1256 .ppb_shift = 7,
1257};
1258
1259/* v3.2b: i.MX53 */
1260static const struct mxc_nand_devtype_data imx53_nand_devtype_data = {
1261 .preset = preset_v3,
1262 .send_cmd = send_cmd_v3,
1263 .send_addr = send_addr_v3,
1264 .send_page = send_page_v3,
1265 .send_read_id = send_read_id_v3,
1266 .get_dev_status = get_dev_status_v3,
1267 .check_int = check_int_v3,
1268 .irq_control = irq_control_v3,
1269 .get_ecc_status = get_ecc_status_v3,
1270 .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
1271 .ecclayout_2k = &nandv2_hw_eccoob_largepage,
1272 .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
1273 .select_chip = mxc_nand_select_chip_v1_v3,
1274 .correct_data = mxc_nand_correct_data_v2_v3,
1275 .irqpending_quirk = 0,
1276 .needs_ip = 1,
1277 .regs_offset = 0,
1278 .spare0_offset = 0x1000,
1279 .axi_offset = 0x1e00,
1280 .spare_len = 64,
1281 .eccbytes = 0,
1282 .eccsize = 0,
1283 .ppb_shift = 8,
1261}; 1284};
1262 1285
1263#ifdef CONFIG_OF_MTD 1286#ifdef CONFIG_OF_MTD
@@ -1274,6 +1297,9 @@ static const struct of_device_id mxcnd_dt_ids[] = {
1274 }, { 1297 }, {
1275 .compatible = "fsl,imx51-nand", 1298 .compatible = "fsl,imx51-nand",
1276 .data = &imx51_nand_devtype_data, 1299 .data = &imx51_nand_devtype_data,
1300 }, {
1301 .compatible = "fsl,imx53-nand",
1302 .data = &imx53_nand_devtype_data,
1277 }, 1303 },
1278 { /* sentinel */ } 1304 { /* sentinel */ }
1279}; 1305};
@@ -1327,15 +1353,17 @@ static int __init mxcnd_probe_pdata(struct mxc_nand_host *host)
1327 host->devtype_data = &imx27_nand_devtype_data; 1353 host->devtype_data = &imx27_nand_devtype_data;
1328 } else if (nfc_is_v21()) { 1354 } else if (nfc_is_v21()) {
1329 host->devtype_data = &imx25_nand_devtype_data; 1355 host->devtype_data = &imx25_nand_devtype_data;
1330 } else if (nfc_is_v3_2()) { 1356 } else if (nfc_is_v3_2a()) {
1331 host->devtype_data = &imx51_nand_devtype_data; 1357 host->devtype_data = &imx51_nand_devtype_data;
1358 } else if (nfc_is_v3_2b()) {
1359 host->devtype_data = &imx53_nand_devtype_data;
1332 } else 1360 } else
1333 BUG(); 1361 BUG();
1334 1362
1335 return 0; 1363 return 0;
1336} 1364}
1337 1365
1338static int __init mxcnd_probe(struct platform_device *pdev) 1366static int __devinit mxcnd_probe(struct platform_device *pdev)
1339{ 1367{
1340 struct nand_chip *this; 1368 struct nand_chip *this;
1341 struct mtd_info *mtd; 1369 struct mtd_info *mtd;
@@ -1344,8 +1372,8 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1344 int err = 0; 1372 int err = 0;
1345 1373
1346 /* Allocate memory for MTD device structure and private data */ 1374 /* Allocate memory for MTD device structure and private data */
1347 host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE + 1375 host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host) +
1348 NAND_MAX_OOBSIZE, GFP_KERNEL); 1376 NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE, GFP_KERNEL);
1349 if (!host) 1377 if (!host)
1350 return -ENOMEM; 1378 return -ENOMEM;
1351 1379
@@ -1370,36 +1398,38 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1370 this->read_word = mxc_nand_read_word; 1398 this->read_word = mxc_nand_read_word;
1371 this->write_buf = mxc_nand_write_buf; 1399 this->write_buf = mxc_nand_write_buf;
1372 this->read_buf = mxc_nand_read_buf; 1400 this->read_buf = mxc_nand_read_buf;
1373 this->verify_buf = mxc_nand_verify_buf;
1374 1401
1375 host->clk = clk_get(&pdev->dev, "nfc"); 1402 host->clk = devm_clk_get(&pdev->dev, NULL);
1376 if (IS_ERR(host->clk)) { 1403 if (IS_ERR(host->clk))
1377 err = PTR_ERR(host->clk); 1404 return PTR_ERR(host->clk);
1378 goto eclk;
1379 }
1380 1405
1381 clk_prepare_enable(host->clk); 1406 err = mxcnd_probe_dt(host);
1382 host->clk_act = 1; 1407 if (err > 0)
1408 err = mxcnd_probe_pdata(host);
1409 if (err < 0)
1410 return err;
1383 1411
1384 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1412 if (host->devtype_data->needs_ip) {
1385 if (!res) { 1413 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1386 err = -ENODEV; 1414 if (!res)
1387 goto eres; 1415 return -ENODEV;
1388 } 1416 host->regs_ip = devm_request_and_ioremap(&pdev->dev, res);
1417 if (!host->regs_ip)
1418 return -ENOMEM;
1389 1419
1390 host->base = ioremap(res->start, resource_size(res)); 1420 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1391 if (!host->base) { 1421 } else {
1392 err = -ENOMEM; 1422 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1393 goto eres;
1394 } 1423 }
1395 1424
1396 host->main_area0 = host->base; 1425 if (!res)
1426 return -ENODEV;
1397 1427
1398 err = mxcnd_probe_dt(host); 1428 host->base = devm_request_and_ioremap(&pdev->dev, res);
1399 if (err > 0) 1429 if (!host->base)
1400 err = mxcnd_probe_pdata(host); 1430 return -ENOMEM;
1401 if (err < 0) 1431
1402 goto eirq; 1432 host->main_area0 = host->base;
1403 1433
1404 if (host->devtype_data->regs_offset) 1434 if (host->devtype_data->regs_offset)
1405 host->regs = host->base + host->devtype_data->regs_offset; 1435 host->regs = host->base + host->devtype_data->regs_offset;
@@ -1414,19 +1444,6 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1414 this->ecc.size = 512; 1444 this->ecc.size = 512;
1415 this->ecc.layout = host->devtype_data->ecclayout_512; 1445 this->ecc.layout = host->devtype_data->ecclayout_512;
1416 1446
1417 if (host->devtype_data->needs_ip) {
1418 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1419 if (!res) {
1420 err = -ENODEV;
1421 goto eirq;
1422 }
1423 host->regs_ip = ioremap(res->start, resource_size(res));
1424 if (!host->regs_ip) {
1425 err = -ENOMEM;
1426 goto eirq;
1427 }
1428 }
1429
1430 if (host->pdata.hw_ecc) { 1447 if (host->pdata.hw_ecc) {
1431 this->ecc.calculate = mxc_nand_calculate_ecc; 1448 this->ecc.calculate = mxc_nand_calculate_ecc;
1432 this->ecc.hwctl = mxc_nand_enable_hwecc; 1449 this->ecc.hwctl = mxc_nand_enable_hwecc;
@@ -1458,9 +1475,13 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1458 */ 1475 */
1459 host->devtype_data->irq_control(host, 0); 1476 host->devtype_data->irq_control(host, 0);
1460 1477
1461 err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host); 1478 err = devm_request_irq(&pdev->dev, host->irq, mxc_nfc_irq,
1479 IRQF_DISABLED, DRIVER_NAME, host);
1462 if (err) 1480 if (err)
1463 goto eirq; 1481 return err;
1482
1483 clk_prepare_enable(host->clk);
1484 host->clk_act = 1;
1464 1485
1465 /* 1486 /*
1466 * Now that we "own" the interrupt make sure the interrupt mask bit is 1487 * Now that we "own" the interrupt make sure the interrupt mask bit is
@@ -1512,15 +1533,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1512 return 0; 1533 return 0;
1513 1534
1514escan: 1535escan:
1515 free_irq(host->irq, host); 1536 clk_disable_unprepare(host->clk);
1516eirq:
1517 if (host->regs_ip)
1518 iounmap(host->regs_ip);
1519 iounmap(host->base);
1520eres:
1521 clk_put(host->clk);
1522eclk:
1523 kfree(host);
1524 1537
1525 return err; 1538 return err;
1526} 1539}
@@ -1529,16 +1542,9 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
1529{ 1542{
1530 struct mxc_nand_host *host = platform_get_drvdata(pdev); 1543 struct mxc_nand_host *host = platform_get_drvdata(pdev);
1531 1544
1532 clk_put(host->clk);
1533
1534 platform_set_drvdata(pdev, NULL); 1545 platform_set_drvdata(pdev, NULL);
1535 1546
1536 nand_release(&host->mtd); 1547 nand_release(&host->mtd);
1537 free_irq(host->irq, host);
1538 if (host->regs_ip)
1539 iounmap(host->regs_ip);
1540 iounmap(host->base);
1541 kfree(host);
1542 1548
1543 return 0; 1549 return 0;
1544} 1550}
@@ -1549,22 +1555,10 @@ static struct platform_driver mxcnd_driver = {
1549 .owner = THIS_MODULE, 1555 .owner = THIS_MODULE,
1550 .of_match_table = of_match_ptr(mxcnd_dt_ids), 1556 .of_match_table = of_match_ptr(mxcnd_dt_ids),
1551 }, 1557 },
1558 .probe = mxcnd_probe,
1552 .remove = __devexit_p(mxcnd_remove), 1559 .remove = __devexit_p(mxcnd_remove),
1553}; 1560};
1554 1561module_platform_driver(mxcnd_driver);
1555static int __init mxc_nd_init(void)
1556{
1557 return platform_driver_probe(&mxcnd_driver, mxcnd_probe);
1558}
1559
1560static void __exit mxc_nd_cleanup(void)
1561{
1562 /* Unregister the device structure */
1563 platform_driver_unregister(&mxcnd_driver);
1564}
1565
1566module_init(mxc_nd_init);
1567module_exit(mxc_nd_cleanup);
1568 1562
1569MODULE_AUTHOR("Freescale Semiconductor, Inc."); 1563MODULE_AUTHOR("Freescale Semiconductor, Inc.");
1570MODULE_DESCRIPTION("MXC NAND MTD driver"); 1564MODULE_DESCRIPTION("MXC NAND MTD driver");
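The NFC_V3_CONFIG2_PPB() change above exists because the 2-bit pages-per-block field sits at a different position in CONFIG2 on the two v3.2 variants: bit 7 on i.MX51 and bit 8 on i.MX53, matching the new ppb_shift values. A short sketch of the encoding used by preset_v3() (helper name is illustrative):

#include <linux/types.h>
#include <linux/bitops.h>	/* ffs() */

/* PPB field: 0 -> 32, 1 -> 64, 2 -> 128, 3 -> 256 pages per block.
 * Example: 128 KiB erase blocks of 2 KiB pages give 64 pages per block,
 * so ffs(64) - 6 = 7 - 6 = 1 and the field value is 1. */
static u32 example_config2_ppb(u32 erasesize, u32 writesize, int ppb_shift)
{
	u32 ppb = ffs(erasesize / writesize) - 6;

	return (ppb & 0x3) << ppb_shift;	/* shift 7 on i.MX51, 8 on i.MX53 */
}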
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index a11253a0fcab..ec6841d8e956 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -243,25 +243,6 @@ static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
243} 243}
244 244
245/** 245/**
246 * nand_verify_buf - [DEFAULT] Verify chip data against buffer
247 * @mtd: MTD device structure
248 * @buf: buffer containing the data to compare
249 * @len: number of bytes to compare
250 *
251 * Default verify function for 8bit buswidth.
252 */
253static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
254{
255 int i;
256 struct nand_chip *chip = mtd->priv;
257
258 for (i = 0; i < len; i++)
259 if (buf[i] != readb(chip->IO_ADDR_R))
260 return -EFAULT;
261 return 0;
262}
263
264/**
265 * nand_write_buf16 - [DEFAULT] write buffer to chip 246 * nand_write_buf16 - [DEFAULT] write buffer to chip
266 * @mtd: MTD device structure 247 * @mtd: MTD device structure
267 * @buf: data buffer 248 * @buf: data buffer
@@ -301,28 +282,6 @@ static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
301} 282}
302 283
303/** 284/**
304 * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer
305 * @mtd: MTD device structure
306 * @buf: buffer containing the data to compare
307 * @len: number of bytes to compare
308 *
309 * Default verify function for 16bit buswidth.
310 */
311static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
312{
313 int i;
314 struct nand_chip *chip = mtd->priv;
315 u16 *p = (u16 *) buf;
316 len >>= 1;
317
318 for (i = 0; i < len; i++)
319 if (p[i] != readw(chip->IO_ADDR_R))
320 return -EFAULT;
321
322 return 0;
323}
324
325/**
326 * nand_block_bad - [DEFAULT] Read bad block marker from the chip 285 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
327 * @mtd: MTD device structure 286 * @mtd: MTD device structure
328 * @ofs: offset from device start 287 * @ofs: offset from device start
@@ -1525,7 +1484,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1525 ret = chip->ecc.read_page_raw(mtd, chip, bufpoi, 1484 ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
1526 oob_required, 1485 oob_required,
1527 page); 1486 page);
1528 else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob) 1487 else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
1488 !oob)
1529 ret = chip->ecc.read_subpage(mtd, chip, 1489 ret = chip->ecc.read_subpage(mtd, chip,
1530 col, bytes, bufpoi); 1490 col, bytes, bufpoi);
1531 else 1491 else
@@ -1542,7 +1502,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1542 1502
1543 /* Transfer not aligned data */ 1503 /* Transfer not aligned data */
1544 if (!aligned) { 1504 if (!aligned) {
1545 if (!NAND_SUBPAGE_READ(chip) && !oob && 1505 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
1546 !(mtd->ecc_stats.failed - stats.failed) && 1506 !(mtd->ecc_stats.failed - stats.failed) &&
1547 (ops->mode != MTD_OPS_RAW)) { 1507 (ops->mode != MTD_OPS_RAW)) {
1548 chip->pagebuf = realpage; 1508 chip->pagebuf = realpage;
@@ -1565,14 +1525,6 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1565 oobreadlen -= toread; 1525 oobreadlen -= toread;
1566 } 1526 }
1567 } 1527 }
1568
1569 if (!(chip->options & NAND_NO_READRDY)) {
1570 /* Apply delay or wait for ready/busy pin */
1571 if (!chip->dev_ready)
1572 udelay(chip->chip_delay);
1573 else
1574 nand_wait_ready(mtd);
1575 }
1576 } else { 1528 } else {
1577 memcpy(buf, chip->buffers->databuf + col, bytes); 1529 memcpy(buf, chip->buffers->databuf + col, bytes);
1578 buf += bytes; 1530 buf += bytes;
@@ -1633,7 +1585,7 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
1633 ops.len = len; 1585 ops.len = len;
1634 ops.datbuf = buf; 1586 ops.datbuf = buf;
1635 ops.oobbuf = NULL; 1587 ops.oobbuf = NULL;
1636 ops.mode = 0; 1588 ops.mode = MTD_OPS_PLACE_OOB;
1637 ret = nand_do_read_ops(mtd, from, &ops); 1589 ret = nand_do_read_ops(mtd, from, &ops);
1638 *retlen = ops.retlen; 1590 *retlen = ops.retlen;
1639 nand_release_device(mtd); 1591 nand_release_device(mtd);
@@ -1837,14 +1789,6 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1837 len = min(len, readlen); 1789 len = min(len, readlen);
1838 buf = nand_transfer_oob(chip, buf, ops, len); 1790 buf = nand_transfer_oob(chip, buf, ops, len);
1839 1791
1840 if (!(chip->options & NAND_NO_READRDY)) {
1841 /* Apply delay or wait for ready/busy pin */
1842 if (!chip->dev_ready)
1843 udelay(chip->chip_delay);
1844 else
1845 nand_wait_ready(mtd);
1846 }
1847
1848 readlen -= len; 1792 readlen -= len;
1849 if (!readlen) 1793 if (!readlen)
1850 break; 1794 break;
@@ -1927,12 +1871,14 @@ out:
1927 * 1871 *
1928 * Not for syndrome calculating ECC controllers, which use a special oob layout. 1872 * Not for syndrome calculating ECC controllers, which use a special oob layout.
1929 */ 1873 */
1930static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1874static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1931 const uint8_t *buf, int oob_required) 1875 const uint8_t *buf, int oob_required)
1932{ 1876{
1933 chip->write_buf(mtd, buf, mtd->writesize); 1877 chip->write_buf(mtd, buf, mtd->writesize);
1934 if (oob_required) 1878 if (oob_required)
1935 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 1879 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1880
1881 return 0;
1936} 1882}
1937 1883
1938/** 1884/**
@@ -1944,7 +1890,7 @@ static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1944 * 1890 *
1945 * We need a special oob layout and handling even when ECC isn't checked. 1891 * We need a special oob layout and handling even when ECC isn't checked.
1946 */ 1892 */
1947static void nand_write_page_raw_syndrome(struct mtd_info *mtd, 1893static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
1948 struct nand_chip *chip, 1894 struct nand_chip *chip,
1949 const uint8_t *buf, int oob_required) 1895 const uint8_t *buf, int oob_required)
1950{ 1896{
@@ -1974,6 +1920,8 @@ static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
1974 size = mtd->oobsize - (oob - chip->oob_poi); 1920 size = mtd->oobsize - (oob - chip->oob_poi);
1975 if (size) 1921 if (size)
1976 chip->write_buf(mtd, oob, size); 1922 chip->write_buf(mtd, oob, size);
1923
1924 return 0;
1977} 1925}
1978/** 1926/**
1979 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function 1927 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
@@ -1982,7 +1930,7 @@ static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
1982 * @buf: data buffer 1930 * @buf: data buffer
1983 * @oob_required: must write chip->oob_poi to OOB 1931 * @oob_required: must write chip->oob_poi to OOB
1984 */ 1932 */
1985static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, 1933static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1986 const uint8_t *buf, int oob_required) 1934 const uint8_t *buf, int oob_required)
1987{ 1935{
1988 int i, eccsize = chip->ecc.size; 1936 int i, eccsize = chip->ecc.size;
@@ -1999,7 +1947,7 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1999 for (i = 0; i < chip->ecc.total; i++) 1947 for (i = 0; i < chip->ecc.total; i++)
2000 chip->oob_poi[eccpos[i]] = ecc_calc[i]; 1948 chip->oob_poi[eccpos[i]] = ecc_calc[i];
2001 1949
2002 chip->ecc.write_page_raw(mtd, chip, buf, 1); 1950 return chip->ecc.write_page_raw(mtd, chip, buf, 1);
2003} 1951}
2004 1952
2005/** 1953/**
@@ -2009,7 +1957,7 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
2009 * @buf: data buffer 1957 * @buf: data buffer
2010 * @oob_required: must write chip->oob_poi to OOB 1958 * @oob_required: must write chip->oob_poi to OOB
2011 */ 1959 */
2012static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 1960static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2013 const uint8_t *buf, int oob_required) 1961 const uint8_t *buf, int oob_required)
2014{ 1962{
2015 int i, eccsize = chip->ecc.size; 1963 int i, eccsize = chip->ecc.size;
@@ -2029,6 +1977,8 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2029 chip->oob_poi[eccpos[i]] = ecc_calc[i]; 1977 chip->oob_poi[eccpos[i]] = ecc_calc[i];
2030 1978
2031 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 1979 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1980
1981 return 0;
2032} 1982}
2033 1983
2034/** 1984/**
@@ -2041,7 +1991,7 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2041 * The hw generator calculates the error syndrome automatically. Therefore we 1991 * The hw generator calculates the error syndrome automatically. Therefore we
2042 * need a special oob layout and handling. 1992 * need a special oob layout and handling.
2043 */ 1993 */
2044static void nand_write_page_syndrome(struct mtd_info *mtd, 1994static int nand_write_page_syndrome(struct mtd_info *mtd,
2045 struct nand_chip *chip, 1995 struct nand_chip *chip,
2046 const uint8_t *buf, int oob_required) 1996 const uint8_t *buf, int oob_required)
2047{ 1997{
@@ -2075,6 +2025,8 @@ static void nand_write_page_syndrome(struct mtd_info *mtd,
2075 i = mtd->oobsize - (oob - chip->oob_poi); 2025 i = mtd->oobsize - (oob - chip->oob_poi);
2076 if (i) 2026 if (i)
2077 chip->write_buf(mtd, oob, i); 2027 chip->write_buf(mtd, oob, i);
2028
2029 return 0;
2078} 2030}
2079 2031
2080/** 2032/**
@@ -2096,9 +2048,12 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2096 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); 2048 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
2097 2049
2098 if (unlikely(raw)) 2050 if (unlikely(raw))
2099 chip->ecc.write_page_raw(mtd, chip, buf, oob_required); 2051 status = chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
2100 else 2052 else
2101 chip->ecc.write_page(mtd, chip, buf, oob_required); 2053 status = chip->ecc.write_page(mtd, chip, buf, oob_required);
2054
2055 if (status < 0)
2056 return status;
2102 2057
2103 /* 2058 /*
 2104	 * Cached programming disabled for now. Not sure if it's worth the 2059	 * Cached programming disabled for now. Not sure if it's worth the
@@ -2125,16 +2080,6 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2125 status = chip->waitfunc(mtd, chip); 2080 status = chip->waitfunc(mtd, chip);
2126 } 2081 }
2127 2082
2128#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
2129 /* Send command to read back the data */
2130 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
2131
2132 if (chip->verify_buf(mtd, buf, mtd->writesize))
2133 return -EIO;
2134
2135 /* Make sure the next page prog is preceded by a status read */
2136 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
2137#endif
2138 return 0; 2083 return 0;
2139} 2084}
2140 2085
@@ -2336,7 +2281,7 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2336 ops.len = len; 2281 ops.len = len;
2337 ops.datbuf = (uint8_t *)buf; 2282 ops.datbuf = (uint8_t *)buf;
2338 ops.oobbuf = NULL; 2283 ops.oobbuf = NULL;
2339 ops.mode = 0; 2284 ops.mode = MTD_OPS_PLACE_OOB;
2340 2285
2341 ret = nand_do_write_ops(mtd, to, &ops); 2286 ret = nand_do_write_ops(mtd, to, &ops);
2342 2287
@@ -2365,7 +2310,7 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2365 ops.len = len; 2310 ops.len = len;
2366 ops.datbuf = (uint8_t *)buf; 2311 ops.datbuf = (uint8_t *)buf;
2367 ops.oobbuf = NULL; 2312 ops.oobbuf = NULL;
2368 ops.mode = 0; 2313 ops.mode = MTD_OPS_PLACE_OOB;
2369 ret = nand_do_write_ops(mtd, to, &ops); 2314 ret = nand_do_write_ops(mtd, to, &ops);
2370 *retlen = ops.retlen; 2315 *retlen = ops.retlen;
2371 nand_release_device(mtd); 2316 nand_release_device(mtd);
@@ -2755,6 +2700,50 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
2755} 2700}
2756 2701
2757/** 2702/**
2703 * nand_onfi_set_features- [REPLACEABLE] set features for ONFI nand
2704 * @mtd: MTD device structure
2705 * @chip: nand chip info structure
2706 * @addr: feature address.
2707 * @subfeature_param: the subfeature parameters, a four bytes array.
2708 */
2709static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
2710 int addr, uint8_t *subfeature_param)
2711{
2712 int status;
2713
2714 if (!chip->onfi_version)
2715 return -EINVAL;
2716
2717 chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
2718 chip->write_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);
2719 status = chip->waitfunc(mtd, chip);
2720 if (status & NAND_STATUS_FAIL)
2721 return -EIO;
2722 return 0;
2723}
2724
2725/**
2726 * nand_onfi_get_features- [REPLACEABLE] get features for ONFI nand
2727 * @mtd: MTD device structure
2728 * @chip: nand chip info structure
2729 * @addr: feature address.
2730 * @subfeature_param: the subfeature parameters, a four bytes array.
2731 */
2732static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
2733 int addr, uint8_t *subfeature_param)
2734{
2735 if (!chip->onfi_version)
2736 return -EINVAL;
2737
2738 /* clear the sub feature parameters */
2739 memset(subfeature_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
2740
2741 chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
2742 chip->read_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);
2743 return 0;
2744}
2745
2746/**
2758 * nand_suspend - [MTD Interface] Suspend the NAND flash 2747 * nand_suspend - [MTD Interface] Suspend the NAND flash
2759 * @mtd: MTD device structure 2748 * @mtd: MTD device structure
2760 */ 2749 */
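A hedged sketch of how a caller might use the two new hooks once nand_scan_tail() has installed them, for example to request a faster ONFI timing mode. The 0x01 feature address ("Timing Mode") and the meaning of the first subfeature byte come from the ONFI specification rather than from this patch, so treat those details as assumptions:

#include <linux/mtd/nand.h>

/* Illustration only: ask an ONFI chip for the given timing mode and read
 * the feature back to confirm the device accepted it. */
static int example_onfi_set_timing_mode(struct mtd_info *mtd,
					struct nand_chip *chip, u8 mode)
{
	u8 param[ONFI_SUBFEATURE_PARAM_LEN] = { mode };
	int ret;

	ret = chip->onfi_set_features(mtd, chip, 0x01, param);
	if (ret)
		return ret;

	ret = chip->onfi_get_features(mtd, chip, 0x01, param);
	if (ret)
		return ret;

	return (param[0] & 0x0f) == mode ? 0 : -EIO;
}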
@@ -2809,8 +2798,6 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
2809 chip->write_buf = busw ? nand_write_buf16 : nand_write_buf; 2798 chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
2810 if (!chip->read_buf) 2799 if (!chip->read_buf)
2811 chip->read_buf = busw ? nand_read_buf16 : nand_read_buf; 2800 chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
2812 if (!chip->verify_buf)
2813 chip->verify_buf = busw ? nand_verify_buf16 : nand_verify_buf;
2814 if (!chip->scan_bbt) 2801 if (!chip->scan_bbt)
2815 chip->scan_bbt = nand_default_bbt; 2802 chip->scan_bbt = nand_default_bbt;
2816 2803
@@ -2914,14 +2901,250 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2914 if (le16_to_cpu(p->features) & 1) 2901 if (le16_to_cpu(p->features) & 1)
2915 *busw = NAND_BUSWIDTH_16; 2902 *busw = NAND_BUSWIDTH_16;
2916 2903
2917 chip->options &= ~NAND_CHIPOPTIONS_MSK;
2918 chip->options |= NAND_NO_READRDY & NAND_CHIPOPTIONS_MSK;
2919
2920 pr_info("ONFI flash detected\n"); 2904 pr_info("ONFI flash detected\n");
2921 return 1; 2905 return 1;
2922} 2906}
2923 2907
2924/* 2908/*
2909 * nand_id_has_period - Check if an ID string has a given wraparound period
2910 * @id_data: the ID string
2911 * @arrlen: the length of the @id_data array
 2912 * @period: the period of repetition
2913 *
2914 * Check if an ID string is repeated within a given sequence of bytes at
2915 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
 2916 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
2917 * if the repetition has a period of @period; otherwise, returns zero.
2918 */
2919static int nand_id_has_period(u8 *id_data, int arrlen, int period)
2920{
2921 int i, j;
2922 for (i = 0; i < period; i++)
2923 for (j = i + period; j < arrlen; j += period)
2924 if (id_data[i] != id_data[j])
2925 return 0;
2926 return 1;
2927}
2928
2929/*
2930 * nand_id_len - Get the length of an ID string returned by CMD_READID
2931 * @id_data: the ID string
2932 * @arrlen: the length of the @id_data array
 2933 *
2934 * Returns the length of the ID string, according to known wraparound/trailing
2935 * zero patterns. If no pattern exists, returns the length of the array.
2936 */
2937static int nand_id_len(u8 *id_data, int arrlen)
2938{
2939 int last_nonzero, period;
2940
2941 /* Find last non-zero byte */
2942 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
2943 if (id_data[last_nonzero])
2944 break;
2945
2946 /* All zeros */
2947 if (last_nonzero < 0)
2948 return 0;
2949
2950 /* Calculate wraparound period */
2951 for (period = 1; period < arrlen; period++)
2952 if (nand_id_has_period(id_data, arrlen, period))
2953 break;
2954
2955 /* There's a repeated pattern */
2956 if (period < arrlen)
2957 return period;
2958
2959 /* There are trailing zeros */
2960 if (last_nonzero < arrlen - 1)
2961 return last_nonzero + 1;
2962
2963 /* No pattern detected */
2964 return arrlen;
2965}
2966
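A few worked examples of the wraparound/trailing-zero detection; the ID strings below are invented for illustration and are not taken from any datasheet:

u8 a[8] = { 0x20, 0x01, 0x7f, 0x20, 0x01, 0x7f, 0x20, 0x01 };
u8 b[8] = { 0xec, 0xd3, 0x51, 0x95, 0x58, 0xec, 0xd3, 0x51 };
u8 c[8] = { 0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x01, 0x00 };

nand_id_len(a, 8);	/* == 3: the string repeats with period 3 */
nand_id_len(b, 8);	/* == 5: the string repeats with period 5 */
nand_id_len(c, 8);	/* == 7: no wraparound, one trailing zero byte */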
2967/*
 2968 * Many newer NAND chips share similar device ID codes, which represent the size of the
2969 * chip. The rest of the parameters must be decoded according to generic or
2970 * manufacturer-specific "extended ID" decoding patterns.
2971 */
2972static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
2973 u8 id_data[8], int *busw)
2974{
2975 int extid, id_len;
2976 /* The 3rd id byte holds MLC / multichip data */
2977 chip->cellinfo = id_data[2];
2978 /* The 4th id byte is the important one */
2979 extid = id_data[3];
2980
2981 id_len = nand_id_len(id_data, 8);
2982
2983 /*
2984 * Field definitions are in the following datasheets:
2985 * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
2986 * New style (6 byte ID): Samsung K9GAG08U0F (p.44)
2987 * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22)
2988 *
2989 * Check for ID length, cell type, and Hynix/Samsung ID to decide what
2990 * to do.
2991 */
2992 if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG) {
2993 /* Calc pagesize */
2994 mtd->writesize = 2048 << (extid & 0x03);
2995 extid >>= 2;
2996 /* Calc oobsize */
2997 switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
2998 case 1:
2999 mtd->oobsize = 128;
3000 break;
3001 case 2:
3002 mtd->oobsize = 218;
3003 break;
3004 case 3:
3005 mtd->oobsize = 400;
3006 break;
3007 case 4:
3008 mtd->oobsize = 436;
3009 break;
3010 case 5:
3011 mtd->oobsize = 512;
3012 break;
3013 case 6:
3014 default: /* Other cases are "reserved" (unknown) */
3015 mtd->oobsize = 640;
3016 break;
3017 }
3018 extid >>= 2;
3019 /* Calc blocksize */
3020 mtd->erasesize = (128 * 1024) <<
3021 (((extid >> 1) & 0x04) | (extid & 0x03));
3022 *busw = 0;
3023 } else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
3024 (chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
3025 unsigned int tmp;
3026
3027 /* Calc pagesize */
3028 mtd->writesize = 2048 << (extid & 0x03);
3029 extid >>= 2;
3030 /* Calc oobsize */
3031 switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
3032 case 0:
3033 mtd->oobsize = 128;
3034 break;
3035 case 1:
3036 mtd->oobsize = 224;
3037 break;
3038 case 2:
3039 mtd->oobsize = 448;
3040 break;
3041 case 3:
3042 mtd->oobsize = 64;
3043 break;
3044 case 4:
3045 mtd->oobsize = 32;
3046 break;
3047 case 5:
3048 mtd->oobsize = 16;
3049 break;
3050 default:
3051 mtd->oobsize = 640;
3052 break;
3053 }
3054 extid >>= 2;
3055 /* Calc blocksize */
3056 tmp = ((extid >> 1) & 0x04) | (extid & 0x03);
3057 if (tmp < 0x03)
3058 mtd->erasesize = (128 * 1024) << tmp;
3059 else if (tmp == 0x03)
3060 mtd->erasesize = 768 * 1024;
3061 else
3062 mtd->erasesize = (64 * 1024) << tmp;
3063 *busw = 0;
3064 } else {
3065 /* Calc pagesize */
3066 mtd->writesize = 1024 << (extid & 0x03);
3067 extid >>= 2;
3068 /* Calc oobsize */
3069 mtd->oobsize = (8 << (extid & 0x01)) *
3070 (mtd->writesize >> 9);
3071 extid >>= 2;
3072 /* Calc blocksize. Blocksize is multiples of 64KiB */
3073 mtd->erasesize = (64 * 1024) << (extid & 0x03);
3074 extid >>= 2;
3075 /* Get buswidth information */
3076 *busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
3077 }
3078}
3079
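A worked walk through the 6-byte Samsung branch for a hypothetical MLC part whose 4th ID byte is 0x76; the value is invented purely to exercise the bit fields:

/* extid = 0x76 (binary 0111 0110), hypothetical
 *
 *   page size : 2048 << (0x76 & 0x3)               = 2048 << 2 = 8192 bytes
 *   extid >>= 2                                    -> 0x1d
 *   OOB select: ((0x1d >> 2) & 0x4) | (0x1d & 0x3) = 4 | 1 = 5  -> 512 bytes
 *   extid >>= 2                                    -> 0x07
 *   blk select: ((0x07 >> 1) & 0x4) | (0x07 & 0x3) = 0 | 3 = 3
 *   block size: (128 * 1024) << 3                  = 1 MiB
 */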
3080/*
3081 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
3082 * decodes a matching ID table entry and assigns the MTD size parameters for
3083 * the chip.
3084 */
3085static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,
3086 struct nand_flash_dev *type, u8 id_data[8],
3087 int *busw)
3088{
3089 int maf_id = id_data[0];
3090
3091 mtd->erasesize = type->erasesize;
3092 mtd->writesize = type->pagesize;
3093 mtd->oobsize = mtd->writesize / 32;
3094 *busw = type->options & NAND_BUSWIDTH_16;
3095
3096 /*
3097 * Check for Spansion/AMD ID + repeating 5th, 6th byte since
3098 * some Spansion chips have erasesize that conflicts with size
3099 * listed in nand_ids table.
3100 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
3101 */
3102 if (maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && id_data[5] == 0x00
3103 && id_data[6] == 0x00 && id_data[7] == 0x00
3104 && mtd->writesize == 512) {
3105 mtd->erasesize = 128 * 1024;
3106 mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
3107 }
3108}
3109
3110/*
3111 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
3112 * heuristic patterns using various detected parameters (e.g., manufacturer,
3113 * page size, cell-type information).
3114 */
3115static void nand_decode_bbm_options(struct mtd_info *mtd,
3116 struct nand_chip *chip, u8 id_data[8])
3117{
3118 int maf_id = id_data[0];
3119
3120 /* Set the bad block position */
3121 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
3122 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
3123 else
3124 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
3125
3126 /*
3127 * Bad block marker is stored in the last page of each block on Samsung
3128 * and Hynix MLC devices; stored in first two pages of each block on
3129 * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
3130 * AMD/Spansion, and Macronix. All others scan only the first page.
3131 */
3132 if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
3133 (maf_id == NAND_MFR_SAMSUNG ||
3134 maf_id == NAND_MFR_HYNIX))
3135 chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
3136 else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
3137 (maf_id == NAND_MFR_SAMSUNG ||
3138 maf_id == NAND_MFR_HYNIX ||
3139 maf_id == NAND_MFR_TOSHIBA ||
3140 maf_id == NAND_MFR_AMD ||
3141 maf_id == NAND_MFR_MACRONIX)) ||
3142 (mtd->writesize == 2048 &&
3143 maf_id == NAND_MFR_MICRON))
3144 chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
3145}
3146
3147/*
2925 * Get the flash and manufacturer id and lookup if the type is supported. 3148 * Get the flash and manufacturer id and lookup if the type is supported.
2926 */ 3149 */
2927static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, 3150static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
@@ -2932,7 +3155,6 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2932{ 3155{
2933 int i, maf_idx; 3156 int i, maf_idx;
2934 u8 id_data[8]; 3157 u8 id_data[8];
2935 int ret;
2936 3158
2937 /* Select the device */ 3159 /* Select the device */
2938 chip->select_chip(mtd, 0); 3160 chip->select_chip(mtd, 0);
@@ -2959,7 +3181,8 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2959 3181
2960 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 3182 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
2961 3183
2962 for (i = 0; i < 2; i++) 3184 /* Read entire ID string */
3185 for (i = 0; i < 8; i++)
2963 id_data[i] = chip->read_byte(mtd); 3186 id_data[i] = chip->read_byte(mtd);
2964 3187
2965 if (id_data[0] != *maf_id || id_data[1] != *dev_id) { 3188 if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
@@ -2979,18 +3202,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2979 chip->onfi_version = 0; 3202 chip->onfi_version = 0;
2980 if (!type->name || !type->pagesize) { 3203 if (!type->name || !type->pagesize) {
 2981 /* Check if chip is ONFI compliant */ 3204 /* Check if chip is ONFI compliant */
2982 ret = nand_flash_detect_onfi(mtd, chip, &busw); 3205 if (nand_flash_detect_onfi(mtd, chip, &busw))
2983 if (ret)
2984 goto ident_done; 3206 goto ident_done;
2985 } 3207 }
2986 3208
2987 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
2988
2989 /* Read entire ID string */
2990
2991 for (i = 0; i < 8; i++)
2992 id_data[i] = chip->read_byte(mtd);
2993
2994 if (!type->name) 3209 if (!type->name)
2995 return ERR_PTR(-ENODEV); 3210 return ERR_PTR(-ENODEV);
2996 3211
@@ -3003,86 +3218,13 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
3003 /* Set the pagesize, oobsize, erasesize by the driver */ 3218 /* Set the pagesize, oobsize, erasesize by the driver */
3004 busw = chip->init_size(mtd, chip, id_data); 3219 busw = chip->init_size(mtd, chip, id_data);
3005 } else if (!type->pagesize) { 3220 } else if (!type->pagesize) {
3006 int extid; 3221 /* Decode parameters from extended ID */
3007 /* The 3rd id byte holds MLC / multichip data */ 3222 nand_decode_ext_id(mtd, chip, id_data, &busw);
3008 chip->cellinfo = id_data[2];
3009 /* The 4th id byte is the important one */
3010 extid = id_data[3];
3011
3012 /*
3013 * Field definitions are in the following datasheets:
3014 * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
3015 * New style (6 byte ID): Samsung K9GBG08U0M (p.40)
3016 *
3017 * Check for wraparound + Samsung ID + nonzero 6th byte
3018 * to decide what to do.
3019 */
3020 if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
3021 id_data[0] == NAND_MFR_SAMSUNG &&
3022 (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
3023 id_data[5] != 0x00) {
3024 /* Calc pagesize */
3025 mtd->writesize = 2048 << (extid & 0x03);
3026 extid >>= 2;
3027 /* Calc oobsize */
3028 switch (extid & 0x03) {
3029 case 1:
3030 mtd->oobsize = 128;
3031 break;
3032 case 2:
3033 mtd->oobsize = 218;
3034 break;
3035 case 3:
3036 mtd->oobsize = 400;
3037 break;
3038 default:
3039 mtd->oobsize = 436;
3040 break;
3041 }
3042 extid >>= 2;
3043 /* Calc blocksize */
3044 mtd->erasesize = (128 * 1024) <<
3045 (((extid >> 1) & 0x04) | (extid & 0x03));
3046 busw = 0;
3047 } else {
3048 /* Calc pagesize */
3049 mtd->writesize = 1024 << (extid & 0x03);
3050 extid >>= 2;
3051 /* Calc oobsize */
3052 mtd->oobsize = (8 << (extid & 0x01)) *
3053 (mtd->writesize >> 9);
3054 extid >>= 2;
3055 /* Calc blocksize. Blocksize is multiples of 64KiB */
3056 mtd->erasesize = (64 * 1024) << (extid & 0x03);
3057 extid >>= 2;
3058 /* Get buswidth information */
3059 busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
3060 }
3061 } else { 3223 } else {
3062 /* 3224 nand_decode_id(mtd, chip, type, id_data, &busw);
3063 * Old devices have chip data hardcoded in the device id table.
3064 */
3065 mtd->erasesize = type->erasesize;
3066 mtd->writesize = type->pagesize;
3067 mtd->oobsize = mtd->writesize / 32;
3068 busw = type->options & NAND_BUSWIDTH_16;
3069
3070 /*
3071 * Check for Spansion/AMD ID + repeating 5th, 6th byte since
3072 * some Spansion chips have erasesize that conflicts with size
3073 * listed in nand_ids table.
3074 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
3075 */
3076 if (*maf_id == NAND_MFR_AMD && id_data[4] != 0x00 &&
3077 id_data[5] == 0x00 && id_data[6] == 0x00 &&
3078 id_data[7] == 0x00 && mtd->writesize == 512) {
3079 mtd->erasesize = 128 * 1024;
3080 mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
3081 }
3082 } 3225 }
3083 /* Get chip options, preserve non chip based options */ 3226 /* Get chip options */
3084 chip->options &= ~NAND_CHIPOPTIONS_MSK; 3227 chip->options |= type->options;
3085 chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
3086 3228
3087 /* 3229 /*
3088 * Check if chip is not a Samsung device. Do not clear the 3230 * Check if chip is not a Samsung device. Do not clear the
@@ -3112,6 +3254,8 @@ ident_done:
3112 return ERR_PTR(-EINVAL); 3254 return ERR_PTR(-EINVAL);
3113 } 3255 }
3114 3256
3257 nand_decode_bbm_options(mtd, chip, id_data);
3258
3115 /* Calculate the address shift from the page size */ 3259 /* Calculate the address shift from the page size */
3116 chip->page_shift = ffs(mtd->writesize) - 1; 3260 chip->page_shift = ffs(mtd->writesize) - 1;
3117 /* Convert chipsize to number of pages per chip -1 */ 3261 /* Convert chipsize to number of pages per chip -1 */
@@ -3128,33 +3272,6 @@ ident_done:
3128 3272
3129 chip->badblockbits = 8; 3273 chip->badblockbits = 8;
3130 3274
3131 /* Set the bad block position */
3132 if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
3133 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
3134 else
3135 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
3136
3137 /*
3138 * Bad block marker is stored in the last page of each block
3139 * on Samsung and Hynix MLC devices; stored in first two pages
3140 * of each block on Micron devices with 2KiB pages and on
3141 * SLC Samsung, Hynix, Toshiba, AMD/Spansion, and Macronix.
3142 * All others scan only the first page.
3143 */
3144 if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
3145 (*maf_id == NAND_MFR_SAMSUNG ||
3146 *maf_id == NAND_MFR_HYNIX))
3147 chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
3148 else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
3149 (*maf_id == NAND_MFR_SAMSUNG ||
3150 *maf_id == NAND_MFR_HYNIX ||
3151 *maf_id == NAND_MFR_TOSHIBA ||
3152 *maf_id == NAND_MFR_AMD ||
3153 *maf_id == NAND_MFR_MACRONIX)) ||
3154 (mtd->writesize == 2048 &&
3155 *maf_id == NAND_MFR_MICRON))
3156 chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
3157
3158 /* Check for AND chips with 4 page planes */ 3275 /* Check for AND chips with 4 page planes */
3159 if (chip->options & NAND_4PAGE_ARRAY) 3276 if (chip->options & NAND_4PAGE_ARRAY)
3160 chip->erase_cmd = multi_erase_cmd; 3277 chip->erase_cmd = multi_erase_cmd;
@@ -3284,6 +3401,12 @@ int nand_scan_tail(struct mtd_info *mtd)
3284 if (!chip->write_page) 3401 if (!chip->write_page)
3285 chip->write_page = nand_write_page; 3402 chip->write_page = nand_write_page;
3286 3403
3404 /* set for ONFI nand */
3405 if (!chip->onfi_set_features)
3406 chip->onfi_set_features = nand_onfi_set_features;
3407 if (!chip->onfi_get_features)
3408 chip->onfi_get_features = nand_onfi_get_features;
3409
3287 /* 3410 /*
3288 * Check ECC mode, default to software if 3byte/512byte hardware ECC is 3411 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
3289 * selected and we have 256 byte pagesize fallback to software ECC 3412 * selected and we have 256 byte pagesize fallback to software ECC
@@ -3477,6 +3600,10 @@ int nand_scan_tail(struct mtd_info *mtd)
3477 /* Invalidate the pagebuffer reference */ 3600 /* Invalidate the pagebuffer reference */
3478 chip->pagebuf = -1; 3601 chip->pagebuf = -1;
3479 3602
3603 /* Large page NAND with SOFT_ECC should support subpage reads */
3604 if ((chip->ecc.mode == NAND_ECC_SOFT) && (chip->page_shift > 9))
3605 chip->options |= NAND_SUBPAGE_READ;
3606
3480 /* Fill in remaining MTD driver data */ 3607 /* Fill in remaining MTD driver data */
3481 mtd->type = MTD_NANDFLASH; 3608 mtd->type = MTD_NANDFLASH;
3482 mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM : 3609 mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 30d1319ff065..916d6e9c0ab1 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -4,7 +4,7 @@
4 * Overview: 4 * Overview:
5 * Bad block table support for the NAND driver 5 * Bad block table support for the NAND driver
6 * 6 *
7 * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de) 7 * Copyright © 2004 Thomas Gleixner (tglx@linutronix.de)
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
@@ -22,7 +22,7 @@
22 * BBT on flash. If a BBT is found then the contents are read and the memory 22 * BBT on flash. If a BBT is found then the contents are read and the memory
23 * based BBT is created. If a mirrored BBT is selected then the mirror is 23 * based BBT is created. If a mirrored BBT is selected then the mirror is
24 * searched too and the versions are compared. If the mirror has a greater 24 * searched too and the versions are compared. If the mirror has a greater
25 * version number than the mirror BBT is used to build the memory based BBT. 25 * version number, then the mirror BBT is used to build the memory based BBT.
26 * If the tables are not versioned, then we "or" the bad block information. 26 * If the tables are not versioned, then we "or" the bad block information.
27 * If one of the BBTs is out of date or does not exist it is (re)created. 27 * If one of the BBTs is out of date or does not exist it is (re)created.
28 * If no BBT exists at all then the device is scanned for factory marked 28 * If no BBT exists at all then the device is scanned for factory marked
@@ -62,21 +62,20 @@
62#include <linux/slab.h> 62#include <linux/slab.h>
63#include <linux/types.h> 63#include <linux/types.h>
64#include <linux/mtd/mtd.h> 64#include <linux/mtd/mtd.h>
65#include <linux/mtd/bbm.h>
65#include <linux/mtd/nand.h> 66#include <linux/mtd/nand.h>
66#include <linux/mtd/nand_ecc.h> 67#include <linux/mtd/nand_ecc.h>
67#include <linux/bitops.h> 68#include <linux/bitops.h>
68#include <linux/delay.h> 69#include <linux/delay.h>
69#include <linux/vmalloc.h> 70#include <linux/vmalloc.h>
70#include <linux/export.h> 71#include <linux/export.h>
72#include <linux/string.h>
71 73
72static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td) 74static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
73{ 75{
74 int ret; 76 if (memcmp(buf, td->pattern, td->len))
75 77 return -1;
76 ret = memcmp(buf, td->pattern, td->len); 78 return 0;
77 if (!ret)
78 return ret;
79 return -1;
80} 79}
81 80
82/** 81/**
@@ -92,19 +91,16 @@ static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
92 */ 91 */
93static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td) 92static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
94{ 93{
95 int i, end = 0; 94 int end = 0;
96 uint8_t *p = buf; 95 uint8_t *p = buf;
97 96
98 if (td->options & NAND_BBT_NO_OOB) 97 if (td->options & NAND_BBT_NO_OOB)
99 return check_pattern_no_oob(buf, td); 98 return check_pattern_no_oob(buf, td);
100 99
101 end = paglen + td->offs; 100 end = paglen + td->offs;
102 if (td->options & NAND_BBT_SCANEMPTY) { 101 if (td->options & NAND_BBT_SCANEMPTY)
103 for (i = 0; i < end; i++) { 102 if (memchr_inv(p, 0xff, end))
104 if (p[i] != 0xff) 103 return -1;
105 return -1;
106 }
107 }
108 p += end; 104 p += end;
109 105
110 /* Compare the pattern */ 106 /* Compare the pattern */
@@ -114,10 +110,8 @@ static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_desc
114 if (td->options & NAND_BBT_SCANEMPTY) { 110 if (td->options & NAND_BBT_SCANEMPTY) {
115 p += td->len; 111 p += td->len;
116 end += td->len; 112 end += td->len;
117 for (i = end; i < len; i++) { 113 if (memchr_inv(p, 0xff, len - end))
118 if (*p++ != 0xff) 114 return -1;
119 return -1;
120 }
121 } 115 }
122 return 0; 116 return 0;
123} 117}
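
The reworked check_pattern() above leans on memchr_inv() to confirm that the bytes around the marker are still erased (all 0xff) before comparing the pattern itself. Below is a minimal user-space sketch of that flow; memchr_inv_sketch() and check_pattern_sketch() are illustrative stand-ins, not the kernel API, and they fold the NAND_BBT_SCANEMPTY-style emptiness check and the pattern compare into one unconditional path.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* User-space stand-in for the kernel's memchr_inv(): return a pointer to the
 * first byte that differs from 'c', or NULL if every byte matches. */
static const void *memchr_inv_sketch(const void *s, int c, size_t n)
{
	const uint8_t *p = s;
	size_t i;

	for (i = 0; i < n; i++)
		if (p[i] != (uint8_t)c)
			return p + i;
	return NULL;
}

/* Simplified check_pattern(): the bytes before the marker must be erased and
 * the marker itself must match the descriptor pattern. */
static int check_pattern_sketch(const uint8_t *buf, size_t paglen, size_t offs,
				const uint8_t *pattern, size_t len)
{
	if (memchr_inv_sketch(buf, 0xff, paglen + offs))
		return -1;	/* not erased where the scan expects 0xff */
	if (memcmp(buf + paglen + offs, pattern, len))
		return -1;	/* BBT pattern not found */
	return 0;
}

int main(void)
{
	uint8_t oob[16];
	static const uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };

	memset(oob, 0xff, sizeof(oob));
	memcpy(oob + 8, bbt_pattern, sizeof(bbt_pattern));

	printf("pattern found: %d\n",
	       check_pattern_sketch(oob, 0, 8, bbt_pattern,
				    sizeof(bbt_pattern)) == 0);
	return 0;
}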
@@ -133,14 +127,9 @@ static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_desc
133 */ 127 */
134static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td) 128static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
135{ 129{
136 int i;
137 uint8_t *p = buf;
138
139 /* Compare the pattern */ 130 /* Compare the pattern */
140 for (i = 0; i < td->len; i++) { 131 if (memcmp(buf + td->offs, td->pattern, td->len))
141 if (p[td->offs + i] != td->pattern[i]) 132 return -1;
142 return -1;
143 }
144 return 0; 133 return 0;
145} 134}
146 135
@@ -288,7 +277,7 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
288} 277}
289 278
290/* BBT marker is in the first page, no OOB */ 279/* BBT marker is in the first page, no OOB */
291static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs, 280static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
292 struct nand_bbt_descr *td) 281 struct nand_bbt_descr *td)
293{ 282{
294 size_t retlen; 283 size_t retlen;
@@ -301,14 +290,24 @@ static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
301 return mtd_read(mtd, offs, len, &retlen, buf); 290 return mtd_read(mtd, offs, len, &retlen, buf);
302} 291}
303 292
304/* Scan read raw data from flash */ 293/**
305static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs, 294 * scan_read_oob - [GENERIC] Scan data+OOB region to buffer
295 * @mtd: MTD device structure
296 * @buf: temporary buffer
297 * @offs: offset at which to scan
298 * @len: length of data region to read
299 *
300 * Scan read data from data+OOB. May traverse multiple pages, interleaving
301 * page,OOB,page,OOB,... in buf. Completes transfer and returns the "strongest"
302 * ECC condition (error or bitflip). May quit on the first (non-ECC) error.
303 */
304static int scan_read_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
306 size_t len) 305 size_t len)
307{ 306{
308 struct mtd_oob_ops ops; 307 struct mtd_oob_ops ops;
309 int res; 308 int res, ret = 0;
310 309
311 ops.mode = MTD_OPS_RAW; 310 ops.mode = MTD_OPS_PLACE_OOB;
312 ops.ooboffs = 0; 311 ops.ooboffs = 0;
313 ops.ooblen = mtd->oobsize; 312 ops.ooblen = mtd->oobsize;
314 313
@@ -318,24 +317,27 @@ static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
318 ops.oobbuf = buf + ops.len; 317 ops.oobbuf = buf + ops.len;
319 318
320 res = mtd_read_oob(mtd, offs, &ops); 319 res = mtd_read_oob(mtd, offs, &ops);
321 320 if (res) {
322 if (res) 321 if (!mtd_is_bitflip_or_eccerr(res))
323 return res; 322 return res;
323 else if (mtd_is_eccerr(res) || !ret)
324 ret = res;
325 }
324 326
325 buf += mtd->oobsize + mtd->writesize; 327 buf += mtd->oobsize + mtd->writesize;
326 len -= mtd->writesize; 328 len -= mtd->writesize;
327 offs += mtd->writesize; 329 offs += mtd->writesize;
328 } 330 }
329 return 0; 331 return ret;
330} 332}
331 333
332static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs, 334static int scan_read(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
333 size_t len, struct nand_bbt_descr *td) 335 size_t len, struct nand_bbt_descr *td)
334{ 336{
335 if (td->options & NAND_BBT_NO_OOB) 337 if (td->options & NAND_BBT_NO_OOB)
336 return scan_read_raw_data(mtd, buf, offs, td); 338 return scan_read_data(mtd, buf, offs, td);
337 else 339 else
338 return scan_read_raw_oob(mtd, buf, offs, len); 340 return scan_read_oob(mtd, buf, offs, len);
339} 341}
340 342
341/* Scan write data with oob to flash */ 343/* Scan write data with oob to flash */
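
The new scan_read_oob() keeps reading past correctable bitflips and ECC failures and reports the most severe status it saw, aborting only on other errors. A small stand-alone sketch of that folding rule follows; BITFLIP_SKETCH and ECCERR_SKETCH are illustrative stand-ins for the kernel's -EUCLEAN/-EBADMSG codes and the mtd_is_*() helpers.

#include <stdio.h>

/* Illustrative stand-ins for -EUCLEAN (bitflip) and -EBADMSG (ECC error). */
#define BITFLIP_SKETCH	(-1)
#define ECCERR_SKETCH	(-2)

static int is_bitflip(int err) { return err == BITFLIP_SKETCH; }
static int is_eccerr(int err)  { return err == ECCERR_SKETCH; }

/* Fold one per-page read status into the running result: a non-ECC error
 * aborts the scan, an ECC error overrides a bitflip, a bitflip overrides 0. */
static int fold_status(int ret, int res, int *abort)
{
	if (!res)
		return ret;
	if (!is_bitflip(res) && !is_eccerr(res)) {
		*abort = 1;
		return res;
	}
	if (is_eccerr(res) || !ret)
		return res;
	return ret;
}

int main(void)
{
	int statuses[] = { 0, BITFLIP_SKETCH, ECCERR_SKETCH, 0 };
	int i, ret = 0, abort = 0;

	for (i = 0; i < 4 && !abort; i++)
		ret = fold_status(ret, statuses[i], &abort);

	printf("aggregated status: %d\n", ret);	/* the ECC error wins */
	return 0;
}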
@@ -373,14 +375,14 @@ static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
373 * Read the bad block table(s) for all chips starting at a given page. We 375 * Read the bad block table(s) for all chips starting at a given page. We
374 * assume that the bbt bits are in consecutive order. 376 * assume that the bbt bits are in consecutive order.
375 */ 377 */
376static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf, 378static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
377 struct nand_bbt_descr *td, struct nand_bbt_descr *md) 379 struct nand_bbt_descr *td, struct nand_bbt_descr *md)
378{ 380{
379 struct nand_chip *this = mtd->priv; 381 struct nand_chip *this = mtd->priv;
380 382
381 /* Read the primary version, if available */ 383 /* Read the primary version, if available */
382 if (td->options & NAND_BBT_VERSION) { 384 if (td->options & NAND_BBT_VERSION) {
383 scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift, 385 scan_read(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
384 mtd->writesize, td); 386 mtd->writesize, td);
385 td->version[0] = buf[bbt_get_ver_offs(mtd, td)]; 387 td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
386 pr_info("Bad block table at page %d, version 0x%02X\n", 388 pr_info("Bad block table at page %d, version 0x%02X\n",
@@ -389,28 +391,27 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
389 391
390 /* Read the mirror version, if available */ 392 /* Read the mirror version, if available */
391 if (md && (md->options & NAND_BBT_VERSION)) { 393 if (md && (md->options & NAND_BBT_VERSION)) {
392 scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift, 394 scan_read(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
393 mtd->writesize, td); 395 mtd->writesize, md);
394 md->version[0] = buf[bbt_get_ver_offs(mtd, md)]; 396 md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
395 pr_info("Bad block table at page %d, version 0x%02X\n", 397 pr_info("Bad block table at page %d, version 0x%02X\n",
396 md->pages[0], md->version[0]); 398 md->pages[0], md->version[0]);
397 } 399 }
398 return 1;
399} 400}
400 401
401/* Scan a given block full */ 402/* Scan a given block full */
402static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd, 403static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
403 loff_t offs, uint8_t *buf, size_t readlen, 404 loff_t offs, uint8_t *buf, size_t readlen,
404 int scanlen, int len) 405 int scanlen, int numpages)
405{ 406{
406 int ret, j; 407 int ret, j;
407 408
408 ret = scan_read_raw_oob(mtd, buf, offs, readlen); 409 ret = scan_read_oob(mtd, buf, offs, readlen);
409 /* Ignore ECC errors when checking for BBM */ 410 /* Ignore ECC errors when checking for BBM */
410 if (ret && !mtd_is_bitflip_or_eccerr(ret)) 411 if (ret && !mtd_is_bitflip_or_eccerr(ret))
411 return ret; 412 return ret;
412 413
413 for (j = 0; j < len; j++, buf += scanlen) { 414 for (j = 0; j < numpages; j++, buf += scanlen) {
414 if (check_pattern(buf, scanlen, mtd->writesize, bd)) 415 if (check_pattern(buf, scanlen, mtd->writesize, bd))
415 return 1; 416 return 1;
416 } 417 }
@@ -419,7 +420,7 @@ static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
419 420
420/* Scan a given block partially */ 421/* Scan a given block partially */
421static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd, 422static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
422 loff_t offs, uint8_t *buf, int len) 423 loff_t offs, uint8_t *buf, int numpages)
423{ 424{
424 struct mtd_oob_ops ops; 425 struct mtd_oob_ops ops;
425 int j, ret; 426 int j, ret;
@@ -430,7 +431,7 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
430 ops.datbuf = NULL; 431 ops.datbuf = NULL;
431 ops.mode = MTD_OPS_PLACE_OOB; 432 ops.mode = MTD_OPS_PLACE_OOB;
432 433
433 for (j = 0; j < len; j++) { 434 for (j = 0; j < numpages; j++) {
434 /* 435 /*
435 * Read the full oob until read_oob is fixed to handle single 436 * Read the full oob until read_oob is fixed to handle single
436 * byte reads for 16 bit buswidth. 437 * byte reads for 16 bit buswidth.
@@ -463,7 +464,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
463 struct nand_bbt_descr *bd, int chip) 464 struct nand_bbt_descr *bd, int chip)
464{ 465{
465 struct nand_chip *this = mtd->priv; 466 struct nand_chip *this = mtd->priv;
466 int i, numblocks, len, scanlen; 467 int i, numblocks, numpages, scanlen;
467 int startblock; 468 int startblock;
468 loff_t from; 469 loff_t from;
469 size_t readlen; 470 size_t readlen;
@@ -471,11 +472,11 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
471 pr_info("Scanning device for bad blocks\n"); 472 pr_info("Scanning device for bad blocks\n");
472 473
473 if (bd->options & NAND_BBT_SCANALLPAGES) 474 if (bd->options & NAND_BBT_SCANALLPAGES)
474 len = 1 << (this->bbt_erase_shift - this->page_shift); 475 numpages = 1 << (this->bbt_erase_shift - this->page_shift);
475 else if (bd->options & NAND_BBT_SCAN2NDPAGE) 476 else if (bd->options & NAND_BBT_SCAN2NDPAGE)
476 len = 2; 477 numpages = 2;
477 else 478 else
478 len = 1; 479 numpages = 1;
479 480
480 if (!(bd->options & NAND_BBT_SCANEMPTY)) { 481 if (!(bd->options & NAND_BBT_SCANEMPTY)) {
481 /* We need only read few bytes from the OOB area */ 482 /* We need only read few bytes from the OOB area */
@@ -484,7 +485,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
484 } else { 485 } else {
485 /* Full page content should be read */ 486 /* Full page content should be read */
486 scanlen = mtd->writesize + mtd->oobsize; 487 scanlen = mtd->writesize + mtd->oobsize;
487 readlen = len * mtd->writesize; 488 readlen = numpages * mtd->writesize;
488 } 489 }
489 490
490 if (chip == -1) { 491 if (chip == -1) {
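
create_bbt() first decides how many pages of each block to inspect (every page, the first two, or just one) and how much data each scan step must pull in. The sketch below re-derives numpages, scanlen and readlen under assumed option-bit names and geometry fields; the marker-only readlen is taken to be the descriptor's pattern length, which matches the branch elided from the hunk above.

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for the NAND_BBT_* option bits. */
#define SCANALLPAGES_SKETCH	(1u << 0)
#define SCAN2NDPAGE_SKETCH	(1u << 1)
#define SCANEMPTY_SKETCH	(1u << 2)

struct nand_geom_sketch {
	unsigned int writesize;		/* page size in bytes */
	unsigned int oobsize;		/* OOB bytes per page */
	unsigned int bbt_erase_shift;	/* log2(block size) */
	unsigned int page_shift;	/* log2(page size) */
};

static void bbt_scan_geometry(const struct nand_geom_sketch *g,
			      unsigned int options, unsigned int marker_len,
			      unsigned int *numpages, size_t *readlen,
			      size_t *scanlen)
{
	if (options & SCANALLPAGES_SKETCH)
		*numpages = 1u << (g->bbt_erase_shift - g->page_shift);
	else if (options & SCAN2NDPAGE_SKETCH)
		*numpages = 2;
	else
		*numpages = 1;

	if (!(options & SCANEMPTY_SKETCH)) {
		/* Marker-only scan: just the bad block marker bytes. */
		*scanlen = 0;
		*readlen = marker_len;
	} else {
		/* Full scan: whole page plus OOB for every scanned page. */
		*scanlen = g->writesize + g->oobsize;
		*readlen = *numpages * g->writesize;
	}
}

int main(void)
{
	struct nand_geom_sketch g = { 2048, 64, 17, 11 };	/* 2 KiB pages, 128 KiB blocks */
	unsigned int numpages;
	size_t readlen, scanlen;

	bbt_scan_geometry(&g, SCAN2NDPAGE_SKETCH, 1, &numpages, &readlen,
			  &scanlen);
	printf("numpages=%u readlen=%zu scanlen=%zu\n",
	       numpages, readlen, scanlen);
	return 0;
}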
@@ -508,7 +509,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
508 } 509 }
509 510
510 if (this->bbt_options & NAND_BBT_SCANLASTPAGE) 511 if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
511 from += mtd->erasesize - (mtd->writesize * len); 512 from += mtd->erasesize - (mtd->writesize * numpages);
512 513
513 for (i = startblock; i < numblocks;) { 514 for (i = startblock; i < numblocks;) {
514 int ret; 515 int ret;
@@ -517,9 +518,9 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
517 518
518 if (bd->options & NAND_BBT_SCANALLPAGES) 519 if (bd->options & NAND_BBT_SCANALLPAGES)
519 ret = scan_block_full(mtd, bd, from, buf, readlen, 520 ret = scan_block_full(mtd, bd, from, buf, readlen,
520 scanlen, len); 521 scanlen, numpages);
521 else 522 else
522 ret = scan_block_fast(mtd, bd, from, buf, len); 523 ret = scan_block_fast(mtd, bd, from, buf, numpages);
523 524
524 if (ret < 0) 525 if (ret < 0)
525 return ret; 526 return ret;
@@ -594,7 +595,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
594 loff_t offs = (loff_t)actblock << this->bbt_erase_shift; 595 loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
595 596
596 /* Read first page */ 597 /* Read first page */
597 scan_read_raw(mtd, buf, offs, mtd->writesize, td); 598 scan_read(mtd, buf, offs, mtd->writesize, td);
598 if (!check_pattern(buf, scanlen, mtd->writesize, td)) { 599 if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
599 td->pages[i] = actblock << blocktopage; 600 td->pages[i] = actblock << blocktopage;
600 if (td->options & NAND_BBT_VERSION) { 601 if (td->options & NAND_BBT_VERSION) {
@@ -626,7 +627,9 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
626 * 627 *
627 * Search and read the bad block table(s). 628 * Search and read the bad block table(s).
628 */ 629 */
629static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md) 630static void search_read_bbts(struct mtd_info *mtd, uint8_t *buf,
631 struct nand_bbt_descr *td,
632 struct nand_bbt_descr *md)
630{ 633{
631 /* Search the primary table */ 634 /* Search the primary table */
632 search_bbt(mtd, buf, td); 635 search_bbt(mtd, buf, td);
@@ -634,9 +637,6 @@ static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt
634 /* Search the mirror table */ 637 /* Search the mirror table */
635 if (md) 638 if (md)
636 search_bbt(mtd, buf, md); 639 search_bbt(mtd, buf, md);
637
638 /* Force result check */
639 return 1;
640} 640}
641 641
642/** 642/**
@@ -1162,14 +1162,13 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1162 1162
1163 /* Is the bbt at a given page? */ 1163 /* Is the bbt at a given page? */
1164 if (td->options & NAND_BBT_ABSPAGE) { 1164 if (td->options & NAND_BBT_ABSPAGE) {
1165 res = read_abs_bbts(mtd, buf, td, md); 1165 read_abs_bbts(mtd, buf, td, md);
1166 } else { 1166 } else {
1167 /* Search the bad block table using a pattern in oob */ 1167 /* Search the bad block table using a pattern in oob */
1168 res = search_read_bbts(mtd, buf, td, md); 1168 search_read_bbts(mtd, buf, td, md);
1169 } 1169 }
1170 1170
1171 if (res) 1171 res = check_create(mtd, buf, bd);
1172 res = check_create(mtd, buf, bd);
1173 1172
1174 /* Prevent the bbt regions from erasing / writing */ 1173 /* Prevent the bbt regions from erasing / writing */
1175 mark_bbt_region(mtd, td); 1174 mark_bbt_region(mtd, td);
@@ -1260,7 +1259,7 @@ static struct nand_bbt_descr bbt_main_descr = {
1260 .offs = 8, 1259 .offs = 8,
1261 .len = 4, 1260 .len = 4,
1262 .veroffs = 12, 1261 .veroffs = 12,
1263 .maxblocks = 4, 1262 .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
1264 .pattern = bbt_pattern 1263 .pattern = bbt_pattern
1265}; 1264};
1266 1265
@@ -1270,27 +1269,27 @@ static struct nand_bbt_descr bbt_mirror_descr = {
1270 .offs = 8, 1269 .offs = 8,
1271 .len = 4, 1270 .len = 4,
1272 .veroffs = 12, 1271 .veroffs = 12,
1273 .maxblocks = 4, 1272 .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
1274 .pattern = mirror_pattern 1273 .pattern = mirror_pattern
1275}; 1274};
1276 1275
1277static struct nand_bbt_descr bbt_main_no_bbt_descr = { 1276static struct nand_bbt_descr bbt_main_no_oob_descr = {
1278 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE 1277 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
1279 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP 1278 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
1280 | NAND_BBT_NO_OOB, 1279 | NAND_BBT_NO_OOB,
1281 .len = 4, 1280 .len = 4,
1282 .veroffs = 4, 1281 .veroffs = 4,
1283 .maxblocks = 4, 1282 .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
1284 .pattern = bbt_pattern 1283 .pattern = bbt_pattern
1285}; 1284};
1286 1285
1287static struct nand_bbt_descr bbt_mirror_no_bbt_descr = { 1286static struct nand_bbt_descr bbt_mirror_no_oob_descr = {
1288 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE 1287 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
1289 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP 1288 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
1290 | NAND_BBT_NO_OOB, 1289 | NAND_BBT_NO_OOB,
1291 .len = 4, 1290 .len = 4,
1292 .veroffs = 4, 1291 .veroffs = 4,
1293 .maxblocks = 4, 1292 .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
1294 .pattern = mirror_pattern 1293 .pattern = mirror_pattern
1295}; 1294};
1296 1295
@@ -1355,8 +1354,8 @@ int nand_default_bbt(struct mtd_info *mtd)
1355 /* Use the default pattern descriptors */ 1354 /* Use the default pattern descriptors */
1356 if (!this->bbt_td) { 1355 if (!this->bbt_td) {
1357 if (this->bbt_options & NAND_BBT_NO_OOB) { 1356 if (this->bbt_options & NAND_BBT_NO_OOB) {
1358 this->bbt_td = &bbt_main_no_bbt_descr; 1357 this->bbt_td = &bbt_main_no_oob_descr;
1359 this->bbt_md = &bbt_mirror_no_bbt_descr; 1358 this->bbt_md = &bbt_mirror_no_oob_descr;
1360 } else { 1359 } else {
1361 this->bbt_td = &bbt_main_descr; 1360 this->bbt_td = &bbt_main_descr;
1362 this->bbt_md = &bbt_mirror_descr; 1361 this->bbt_md = &bbt_mirror_descr;
@@ -1406,3 +1405,4 @@ int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
1406 1405
1407EXPORT_SYMBOL(nand_scan_bbt); 1406EXPORT_SYMBOL(nand_scan_bbt);
1408EXPORT_SYMBOL(nand_default_bbt); 1407EXPORT_SYMBOL(nand_default_bbt);
1408EXPORT_SYMBOL_GPL(nand_update_bbt);
diff --git a/drivers/mtd/nand/nand_bcm_umi.c b/drivers/mtd/nand/nand_bcm_umi.c
deleted file mode 100644
index 46a6bc9c4b74..000000000000
--- a/drivers/mtd/nand/nand_bcm_umi.c
+++ /dev/null
@@ -1,149 +0,0 @@
1/*****************************************************************************
2* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
3*
4* Unless you and Broadcom execute a separate written software license
5* agreement governing use of this software, this software is licensed to you
6* under the terms of the GNU General Public License version 2, available at
7* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8*
9* Notwithstanding the above, under no circumstances may you combine this
10* software in any way with any other Broadcom software provided under a
11* license other than the GPL, without Broadcom's express prior written
12* consent.
13*****************************************************************************/
14
15/* ---- Include Files ---------------------------------------------------- */
16#include <mach/reg_umi.h>
17#include "nand_bcm_umi.h"
18#ifdef BOOT0_BUILD
19#include <uart.h>
20#endif
21
22/* ---- External Variable Declarations ----------------------------------- */
23/* ---- External Function Prototypes ------------------------------------- */
24/* ---- Public Variables ------------------------------------------------- */
25/* ---- Private Constants and Types -------------------------------------- */
26/* ---- Private Function Prototypes -------------------------------------- */
27/* ---- Private Variables ------------------------------------------------ */
28/* ---- Private Functions ------------------------------------------------ */
29
30#if NAND_ECC_BCH
31/****************************************************************************
32* nand_bch_ecc_flip_bit - Routine to flip an errored bit
33*
34* PURPOSE:
35* This is a helper routine that flips the bit (0 -> 1 or 1 -> 0) of the
36* errored bit specified
37*
38* PARAMETERS:
39* datap - Container that holds the 512 byte data
40* errorLocation - Location of the bit that needs to be flipped
41*
42* RETURNS:
43* None
44****************************************************************************/
45static void nand_bcm_umi_bch_ecc_flip_bit(uint8_t *datap, int errorLocation)
46{
47 int locWithinAByte = (errorLocation & REG_UMI_BCH_ERR_LOC_BYTE) >> 0;
48 int locWithinAWord = (errorLocation & REG_UMI_BCH_ERR_LOC_WORD) >> 3;
49 int locWithinAPage = (errorLocation & REG_UMI_BCH_ERR_LOC_PAGE) >> 5;
50
51 uint8_t errorByte = 0;
52 uint8_t byteMask = 1 << locWithinAByte;
53
54 /* BCH uses big endian, need to change the location
55 * bits to little endian */
56 locWithinAWord = 3 - locWithinAWord;
57
58 errorByte = datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord];
59
60#ifdef BOOT0_BUILD
61 puthexs("\nECC Correct Offset: ",
62 locWithinAPage * sizeof(uint32_t) + locWithinAWord);
63 puthexs(" errorByte:", errorByte);
64 puthex8(" Bit: ", locWithinAByte);
65#endif
66
67 if (errorByte & byteMask) {
68 /* bit needs to be cleared */
69 errorByte &= ~byteMask;
70 } else {
71 /* bit needs to be set */
72 errorByte |= byteMask;
73 }
74
75 /* write back the value with the fixed bit */
76 datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord] = errorByte;
77}
78
79/****************************************************************************
80* nand_correct_page_bch - Routine to correct bit errors when reading NAND
81*
82* PURPOSE:
83* This routine reads the BCH registers to determine if there are any bit
84* errors during the read of the last 512 bytes of data + ECC bytes. If
85* errors exists, the routine fixes it.
86*
87* PARAMETERS:
88* datap - Container that holds the 512 byte data
89*
90* RETURNS:
91* 0 or greater = Number of errors corrected
92* (No errors are found or errors have been fixed)
93* -1 = Error(s) cannot be fixed
94****************************************************************************/
95int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
96 int numEccBytes)
97{
98 int numErrors;
99 int errorLocation;
100 int idx;
101 uint32_t regValue;
102
103 /* wait for read ECC to be valid */
104 regValue = nand_bcm_umi_bch_poll_read_ecc_calc();
105
106 /*
107 * read the control status register to determine if there
108 * are error'ed bits
109 * see if errors are correctible
110 */
111 if ((regValue & REG_UMI_BCH_CTRL_STATUS_UNCORR_ERR) > 0) {
112 int i;
113
114 for (i = 0; i < numEccBytes; i++) {
115 if (readEccData[i] != 0xff) {
116 /* errors cannot be fixed, return -1 */
117 return -1;
118 }
119 }
120 /* If ECC is unprogrammed then we can't correct,
121 * assume everything OK */
122 return 0;
123 }
124
125 if ((regValue & REG_UMI_BCH_CTRL_STATUS_CORR_ERR) == 0) {
126 /* no errors */
127 return 0;
128 }
129
130 /*
131 * Fix errored bits by doing the following:
132 * 1. Read the number of errors in the control and status register
133 * 2. Read the error location registers that corresponds to the number
134 * of errors reported
135 * 3. Invert the bit in the data
136 */
137 numErrors = (regValue & REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR) >> 20;
138
139 for (idx = 0; idx < numErrors; idx++) {
140 errorLocation =
141 REG_UMI_BCH_ERR_LOC_ADDR(idx) & REG_UMI_BCH_ERR_LOC_MASK;
142
143 /* Flip bit */
144 nand_bcm_umi_bch_ecc_flip_bit(datap, errorLocation);
145 }
146 /* Errors corrected */
147 return numErrors;
148}
149#endif
diff --git a/drivers/mtd/nand/nand_bcm_umi.h b/drivers/mtd/nand/nand_bcm_umi.h
deleted file mode 100644
index d90186684db8..000000000000
--- a/drivers/mtd/nand/nand_bcm_umi.h
+++ /dev/null
@@ -1,336 +0,0 @@
1/*****************************************************************************
2* Copyright 2003 - 2009 Broadcom Corporation. All rights reserved.
3*
4* Unless you and Broadcom execute a separate written software license
5* agreement governing use of this software, this software is licensed to you
6* under the terms of the GNU General Public License version 2, available at
7* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8*
9* Notwithstanding the above, under no circumstances may you combine this
10* software in any way with any other Broadcom software provided under a
11* license other than the GPL, without Broadcom's express prior written
12* consent.
13*****************************************************************************/
14#ifndef NAND_BCM_UMI_H
15#define NAND_BCM_UMI_H
16
17/* ---- Include Files ---------------------------------------------------- */
18#include <mach/reg_umi.h>
19#include <mach/reg_nand.h>
20#include <mach/cfg_global.h>
21
22/* ---- Constants and Types ---------------------------------------------- */
23#if (CFG_GLOBAL_CHIP_FAMILY == CFG_GLOBAL_CHIP_FAMILY_BCMRING)
24#define NAND_ECC_BCH (CFG_GLOBAL_CHIP_REV > 0xA0)
25#else
26#define NAND_ECC_BCH 0
27#endif
28
29#define CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES 13
30
31#if NAND_ECC_BCH
32#ifdef BOOT0_BUILD
33#define NAND_ECC_NUM_BYTES 13
34#else
35#define NAND_ECC_NUM_BYTES CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES
36#endif
37#else
38#define NAND_ECC_NUM_BYTES 3
39#endif
40
41#define NAND_DATA_ACCESS_SIZE 512
42
43/* ---- Variable Externs ------------------------------------------ */
44/* ---- Function Prototypes --------------------------------------- */
45int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
46 int numEccBytes);
47
 48/* Check if device is ready */
49static inline int nand_bcm_umi_dev_ready(void)
50{
51 return readl(&REG_UMI_NAND_RCSR) & REG_UMI_NAND_RCSR_RDY;
52}
53
54/* Wait until device is ready */
55static inline void nand_bcm_umi_wait_till_ready(void)
56{
57 while (nand_bcm_umi_dev_ready() == 0)
58 ;
59}
60
61/* Enable Hamming ECC */
62static inline void nand_bcm_umi_hamming_enable_hwecc(void)
63{
64 /* disable and reset ECC, 512 byte page */
65 writel(readl(&REG_UMI_NAND_ECC_CSR) & ~(REG_UMI_NAND_ECC_CSR_ECC_ENABLE |
66 REG_UMI_NAND_ECC_CSR_256BYTE), &REG_UMI_NAND_ECC_CSR);
67 /* enable ECC */
68 writel(readl(&REG_UMI_NAND_ECC_CSR) | REG_UMI_NAND_ECC_CSR_ECC_ENABLE,
69 &REG_UMI_NAND_ECC_CSR);
70}
71
72#if NAND_ECC_BCH
73/* BCH ECC specifics */
74#define ECC_BITS_PER_CORRECTABLE_BIT 13
75
76/* Enable BCH Read ECC */
77static inline void nand_bcm_umi_bch_enable_read_hwecc(void)
78{
79 /* disable and reset ECC */
80 writel(REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID, &REG_UMI_BCH_CTRL_STATUS);
81 /* Turn on ECC */
82 writel(REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN, &REG_UMI_BCH_CTRL_STATUS);
83}
84
85/* Enable BCH Write ECC */
86static inline void nand_bcm_umi_bch_enable_write_hwecc(void)
87{
88 /* disable and reset ECC */
89 writel(REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID, &REG_UMI_BCH_CTRL_STATUS);
90 /* Turn on ECC */
91 writel(REG_UMI_BCH_CTRL_STATUS_ECC_WR_EN, &REG_UMI_BCH_CTRL_STATUS);
92}
93
94/* Config number of BCH ECC bytes */
95static inline void nand_bcm_umi_bch_config_ecc(uint8_t numEccBytes)
96{
97 uint32_t nValue;
98 uint32_t tValue;
99 uint32_t kValue;
100 uint32_t numBits = numEccBytes * 8;
101
102 /* disable and reset ECC */
103 writel(REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID |
104 REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID,
105 &REG_UMI_BCH_CTRL_STATUS);
106
107 /* Every correctible bit requires 13 ECC bits */
108 tValue = (uint32_t) (numBits / ECC_BITS_PER_CORRECTABLE_BIT);
109
110 /* Total data in number of bits for generating and computing BCH ECC */
111 nValue = (NAND_DATA_ACCESS_SIZE + numEccBytes) * 8;
112
113 /* K parameter is used internally. K = N - (T * 13) */
114 kValue = nValue - (tValue * ECC_BITS_PER_CORRECTABLE_BIT);
115
116 /* Write the settings */
117 writel(nValue, &REG_UMI_BCH_N);
118 writel(tValue, &REG_UMI_BCH_T);
119 writel(kValue, &REG_UMI_BCH_K);
120}
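
The removed nand_bcm_umi_bch_config_ecc() derives the BCH code parameters from the ECC byte count: each correctable bit costs 13 ECC bits, N is the total number of data-plus-ECC bits, and K = N - 13*T. A quick stand-alone check of that arithmetic for the driver's 13-byte case (the values below follow directly from those formulas):

#include <stdint.h>
#include <stdio.h>

#define DATA_BYTES	512	/* NAND_DATA_ACCESS_SIZE */
#define BITS_PER_T	13	/* ECC bits per correctable bit */

int main(void)
{
	uint32_t ecc_bytes = 13;			/* BCH ECC bytes per sector */
	uint32_t num_bits = ecc_bytes * 8;		/* 104 */
	uint32_t t = num_bits / BITS_PER_T;		/* 8 correctable bits */
	uint32_t n = (DATA_BYTES + ecc_bytes) * 8;	/* 4200 */
	uint32_t k = n - t * BITS_PER_T;		/* 4096 */

	printf("T=%u N=%u K=%u\n", t, n, k);
	return 0;
}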
121
122/* Pause during ECC read calculation to skip bytes in OOB */
123static inline void nand_bcm_umi_bch_pause_read_ecc_calc(void)
124{
125 writel(REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN | REG_UMI_BCH_CTRL_STATUS_PAUSE_ECC_DEC, &REG_UMI_BCH_CTRL_STATUS);
126}
127
128/* Resume during ECC read calculation after skipping bytes in OOB */
129static inline void nand_bcm_umi_bch_resume_read_ecc_calc(void)
130{
131 writel(REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN, &REG_UMI_BCH_CTRL_STATUS);
132}
133
134/* Poll read ECC calc to check when hardware completes */
135static inline uint32_t nand_bcm_umi_bch_poll_read_ecc_calc(void)
136{
137 uint32_t regVal;
138
139 do {
140 /* wait for ECC to be valid */
141 regVal = readl(&REG_UMI_BCH_CTRL_STATUS);
142 } while ((regVal & REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID) == 0);
143
144 return regVal;
145}
146
147/* Poll write ECC calc to check when hardware completes */
148static inline void nand_bcm_umi_bch_poll_write_ecc_calc(void)
149{
150 /* wait for ECC to be valid */
151 while ((readl(&REG_UMI_BCH_CTRL_STATUS) & REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID)
152 == 0)
153 ;
154}
155
156/* Read the OOB and ECC, for kernel write OOB to a buffer */
157#if defined(__KERNEL__) && !defined(STANDALONE)
158static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
159 uint8_t *eccCalc, int numEccBytes, uint8_t *oobp)
160#else
161static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
162 uint8_t *eccCalc, int numEccBytes)
163#endif
164{
165 int eccPos = 0;
166 int numToRead = 16; /* There are 16 bytes per sector in the OOB */
167
168 /* ECC is already paused when this function is called */
169 if (pageSize != NAND_DATA_ACCESS_SIZE) {
170 /* skip BI */
171#if defined(__KERNEL__) && !defined(STANDALONE)
172 *oobp++ = readb(&REG_NAND_DATA8);
173#else
174 readb(&REG_NAND_DATA8);
175#endif
176 numToRead--;
177 }
178
179 while (numToRead > numEccBytes) {
180 /* skip free oob region */
181#if defined(__KERNEL__) && !defined(STANDALONE)
182 *oobp++ = readb(&REG_NAND_DATA8);
183#else
184 readb(&REG_NAND_DATA8);
185#endif
186 numToRead--;
187 }
188
189 if (pageSize == NAND_DATA_ACCESS_SIZE) {
190 /* read ECC bytes before BI */
191 nand_bcm_umi_bch_resume_read_ecc_calc();
192
193 while (numToRead > 11) {
194#if defined(__KERNEL__) && !defined(STANDALONE)
195 *oobp = readb(&REG_NAND_DATA8);
196 eccCalc[eccPos++] = *oobp;
197 oobp++;
198#else
199 eccCalc[eccPos++] = readb(&REG_NAND_DATA8);
200#endif
201 numToRead--;
202 }
203
204 nand_bcm_umi_bch_pause_read_ecc_calc();
205
206 if (numToRead == 11) {
207 /* read BI */
208#if defined(__KERNEL__) && !defined(STANDALONE)
209 *oobp++ = readb(&REG_NAND_DATA8);
210#else
211 readb(&REG_NAND_DATA8);
212#endif
213 numToRead--;
214 }
215
216 }
217 /* read ECC bytes */
218 nand_bcm_umi_bch_resume_read_ecc_calc();
219 while (numToRead) {
220#if defined(__KERNEL__) && !defined(STANDALONE)
221 *oobp = readb(&REG_NAND_DATA8);
222 eccCalc[eccPos++] = *oobp;
223 oobp++;
224#else
225 eccCalc[eccPos++] = readb(&REG_NAND_DATA8);
226#endif
227 numToRead--;
228 }
229}
230
231/* Helper function to write ECC */
232static inline void NAND_BCM_UMI_ECC_WRITE(int numEccBytes, int eccBytePos,
233 uint8_t *oobp, uint8_t eccVal)
234{
235 if (eccBytePos <= numEccBytes)
236 *oobp = eccVal;
237}
238
239/* Write OOB with ECC */
240static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
241 uint8_t *oobp, int numEccBytes)
242{
243 uint32_t eccVal = 0xffffffff;
244
245 /* wait for write ECC to be valid */
246 nand_bcm_umi_bch_poll_write_ecc_calc();
247
248 /*
249 ** Get the hardware ecc from the 32-bit result registers.
250 ** Read after 512 byte accesses. Format B3B2B1B0
251 ** where B3 = ecc3, etc.
252 */
253
254 if (pageSize == NAND_DATA_ACCESS_SIZE) {
255 /* Now fill in the ECC bytes */
256 if (numEccBytes >= 13)
257 eccVal = readl(&REG_UMI_BCH_WR_ECC_3);
258
259 /* Usually we skip CM in oob[0,1] */
260 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[0],
261 (eccVal >> 16) & 0xff);
262 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 14, &oobp[1],
263 (eccVal >> 8) & 0xff);
264
265 /* Write ECC in oob[2,3,4] */
266 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 13, &oobp[2],
267 eccVal & 0xff); /* ECC 12 */
268
269 if (numEccBytes >= 9)
270 eccVal = readl(&REG_UMI_BCH_WR_ECC_2);
271
272 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[3],
273 (eccVal >> 24) & 0xff); /* ECC11 */
274 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 11, &oobp[4],
275 (eccVal >> 16) & 0xff); /* ECC10 */
276
277 /* Always Skip BI in oob[5] */
278 } else {
279 /* Always Skip BI in oob[0] */
280
281 /* Now fill in the ECC bytes */
282 if (numEccBytes >= 13)
283 eccVal = readl(&REG_UMI_BCH_WR_ECC_3);
284
285 /* Usually skip CM in oob[1,2] */
286 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[1],
287 (eccVal >> 16) & 0xff);
288 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 14, &oobp[2],
289 (eccVal >> 8) & 0xff);
290
291 /* Write ECC in oob[3-15] */
292 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 13, &oobp[3],
293 eccVal & 0xff); /* ECC12 */
294
295 if (numEccBytes >= 9)
296 eccVal = readl(&REG_UMI_BCH_WR_ECC_2);
297
298 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[4],
299 (eccVal >> 24) & 0xff); /* ECC11 */
300 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 11, &oobp[5],
301 (eccVal >> 16) & 0xff); /* ECC10 */
302 }
303
304 /* Fill in the remainder of ECC locations */
305 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 10, &oobp[6],
306 (eccVal >> 8) & 0xff); /* ECC9 */
307 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 9, &oobp[7],
308 eccVal & 0xff); /* ECC8 */
309
310 if (numEccBytes >= 5)
311 eccVal = readl(&REG_UMI_BCH_WR_ECC_1);
312
313 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 8, &oobp[8],
314 (eccVal >> 24) & 0xff); /* ECC7 */
315 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 7, &oobp[9],
316 (eccVal >> 16) & 0xff); /* ECC6 */
317 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 6, &oobp[10],
318 (eccVal >> 8) & 0xff); /* ECC5 */
319 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 5, &oobp[11],
320 eccVal & 0xff); /* ECC4 */
321
322 if (numEccBytes >= 1)
323 eccVal = readl(&REG_UMI_BCH_WR_ECC_0);
324
325 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 4, &oobp[12],
326 (eccVal >> 24) & 0xff); /* ECC3 */
327 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 3, &oobp[13],
328 (eccVal >> 16) & 0xff); /* ECC2 */
329 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 2, &oobp[14],
330 (eccVal >> 8) & 0xff); /* ECC1 */
331 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 1, &oobp[15],
332 eccVal & 0xff); /* ECC0 */
333}
334#endif
335
336#endif /* NAND_BCM_UMI_H */
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 621b70b7a159..e3aa2748a6e7 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -70,7 +70,7 @@ struct nand_flash_dev nand_flash_ids[] = {
70 * These are the new chips with large page size. The pagesize and the 70 * These are the new chips with large page size. The pagesize and the
71 * erasesize is determined from the extended id bytes 71 * erasesize is determined from the extended id bytes
72 */ 72 */
73#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY) 73#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS
74#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) 74#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
75 75
76 /* 512 Megabit */ 76 /* 512 Megabit */
@@ -157,7 +157,7 @@ struct nand_flash_dev nand_flash_ids[] = {
157 * writes possible, but not implemented now 157 * writes possible, but not implemented now
158 */ 158 */
159 {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000, 159 {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000,
160 NAND_IS_AND | NAND_NO_READRDY | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH}, 160 NAND_IS_AND | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH},
161 161
162 {NULL,} 162 {NULL,}
163}; 163};
@@ -174,8 +174,9 @@ struct nand_manufacturers nand_manuf_ids[] = {
174 {NAND_MFR_STMICRO, "ST Micro"}, 174 {NAND_MFR_STMICRO, "ST Micro"},
175 {NAND_MFR_HYNIX, "Hynix"}, 175 {NAND_MFR_HYNIX, "Hynix"},
176 {NAND_MFR_MICRON, "Micron"}, 176 {NAND_MFR_MICRON, "Micron"},
177 {NAND_MFR_AMD, "AMD"}, 177 {NAND_MFR_AMD, "AMD/Spansion"},
178 {NAND_MFR_MACRONIX, "Macronix"}, 178 {NAND_MFR_MACRONIX, "Macronix"},
179 {NAND_MFR_EON, "Eon"},
179 {0x0, "Unknown"} 180 {0x0, "Unknown"}
180}; 181};
181 182
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index cf0cd3146817..a932c485eb04 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -447,8 +447,6 @@ static unsigned int rptwear_cnt = 0;
447/* MTD structure for NAND controller */ 447/* MTD structure for NAND controller */
448static struct mtd_info *nsmtd; 448static struct mtd_info *nsmtd;
449 449
450static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE];
451
452/* 450/*
453 * Allocate array of page pointers, create slab allocation for an array 451 * Allocate array of page pointers, create slab allocation for an array
454 * and initialize the array by NULL pointers. 452 * and initialize the array by NULL pointers.
@@ -2189,19 +2187,6 @@ static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
2189 return; 2187 return;
2190} 2188}
2191 2189
2192static int ns_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
2193{
2194 ns_nand_read_buf(mtd, (u_char *)&ns_verify_buf[0], len);
2195
2196 if (!memcmp(buf, &ns_verify_buf[0], len)) {
2197 NS_DBG("verify_buf: the buffer is OK\n");
2198 return 0;
2199 } else {
2200 NS_DBG("verify_buf: the buffer is wrong\n");
2201 return -EFAULT;
2202 }
2203}
2204
2205/* 2190/*
2206 * Module initialization function 2191 * Module initialization function
2207 */ 2192 */
@@ -2236,7 +2221,6 @@ static int __init ns_init_module(void)
2236 chip->dev_ready = ns_device_ready; 2221 chip->dev_ready = ns_device_ready;
2237 chip->write_buf = ns_nand_write_buf; 2222 chip->write_buf = ns_nand_write_buf;
2238 chip->read_buf = ns_nand_read_buf; 2223 chip->read_buf = ns_nand_read_buf;
2239 chip->verify_buf = ns_nand_verify_buf;
2240 chip->read_word = ns_nand_read_word; 2224 chip->read_word = ns_nand_read_word;
2241 chip->ecc.mode = NAND_ECC_SOFT; 2225 chip->ecc.mode = NAND_ECC_SOFT;
2242 /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */ 2226 /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
@@ -2333,6 +2317,7 @@ static int __init ns_init_module(void)
2333 uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize; 2317 uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
2334 if (new_size >> overridesize != nsmtd->erasesize) { 2318 if (new_size >> overridesize != nsmtd->erasesize) {
2335 NS_ERR("overridesize is too big\n"); 2319 NS_ERR("overridesize is too big\n");
2320 retval = -EINVAL;
2336 goto err_exit; 2321 goto err_exit;
2337 } 2322 }
2338 /* N.B. This relies on nand_scan not doing anything with the size before we change it */ 2323 /* N.B. This relies on nand_scan not doing anything with the size before we change it */
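
The nandsim fix keeps the existing overflow guard next to it: after scaling the erase size by overridesize, shifting the result back must reproduce the original value, otherwise the shift wrapped. A stand-alone illustration of that check:

#include <stdint.h>
#include <stdio.h>

/* Detect overflow of (base << shift) by shifting back and comparing. */
static int shl_overflows(uint64_t base, unsigned int shift)
{
	uint64_t scaled = base << shift;

	return (scaled >> shift) != base;
}

int main(void)
{
	uint64_t erasesize = 128 * 1024;	/* 128 KiB erase blocks */

	printf("<< 10: %s\n", shl_overflows(erasesize, 10) ? "overflow" : "ok");
	printf("<< 60: %s\n", shl_overflows(erasesize, 60) ? "overflow" : "ok");
	return 0;
}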
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 2b6f632cf274..5fd3f010e3ae 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -140,18 +140,6 @@ static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
140 out_be32(ndfc->ndfcbase + NDFC_DATA, *p++); 140 out_be32(ndfc->ndfcbase + NDFC_DATA, *p++);
141} 141}
142 142
143static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
144{
145 struct nand_chip *chip = mtd->priv;
146 struct ndfc_controller *ndfc = chip->priv;
147 uint32_t *p = (uint32_t *) buf;
148
149 for(;len > 0; len -= 4)
150 if (*p++ != in_be32(ndfc->ndfcbase + NDFC_DATA))
151 return -EFAULT;
152 return 0;
153}
154
155/* 143/*
156 * Initialize chip structure 144 * Initialize chip structure
157 */ 145 */
@@ -172,7 +160,6 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
172 chip->controller = &ndfc->ndfc_control; 160 chip->controller = &ndfc->ndfc_control;
173 chip->read_buf = ndfc_read_buf; 161 chip->read_buf = ndfc_read_buf;
174 chip->write_buf = ndfc_write_buf; 162 chip->write_buf = ndfc_write_buf;
175 chip->verify_buf = ndfc_verify_buf;
176 chip->ecc.correct = nand_correct_data; 163 chip->ecc.correct = nand_correct_data;
177 chip->ecc.hwctl = ndfc_enable_hwecc; 164 chip->ecc.hwctl = ndfc_enable_hwecc;
178 chip->ecc.calculate = ndfc_calculate_ecc; 165 chip->ecc.calculate = ndfc_calculate_ecc;
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 8febe46e1105..94dc46bc118c 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -112,22 +112,6 @@ static void nuc900_nand_write_buf(struct mtd_info *mtd,
112 write_data_reg(nand, buf[i]); 112 write_data_reg(nand, buf[i]);
113} 113}
114 114
115static int nuc900_verify_buf(struct mtd_info *mtd,
116 const unsigned char *buf, int len)
117{
118 int i;
119 struct nuc900_nand *nand;
120
121 nand = container_of(mtd, struct nuc900_nand, mtd);
122
123 for (i = 0; i < len; i++) {
124 if (buf[i] != (unsigned char)read_data_reg(nand))
125 return -EFAULT;
126 }
127
128 return 0;
129}
130
131static int nuc900_check_rb(struct nuc900_nand *nand) 115static int nuc900_check_rb(struct nuc900_nand *nand)
132{ 116{
133 unsigned int val; 117 unsigned int val;
@@ -292,7 +276,6 @@ static int __devinit nuc900_nand_probe(struct platform_device *pdev)
292 chip->read_byte = nuc900_nand_read_byte; 276 chip->read_byte = nuc900_nand_read_byte;
293 chip->write_buf = nuc900_nand_write_buf; 277 chip->write_buf = nuc900_nand_write_buf;
294 chip->read_buf = nuc900_nand_read_buf; 278 chip->read_buf = nuc900_nand_read_buf;
295 chip->verify_buf = nuc900_verify_buf;
296 chip->chip_delay = 50; 279 chip->chip_delay = 50;
297 chip->options = 0; 280 chip->options = 0;
298 chip->ecc.mode = NAND_ECC_SOFT; 281 chip->ecc.mode = NAND_ECC_SOFT;
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index fc8111278d12..5b3138620646 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -425,7 +425,7 @@ static void omap_nand_dma_callback(void *data)
425} 425}
426 426
427/* 427/*
428 * omap_nand_dma_transfer: configer and start dma transfer 428 * omap_nand_dma_transfer: configure and start dma transfer
429 * @mtd: MTD device structure 429 * @mtd: MTD device structure
430 * @addr: virtual address in RAM of source/destination 430 * @addr: virtual address in RAM of source/destination
431 * @len: number of data bytes to be transferred 431 * @len: number of data bytes to be transferred
@@ -546,7 +546,7 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
546} 546}
547 547
548/* 548/*
549 * omap_nand_irq - GMPC irq handler 549 * omap_nand_irq - GPMC irq handler
550 * @this_irq: gpmc irq number 550 * @this_irq: gpmc irq number
551 * @dev: omap_nand_info structure pointer is passed here 551 * @dev: omap_nand_info structure pointer is passed here
552 */ 552 */
@@ -698,27 +698,6 @@ out_copy:
698} 698}
699 699
700/** 700/**
701 * omap_verify_buf - Verify chip data against buffer
702 * @mtd: MTD device structure
703 * @buf: buffer containing the data to compare
704 * @len: number of bytes to compare
705 */
706static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
707{
708 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
709 mtd);
710 u16 *p = (u16 *) buf;
711
712 len >>= 1;
713 while (len--) {
714 if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
715 return -EFAULT;
716 }
717
718 return 0;
719}
720
721/**
722 * gen_true_ecc - This function will generate true ECC value 701 * gen_true_ecc - This function will generate true ECC value
723 * @ecc_buf: buffer to store ecc code 702 * @ecc_buf: buffer to store ecc code
724 * 703 *
@@ -1326,8 +1305,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1326 1305
1327 /* 1306 /*
1328 * If RDY/BSY line is connected to OMAP then use the omap ready 1307 * If RDY/BSY line is connected to OMAP then use the omap ready
1329 * funcrtion and the generic nand_wait function which reads the status 1308 * function and the generic nand_wait function which reads the status
1330 * register after monitoring the RDY/BSY line.Otherwise use a standard 1309 * register after monitoring the RDY/BSY line. Otherwise use a standard
1331 * chip delay which is slightly more than tR (AC Timing) of the NAND 1310 * chip delay which is slightly more than tR (AC Timing) of the NAND
1332 * device and read status register until you get a failure or success 1311 * device and read status register until you get a failure or success
1333 */ 1312 */
@@ -1428,9 +1407,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1428 goto out_release_mem_region; 1407 goto out_release_mem_region;
1429 } 1408 }
1430 1409
1431 info->nand.verify_buf = omap_verify_buf; 1410 /* select the ecc type */
1432
1433 /* selsect the ecc type */
1434 if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT) 1411 if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
1435 info->nand.ecc.mode = NAND_ECC_SOFT; 1412 info->nand.ecc.mode = NAND_ECC_SOFT;
1436 else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) || 1413 else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
@@ -1536,7 +1513,8 @@ static int omap_nand_remove(struct platform_device *pdev)
1536 /* Release NAND device, its internal structures and partitions */ 1513 /* Release NAND device, its internal structures and partitions */
1537 nand_release(&info->mtd); 1514 nand_release(&info->mtd);
1538 iounmap(info->nand.IO_ADDR_R); 1515 iounmap(info->nand.IO_ADDR_R);
1539 kfree(&info->mtd); 1516 release_mem_region(info->phys_base, NAND_IO_SIZE);
1517 kfree(info);
1540 return 0; 1518 return 0;
1541} 1519}
1542 1520
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 131b58a133f1..aefaf8cd31ef 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -21,7 +21,6 @@
21#include <linux/err.h> 21#include <linux/err.h>
22#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/sizes.h> 23#include <asm/sizes.h>
24#include <mach/hardware.h>
25#include <linux/platform_data/mtd-orion_nand.h> 24#include <linux/platform_data/mtd-orion_nand.h>
26 25
27static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) 26static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 1bcb52040422..a47ee68a0cfa 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -37,6 +37,11 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
37 const char **part_types; 37 const char **part_types;
38 int err = 0; 38 int err = 0;
39 39
40 if (!pdata) {
41 dev_err(&pdev->dev, "platform_nand_data is missing\n");
42 return -EINVAL;
43 }
44
40 if (pdata->chip.nr_chips < 1) { 45 if (pdata->chip.nr_chips < 1) {
41 dev_err(&pdev->dev, "invalid number of chips specified\n"); 46 dev_err(&pdev->dev, "invalid number of chips specified\n");
42 return -EINVAL; 47 return -EINVAL;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index c45227173efd..37ee75c7bacb 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -683,11 +683,13 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
683 info->state = STATE_IDLE; 683 info->state = STATE_IDLE;
684} 684}
685 685
686static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd, 686static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
687 struct nand_chip *chip, const uint8_t *buf, int oob_required) 687 struct nand_chip *chip, const uint8_t *buf, int oob_required)
688{ 688{
689 chip->write_buf(mtd, buf, mtd->writesize); 689 chip->write_buf(mtd, buf, mtd->writesize);
690 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 690 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
691
692 return 0;
691} 693}
692 694
693static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, 695static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
@@ -771,12 +773,6 @@ static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
771 info->buf_start += real_len; 773 info->buf_start += real_len;
772} 774}
773 775
774static int pxa3xx_nand_verify_buf(struct mtd_info *mtd,
775 const uint8_t *buf, int len)
776{
777 return 0;
778}
779
780static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip) 776static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
781{ 777{
782 return; 778 return;
@@ -1007,7 +1003,6 @@ KEEP_CONFIG:
1007 chip->ecc.size = host->page_size; 1003 chip->ecc.size = host->page_size;
1008 chip->ecc.strength = 1; 1004 chip->ecc.strength = 1;
1009 1005
1010 chip->options |= NAND_NO_READRDY;
1011 if (host->reg_ndcr & NDCR_DWIDTH_M) 1006 if (host->reg_ndcr & NDCR_DWIDTH_M)
1012 chip->options |= NAND_BUSWIDTH_16; 1007 chip->options |= NAND_BUSWIDTH_16;
1013 1008
@@ -1070,7 +1065,6 @@ static int alloc_nand_resource(struct platform_device *pdev)
1070 chip->read_byte = pxa3xx_nand_read_byte; 1065 chip->read_byte = pxa3xx_nand_read_byte;
1071 chip->read_buf = pxa3xx_nand_read_buf; 1066 chip->read_buf = pxa3xx_nand_read_buf;
1072 chip->write_buf = pxa3xx_nand_write_buf; 1067 chip->write_buf = pxa3xx_nand_write_buf;
1073 chip->verify_buf = pxa3xx_nand_verify_buf;
1074 } 1068 }
1075 1069
1076 spin_lock_init(&chip->controller->lock); 1070 spin_lock_init(&chip->controller->lock);
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index 8cb627751c9c..4495f8551fa0 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -309,27 +309,6 @@ static uint8_t r852_read_byte(struct mtd_info *mtd)
309 return r852_read_reg(dev, R852_DATALINE); 309 return r852_read_reg(dev, R852_DATALINE);
310} 310}
311 311
312
313/*
314 * Readback the buffer to verify it
315 */
316int r852_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
317{
318 struct r852_device *dev = r852_get_dev(mtd);
319
320 /* We can't be sure about anything here... */
321 if (dev->card_unstable)
322 return -1;
323
324 /* This will never happen, unless you wired up a nand chip
325 with > 512 bytes page size to the reader */
326 if (len > SM_SECTOR_SIZE)
327 return 0;
328
329 r852_read_buf(mtd, dev->tmp_buffer, len);
330 return memcmp(buf, dev->tmp_buffer, len);
331}
332
333/* 312/*
334 * Control several chip lines & send commands 313 * Control several chip lines & send commands
335 */ 314 */
@@ -882,7 +861,6 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
882 chip->read_byte = r852_read_byte; 861 chip->read_byte = r852_read_byte;
883 chip->read_buf = r852_read_buf; 862 chip->read_buf = r852_read_buf;
884 chip->write_buf = r852_write_buf; 863 chip->write_buf = r852_write_buf;
885 chip->verify_buf = r852_verify_buf;
886 864
887 /* ecc */ 865 /* ecc */
888 chip->ecc.mode = NAND_ECC_HW_SYNDROME; 866 chip->ecc.mode = NAND_ECC_HW_SYNDROME;
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index d8040619ad8d..295e4bedad96 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -21,6 +21,8 @@
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22*/ 22*/
23 23
24#define pr_fmt(fmt) "nand-s3c2410: " fmt
25
24#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG 26#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG
25#define DEBUG 27#define DEBUG
26#endif 28#endif
@@ -30,6 +32,7 @@
30#include <linux/init.h> 32#include <linux/init.h>
31#include <linux/kernel.h> 33#include <linux/kernel.h>
32#include <linux/string.h> 34#include <linux/string.h>
35#include <linux/io.h>
33#include <linux/ioport.h> 36#include <linux/ioport.h>
34#include <linux/platform_device.h> 37#include <linux/platform_device.h>
35#include <linux/delay.h> 38#include <linux/delay.h>
@@ -43,24 +46,9 @@
43#include <linux/mtd/nand_ecc.h> 46#include <linux/mtd/nand_ecc.h>
44#include <linux/mtd/partitions.h> 47#include <linux/mtd/partitions.h>
45 48
46#include <asm/io.h>
47
48#include <plat/regs-nand.h> 49#include <plat/regs-nand.h>
49#include <linux/platform_data/mtd-nand-s3c2410.h> 50#include <linux/platform_data/mtd-nand-s3c2410.h>
50 51
51#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
52static int hardware_ecc = 1;
53#else
54static int hardware_ecc = 0;
55#endif
56
57#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
58static const int clock_stop = 1;
59#else
60static const int clock_stop = 0;
61#endif
62
63
64/* new oob placement block for use with hardware ecc generation 52/* new oob placement block for use with hardware ecc generation
65 */ 53 */
66 54
@@ -109,9 +97,8 @@ enum s3c_nand_clk_state {
 109 * @mtds: An array of MTD instances on this controller. 97
110 * @platform: The platform data for this board. 98 * @platform: The platform data for this board.
111 * @device: The platform device we bound to. 99 * @device: The platform device we bound to.
112 * @area: The IO area resource that came from request_mem_region().
113 * @clk: The clock resource for this controller. 100 * @clk: The clock resource for this controller.
114 * @regs: The area mapped for the hardware registers described by @area. 101 * @regs: The area mapped for the hardware registers.
115 * @sel_reg: Pointer to the register controlling the NAND selection. 102 * @sel_reg: Pointer to the register controlling the NAND selection.
116 * @sel_bit: The bit in @sel_reg to select the NAND chip. 103 * @sel_bit: The bit in @sel_reg to select the NAND chip.
117 * @mtd_count: The number of MTDs created from this controller. 104 * @mtd_count: The number of MTDs created from this controller.
@@ -128,7 +115,6 @@ struct s3c2410_nand_info {
128 115
129 /* device info */ 116 /* device info */
130 struct device *device; 117 struct device *device;
131 struct resource *area;
132 struct clk *clk; 118 struct clk *clk;
133 void __iomem *regs; 119 void __iomem *regs;
134 void __iomem *sel_reg; 120 void __iomem *sel_reg;
@@ -169,7 +155,11 @@ static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
169 155
170static inline int allow_clk_suspend(struct s3c2410_nand_info *info) 156static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
171{ 157{
172 return clock_stop; 158#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
159 return 1;
160#else
161 return 0;
162#endif
173} 163}
174 164
175/** 165/**
@@ -215,7 +205,8 @@ static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
215 pr_debug("result %d from %ld, %d\n", result, clk, wanted); 205 pr_debug("result %d from %ld, %d\n", result, clk, wanted);
216 206
217 if (result > max) { 207 if (result > max) {
218 printk("%d ns is too big for current clock rate %ld\n", wanted, clk); 208 pr_err("%d ns is too big for current clock rate %ld\n",
209 wanted, clk);
219 return -1; 210 return -1;
220 } 211 }
221 212
@@ -225,7 +216,7 @@ static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
225 return result; 216 return result;
226} 217}
227 218
228#define to_ns(ticks,clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk)) 219#define to_ns(ticks, clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk))
229 220
230/* controller setup */ 221/* controller setup */
231 222
@@ -268,7 +259,8 @@ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
268 } 259 }
269 260
270 dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n", 261 dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
271 tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), twrph1, to_ns(twrph1, clkrate)); 262 tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate),
263 twrph1, to_ns(twrph1, clkrate));
272 264
273 switch (info->cpu_type) { 265 switch (info->cpu_type) {
274 case TYPE_S3C2410: 266 case TYPE_S3C2410:
@@ -325,13 +317,13 @@ static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
325 if (ret < 0) 317 if (ret < 0)
326 return ret; 318 return ret;
327 319
328 switch (info->cpu_type) { 320 switch (info->cpu_type) {
329 case TYPE_S3C2410: 321 case TYPE_S3C2410:
330 default: 322 default:
331 break; 323 break;
332 324
333 case TYPE_S3C2440: 325 case TYPE_S3C2440:
334 case TYPE_S3C2412: 326 case TYPE_S3C2412:
335 /* enable the controller and de-assert nFCE */ 327 /* enable the controller and de-assert nFCE */
336 328
337 writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT); 329 writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT);
@@ -450,6 +442,7 @@ static int s3c2412_nand_devready(struct mtd_info *mtd)
450 442
451/* ECC handling functions */ 443/* ECC handling functions */
452 444
445#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
453static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat, 446static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
454 u_char *read_ecc, u_char *calc_ecc) 447 u_char *read_ecc, u_char *calc_ecc)
455{ 448{
@@ -463,10 +456,8 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
463 diff1 = read_ecc[1] ^ calc_ecc[1]; 456 diff1 = read_ecc[1] ^ calc_ecc[1];
464 diff2 = read_ecc[2] ^ calc_ecc[2]; 457 diff2 = read_ecc[2] ^ calc_ecc[2];
465 458
466 pr_debug("%s: rd %02x%02x%02x calc %02x%02x%02x diff %02x%02x%02x\n", 459 pr_debug("%s: rd %*phN calc %*phN diff %02x%02x%02x\n",
467 __func__, 460 __func__, 3, read_ecc, 3, calc_ecc,
468 read_ecc[0], read_ecc[1], read_ecc[2],
469 calc_ecc[0], calc_ecc[1], calc_ecc[2],
470 diff0, diff1, diff2); 461 diff0, diff1, diff2);
471 462
472 if (diff0 == 0 && diff1 == 0 && diff2 == 0) 463 if (diff0 == 0 && diff1 == 0 && diff2 == 0)
@@ -546,7 +537,8 @@ static void s3c2412_nand_enable_hwecc(struct mtd_info *mtd, int mode)
546 unsigned long ctrl; 537 unsigned long ctrl;
547 538
548 ctrl = readl(info->regs + S3C2440_NFCONT); 539 ctrl = readl(info->regs + S3C2440_NFCONT);
549 writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC, info->regs + S3C2440_NFCONT); 540 writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC,
541 info->regs + S3C2440_NFCONT);
550} 542}
551 543
552static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode) 544static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode)
@@ -558,7 +550,8 @@ static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode)
558 writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT); 550 writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT);
559} 551}
560 552
561static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) 553static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
554 u_char *ecc_code)
562{ 555{
563 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 556 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
564 557
@@ -566,13 +559,13 @@ static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u
566 ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1); 559 ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1);
567 ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2); 560 ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2);
568 561
569 pr_debug("%s: returning ecc %02x%02x%02x\n", __func__, 562 pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
570 ecc_code[0], ecc_code[1], ecc_code[2]);
571 563
572 return 0; 564 return 0;
573} 565}
574 566
575static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) 567static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
568 u_char *ecc_code)
576{ 569{
577 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 570 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
578 unsigned long ecc = readl(info->regs + S3C2412_NFMECC0); 571 unsigned long ecc = readl(info->regs + S3C2412_NFMECC0);
@@ -581,12 +574,13 @@ static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u
581 ecc_code[1] = ecc >> 8; 574 ecc_code[1] = ecc >> 8;
582 ecc_code[2] = ecc >> 16; 575 ecc_code[2] = ecc >> 16;
583 576
584 pr_debug("calculate_ecc: returning ecc %02x,%02x,%02x\n", ecc_code[0], ecc_code[1], ecc_code[2]); 577 pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
585 578
586 return 0; 579 return 0;
587} 580}
588 581
589static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) 582static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
583 u_char *ecc_code)
590{ 584{
591 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 585 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
592 unsigned long ecc = readl(info->regs + S3C2440_NFMECC0); 586 unsigned long ecc = readl(info->regs + S3C2440_NFMECC0);
@@ -599,6 +593,7 @@ static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u
599 593
600 return 0; 594 return 0;
601} 595}
596#endif
602 597
603/* over-ride the standard functions for a little more speed. We can 598/* over-ride the standard functions for a little more speed. We can
604 * use read/write block to move the data buffers to/from the controller 599 * use read/write block to move the data buffers to/from the controller
@@ -625,13 +620,15 @@ static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
625 } 620 }
626} 621}
627 622
628static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) 623static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf,
624 int len)
629{ 625{
630 struct nand_chip *this = mtd->priv; 626 struct nand_chip *this = mtd->priv;
631 writesb(this->IO_ADDR_W, buf, len); 627 writesb(this->IO_ADDR_W, buf, len);
632} 628}
633 629
634static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) 630static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf,
631 int len)
635{ 632{
636 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 633 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
637 634
@@ -675,7 +672,8 @@ static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
675 CPUFREQ_TRANSITION_NOTIFIER); 672 CPUFREQ_TRANSITION_NOTIFIER);
676} 673}
677 674
678static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info) 675static inline void
676s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
679{ 677{
680 cpufreq_unregister_notifier(&info->freq_transition, 678 cpufreq_unregister_notifier(&info->freq_transition,
681 CPUFREQ_TRANSITION_NOTIFIER); 679 CPUFREQ_TRANSITION_NOTIFIER);
@@ -687,7 +685,8 @@ static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
687 return 0; 685 return 0;
688} 686}
689 687
690static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info) 688static inline void
689s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
691{ 690{
692} 691}
693#endif 692#endif
@@ -717,29 +716,12 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
717 pr_debug("releasing mtd %d (%p)\n", mtdno, ptr); 716 pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
718 nand_release(&ptr->mtd); 717 nand_release(&ptr->mtd);
719 } 718 }
720
721 kfree(info->mtds);
722 } 719 }
723 720
724 /* free the common resources */ 721 /* free the common resources */
725 722
726 if (!IS_ERR(info->clk)) { 723 if (!IS_ERR(info->clk))
727 s3c2410_nand_clk_set_state(info, CLOCK_DISABLE); 724 s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
728 clk_put(info->clk);
729 }
730
731 if (info->regs != NULL) {
732 iounmap(info->regs);
733 info->regs = NULL;
734 }
735
736 if (info->area != NULL) {
737 release_resource(info->area);
738 kfree(info->area);
739 info->area = NULL;
740 }
741
742 kfree(info);
743 725
744 return 0; 726 return 0;
745} 727}
@@ -810,7 +792,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
810 dev_info(info->device, "System booted from NAND\n"); 792 dev_info(info->device, "System booted from NAND\n");
811 793
812 break; 794 break;
813 } 795 }
814 796
815 chip->IO_ADDR_R = chip->IO_ADDR_W; 797 chip->IO_ADDR_R = chip->IO_ADDR_W;
816 798
@@ -819,32 +801,31 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
819 nmtd->mtd.owner = THIS_MODULE; 801 nmtd->mtd.owner = THIS_MODULE;
820 nmtd->set = set; 802 nmtd->set = set;
821 803
822 if (hardware_ecc) { 804#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
805 chip->ecc.calculate = s3c2410_nand_calculate_ecc;
806 chip->ecc.correct = s3c2410_nand_correct_data;
807 chip->ecc.mode = NAND_ECC_HW;
808 chip->ecc.strength = 1;
809
810 switch (info->cpu_type) {
811 case TYPE_S3C2410:
812 chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
823 chip->ecc.calculate = s3c2410_nand_calculate_ecc; 813 chip->ecc.calculate = s3c2410_nand_calculate_ecc;
824 chip->ecc.correct = s3c2410_nand_correct_data; 814 break;
825 chip->ecc.mode = NAND_ECC_HW;
826 chip->ecc.strength = 1;
827
828 switch (info->cpu_type) {
829 case TYPE_S3C2410:
830 chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
831 chip->ecc.calculate = s3c2410_nand_calculate_ecc;
832 break;
833
834 case TYPE_S3C2412:
835 chip->ecc.hwctl = s3c2412_nand_enable_hwecc;
836 chip->ecc.calculate = s3c2412_nand_calculate_ecc;
837 break;
838
839 case TYPE_S3C2440:
840 chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
841 chip->ecc.calculate = s3c2440_nand_calculate_ecc;
842 break;
843 815
844 } 816 case TYPE_S3C2412:
845 } else { 817 chip->ecc.hwctl = s3c2412_nand_enable_hwecc;
846 chip->ecc.mode = NAND_ECC_SOFT; 818 chip->ecc.calculate = s3c2412_nand_calculate_ecc;
819 break;
820
821 case TYPE_S3C2440:
822 chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
823 chip->ecc.calculate = s3c2440_nand_calculate_ecc;
824 break;
847 } 825 }
826#else
827 chip->ecc.mode = NAND_ECC_SOFT;
828#endif
848 829
849 if (set->ecc_layout != NULL) 830 if (set->ecc_layout != NULL)
850 chip->ecc.layout = set->ecc_layout; 831 chip->ecc.layout = set->ecc_layout;
@@ -921,7 +902,7 @@ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
921static int s3c24xx_nand_probe(struct platform_device *pdev) 902static int s3c24xx_nand_probe(struct platform_device *pdev)
922{ 903{
923 struct s3c2410_platform_nand *plat = to_nand_plat(pdev); 904 struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
924 enum s3c_cpu_type cpu_type; 905 enum s3c_cpu_type cpu_type;
925 struct s3c2410_nand_info *info; 906 struct s3c2410_nand_info *info;
926 struct s3c2410_nand_mtd *nmtd; 907 struct s3c2410_nand_mtd *nmtd;
927 struct s3c2410_nand_set *sets; 908 struct s3c2410_nand_set *sets;
@@ -935,7 +916,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
935 916
936 pr_debug("s3c2410_nand_probe(%p)\n", pdev); 917 pr_debug("s3c2410_nand_probe(%p)\n", pdev);
937 918
938 info = kzalloc(sizeof(*info), GFP_KERNEL); 919 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
939 if (info == NULL) { 920 if (info == NULL) {
940 dev_err(&pdev->dev, "no memory for flash info\n"); 921 dev_err(&pdev->dev, "no memory for flash info\n");
941 err = -ENOMEM; 922 err = -ENOMEM;
@@ -949,7 +930,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
949 930
950 /* get the clock source and enable it */ 931 /* get the clock source and enable it */
951 932
952 info->clk = clk_get(&pdev->dev, "nand"); 933 info->clk = devm_clk_get(&pdev->dev, "nand");
953 if (IS_ERR(info->clk)) { 934 if (IS_ERR(info->clk)) {
954 dev_err(&pdev->dev, "failed to get clock\n"); 935 dev_err(&pdev->dev, "failed to get clock\n");
955 err = -ENOENT; 936 err = -ENOENT;
@@ -961,22 +942,14 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
961 /* allocate and map the resource */ 942 /* allocate and map the resource */
962 943
963 /* currently we assume we have the one resource */ 944 /* currently we assume we have the one resource */
964 res = pdev->resource; 945 res = pdev->resource;
965 size = resource_size(res); 946 size = resource_size(res);
966 947
967 info->area = request_mem_region(res->start, size, pdev->name); 948 info->device = &pdev->dev;
968 949 info->platform = plat;
969 if (info->area == NULL) { 950 info->cpu_type = cpu_type;
970 dev_err(&pdev->dev, "cannot reserve register region\n");
971 err = -ENOENT;
972 goto exit_error;
973 }
974
975 info->device = &pdev->dev;
976 info->platform = plat;
977 info->regs = ioremap(res->start, size);
978 info->cpu_type = cpu_type;
979 951
952 info->regs = devm_request_and_ioremap(&pdev->dev, res);
980 if (info->regs == NULL) { 953 if (info->regs == NULL) {
981 dev_err(&pdev->dev, "cannot reserve register region\n"); 954 dev_err(&pdev->dev, "cannot reserve register region\n");
982 err = -EIO; 955 err = -EIO;
@@ -999,7 +972,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
999 /* allocate our information */ 972 /* allocate our information */
1000 973
1001 size = nr_sets * sizeof(*info->mtds); 974 size = nr_sets * sizeof(*info->mtds);
1002 info->mtds = kzalloc(size, GFP_KERNEL); 975 info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
1003 if (info->mtds == NULL) { 976 if (info->mtds == NULL) {
1004 dev_err(&pdev->dev, "failed to allocate mtd storage\n"); 977 dev_err(&pdev->dev, "failed to allocate mtd storage\n");
1005 err = -ENOMEM; 978 err = -ENOMEM;
@@ -1011,7 +984,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
1011 nmtd = info->mtds; 984 nmtd = info->mtds;
1012 985
1013 for (setno = 0; setno < nr_sets; setno++, nmtd++) { 986 for (setno = 0; setno < nr_sets; setno++, nmtd++) {
1014 pr_debug("initialising set %d (%p, info %p)\n", setno, nmtd, info); 987 pr_debug("initialising set %d (%p, info %p)\n",
988 setno, nmtd, info);
1015 989
1016 s3c2410_nand_init_chip(info, nmtd, sets); 990 s3c2410_nand_init_chip(info, nmtd, sets);
1017 991
@@ -1134,20 +1108,7 @@ static struct platform_driver s3c24xx_nand_driver = {
1134 }, 1108 },
1135}; 1109};
1136 1110
1137static int __init s3c2410_nand_init(void) 1111module_platform_driver(s3c24xx_nand_driver);
1138{
1139 printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n");
1140
1141 return platform_driver_register(&s3c24xx_nand_driver);
1142}
1143
1144static void __exit s3c2410_nand_exit(void)
1145{
1146 platform_driver_unregister(&s3c24xx_nand_driver);
1147}
1148
1149module_init(s3c2410_nand_init);
1150module_exit(s3c2410_nand_exit);
1151 1112
1152MODULE_LICENSE("GPL"); 1113MODULE_LICENSE("GPL");
1153MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); 1114MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
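The s3c2410 hunks above follow one theme: resources obtained in probe() (the info structure, the clock, the register region) move to device-managed devm_* allocations, so the hand-written error and remove paths disappear, and the open-coded init/exit pair collapses into module_platform_driver(). A minimal sketch of that pattern, with hypothetical demo_* names rather than anything from the patch, and using devm_request_and_ioremap() as kernels of this era did (later kernels use devm_ioremap_resource()):

/* Sketch only: the "demo" names are illustrative, not part of the patch. */
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_info {
	struct clk *clk;
	void __iomem *regs;
};

static int demo_probe(struct platform_device *pdev)
{
	struct demo_info *info;
	struct resource *res;

	/* Freed automatically when the device is unbound. */
	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	/* No clk_put() needed in the error or remove paths. */
	info->clk = devm_clk_get(&pdev->dev, "nand");
	if (IS_ERR(info->clk))
		return PTR_ERR(info->clk);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* Requests the region and maps it; both are released automatically. */
	info->regs = devm_request_and_ioremap(&pdev->dev, res);
	if (!info->regs)
		return -EIO;

	platform_set_drvdata(pdev, info);
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	/* Nothing to unmap, release or free by hand. */
	return 0;
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.remove	= demo_remove,
	.driver	= {
		.name	= "demo",
		.owner	= THIS_MODULE,
	},
};

/* Expands to the module_init()/module_exit() boilerplate removed above. */
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");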
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index aa9b8a5e0b8f..4fbfe96e37a1 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -24,10 +24,12 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/interrupt.h>
27#include <linux/io.h> 28#include <linux/io.h>
28#include <linux/platform_device.h> 29#include <linux/platform_device.h>
29#include <linux/pm_runtime.h> 30#include <linux/pm_runtime.h>
30#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/string.h>
31 33
32#include <linux/mtd/mtd.h> 34#include <linux/mtd/mtd.h>
33#include <linux/mtd/nand.h> 35#include <linux/mtd/nand.h>
@@ -43,11 +45,17 @@ static struct nand_ecclayout flctl_4secc_oob_16 = {
43}; 45};
44 46
45static struct nand_ecclayout flctl_4secc_oob_64 = { 47static struct nand_ecclayout flctl_4secc_oob_64 = {
46 .eccbytes = 10, 48 .eccbytes = 4 * 10,
47 .eccpos = {48, 49, 50, 51, 52, 53, 54, 55, 56, 57}, 49 .eccpos = {
50 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
51 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
52 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
53 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
48 .oobfree = { 54 .oobfree = {
49 {.offset = 60, 55 {.offset = 2, .length = 4},
50 . length = 4} }, 56 {.offset = 16, .length = 6},
57 {.offset = 32, .length = 6},
58 {.offset = 48, .length = 6} },
51}; 59};
52 60
53static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; 61static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
@@ -61,15 +69,15 @@ static struct nand_bbt_descr flctl_4secc_smallpage = {
61 69
62static struct nand_bbt_descr flctl_4secc_largepage = { 70static struct nand_bbt_descr flctl_4secc_largepage = {
63 .options = NAND_BBT_SCAN2NDPAGE, 71 .options = NAND_BBT_SCAN2NDPAGE,
64 .offs = 58, 72 .offs = 0,
65 .len = 2, 73 .len = 2,
66 .pattern = scan_ff_pattern, 74 .pattern = scan_ff_pattern,
67}; 75};
68 76
69static void empty_fifo(struct sh_flctl *flctl) 77static void empty_fifo(struct sh_flctl *flctl)
70{ 78{
71 writel(0x000c0000, FLINTDMACR(flctl)); /* FIFO Clear */ 79 writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
72 writel(0x00000000, FLINTDMACR(flctl)); /* Clear Error flags */ 80 writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
73} 81}
74 82
75static void start_translation(struct sh_flctl *flctl) 83static void start_translation(struct sh_flctl *flctl)
@@ -158,27 +166,56 @@ static void wait_wfifo_ready(struct sh_flctl *flctl)
158 timeout_error(flctl, __func__); 166 timeout_error(flctl, __func__);
159} 167}
160 168
161static int wait_recfifo_ready(struct sh_flctl *flctl, int sector_number) 169static enum flctl_ecc_res_t wait_recfifo_ready
170 (struct sh_flctl *flctl, int sector_number)
162{ 171{
163 uint32_t timeout = LOOP_TIMEOUT_MAX; 172 uint32_t timeout = LOOP_TIMEOUT_MAX;
164 int checked[4];
165 void __iomem *ecc_reg[4]; 173 void __iomem *ecc_reg[4];
166 int i; 174 int i;
175 int state = FL_SUCCESS;
167 uint32_t data, size; 176 uint32_t data, size;
168 177
169 memset(checked, 0, sizeof(checked)); 178 /*
 170		 179	 * First this loop checks in FLDTCNTR if we are ready to read out the
180 * oob data. This is the case if either all went fine without errors or
181 * if the bottom part of the loop corrected the errors or marked them as
182 * uncorrectable and the controller is given time to push the data into
183 * the FIFO.
184 */
171 while (timeout--) { 185 while (timeout--) {
186 /* check if all is ok and we can read out the OOB */
172 size = readl(FLDTCNTR(flctl)) >> 24; 187 size = readl(FLDTCNTR(flctl)) >> 24;
173 if (size & 0xFF) 188 if ((size & 0xFF) == 4)
174 return 0; /* success */ 189 return state;
190
191 /* check if a correction code has been calculated */
192 if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
193 /*
194 * either we wait for the fifo to be filled or a
195 * correction pattern is being generated
196 */
197 udelay(1);
198 continue;
199 }
175 200
176 if (readl(FL4ECCCR(flctl)) & _4ECCFA) 201 /* check for an uncorrectable error */
177 return 1; /* can't correct */ 202 if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
203 /* check if we face a non-empty page */
204 for (i = 0; i < 512; i++) {
205 if (flctl->done_buff[i] != 0xff) {
206 state = FL_ERROR; /* can't correct */
207 break;
208 }
209 }
178 210
179 udelay(1); 211 if (state == FL_SUCCESS)
180 if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) 212 dev_dbg(&flctl->pdev->dev,
213 "reading empty sector %d, ecc error ignored\n",
214 sector_number);
215
216 writel(0, FL4ECCCR(flctl));
181 continue; 217 continue;
218 }
182 219
183 /* start error correction */ 220 /* start error correction */
184 ecc_reg[0] = FL4ECCRESULT0(flctl); 221 ecc_reg[0] = FL4ECCRESULT0(flctl);
@@ -187,28 +224,26 @@ static int wait_recfifo_ready(struct sh_flctl *flctl, int sector_number)
187 ecc_reg[3] = FL4ECCRESULT3(flctl); 224 ecc_reg[3] = FL4ECCRESULT3(flctl);
188 225
189 for (i = 0; i < 3; i++) { 226 for (i = 0; i < 3; i++) {
227 uint8_t org;
228 int index;
229
190 data = readl(ecc_reg[i]); 230 data = readl(ecc_reg[i]);
191 if (data != INIT_FL4ECCRESULT_VAL && !checked[i]) {
192 uint8_t org;
193 int index;
194
195 if (flctl->page_size)
196 index = (512 * sector_number) +
197 (data >> 16);
198 else
199 index = data >> 16;
200
201 org = flctl->done_buff[index];
202 flctl->done_buff[index] = org ^ (data & 0xFF);
203 checked[i] = 1;
204 }
205 }
206 231
232 if (flctl->page_size)
233 index = (512 * sector_number) +
234 (data >> 16);
235 else
236 index = data >> 16;
237
238 org = flctl->done_buff[index];
239 flctl->done_buff[index] = org ^ (data & 0xFF);
240 }
241 state = FL_REPAIRABLE;
207 writel(0, FL4ECCCR(flctl)); 242 writel(0, FL4ECCCR(flctl));
208 } 243 }
209 244
210 timeout_error(flctl, __func__); 245 timeout_error(flctl, __func__);
211 return 1; /* timeout */ 246 return FL_TIMEOUT; /* timeout */
212} 247}
213 248
214static void wait_wecfifo_ready(struct sh_flctl *flctl) 249static void wait_wecfifo_ready(struct sh_flctl *flctl)
@@ -241,31 +276,33 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
241{ 276{
242 int i, len_4align; 277 int i, len_4align;
243 unsigned long *buf = (unsigned long *)&flctl->done_buff[offset]; 278 unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
244 void *fifo_addr = (void *)FLDTFIFO(flctl);
245 279
246 len_4align = (rlen + 3) / 4; 280 len_4align = (rlen + 3) / 4;
247 281
248 for (i = 0; i < len_4align; i++) { 282 for (i = 0; i < len_4align; i++) {
249 wait_rfifo_ready(flctl); 283 wait_rfifo_ready(flctl);
250 buf[i] = readl(fifo_addr); 284 buf[i] = readl(FLDTFIFO(flctl));
251 buf[i] = be32_to_cpu(buf[i]); 285 buf[i] = be32_to_cpu(buf[i]);
252 } 286 }
253} 287}
254 288
255static int read_ecfiforeg(struct sh_flctl *flctl, uint8_t *buff, int sector) 289static enum flctl_ecc_res_t read_ecfiforeg
290 (struct sh_flctl *flctl, uint8_t *buff, int sector)
256{ 291{
257 int i; 292 int i;
293 enum flctl_ecc_res_t res;
258 unsigned long *ecc_buf = (unsigned long *)buff; 294 unsigned long *ecc_buf = (unsigned long *)buff;
259 void *fifo_addr = (void *)FLECFIFO(flctl);
260 295
261 for (i = 0; i < 4; i++) { 296 res = wait_recfifo_ready(flctl , sector);
262 if (wait_recfifo_ready(flctl , sector)) 297
263 return 1; 298 if (res != FL_ERROR) {
264 ecc_buf[i] = readl(fifo_addr); 299 for (i = 0; i < 4; i++) {
265 ecc_buf[i] = be32_to_cpu(ecc_buf[i]); 300 ecc_buf[i] = readl(FLECFIFO(flctl));
301 ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
302 }
266 } 303 }
267 304
268 return 0; 305 return res;
269} 306}
270 307
271static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset) 308static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
@@ -281,6 +318,18 @@ static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
281 } 318 }
282} 319}
283 320
321static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
322{
323 int i, len_4align;
324 unsigned long *data = (unsigned long *)&flctl->done_buff[offset];
325
326 len_4align = (rlen + 3) / 4;
327 for (i = 0; i < len_4align; i++) {
328 wait_wecfifo_ready(flctl);
329 writel(cpu_to_be32(data[i]), FLECFIFO(flctl));
330 }
331}
332
284static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val) 333static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
285{ 334{
286 struct sh_flctl *flctl = mtd_to_flctl(mtd); 335 struct sh_flctl *flctl = mtd_to_flctl(mtd);
@@ -346,73 +395,65 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
346static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 395static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
347 uint8_t *buf, int oob_required, int page) 396 uint8_t *buf, int oob_required, int page)
348{ 397{
349 int i, eccsize = chip->ecc.size; 398 chip->read_buf(mtd, buf, mtd->writesize);
350 int eccbytes = chip->ecc.bytes; 399 if (oob_required)
351 int eccsteps = chip->ecc.steps; 400 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
352 uint8_t *p = buf;
353 struct sh_flctl *flctl = mtd_to_flctl(mtd);
354
355 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
356 chip->read_buf(mtd, p, eccsize);
357
358 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
359 if (flctl->hwecc_cant_correct[i])
360 mtd->ecc_stats.failed++;
361 else
362 mtd->ecc_stats.corrected += 0; /* FIXME */
363 }
364
365 return 0; 401 return 0;
366} 402}
367 403
368static void flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 404static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
369 const uint8_t *buf, int oob_required) 405 const uint8_t *buf, int oob_required)
370{ 406{
371 int i, eccsize = chip->ecc.size; 407 chip->write_buf(mtd, buf, mtd->writesize);
372 int eccbytes = chip->ecc.bytes; 408 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
373 int eccsteps = chip->ecc.steps; 409 return 0;
374 const uint8_t *p = buf;
375
376 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
377 chip->write_buf(mtd, p, eccsize);
378} 410}
379 411
380static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr) 412static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
381{ 413{
382 struct sh_flctl *flctl = mtd_to_flctl(mtd); 414 struct sh_flctl *flctl = mtd_to_flctl(mtd);
383 int sector, page_sectors; 415 int sector, page_sectors;
416 enum flctl_ecc_res_t ecc_result;
384 417
385 if (flctl->page_size) 418 page_sectors = flctl->page_size ? 4 : 1;
386 page_sectors = 4;
387 else
388 page_sectors = 1;
389
390 writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
391 FLCMNCR(flctl));
392 419
393 set_cmd_regs(mtd, NAND_CMD_READ0, 420 set_cmd_regs(mtd, NAND_CMD_READ0,
394 (NAND_CMD_READSTART << 8) | NAND_CMD_READ0); 421 (NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
395 422
396 for (sector = 0; sector < page_sectors; sector++) { 423 writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
397 int ret; 424 FLCMNCR(flctl));
425 writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
426 writel(page_addr << 2, FLADR(flctl));
398 427
399 empty_fifo(flctl); 428 empty_fifo(flctl);
400 writel(readl(FLCMDCR(flctl)) | 1, FLCMDCR(flctl)); 429 start_translation(flctl);
401 writel(page_addr << 2 | sector, FLADR(flctl));
402 430
403 start_translation(flctl); 431 for (sector = 0; sector < page_sectors; sector++) {
404 read_fiforeg(flctl, 512, 512 * sector); 432 read_fiforeg(flctl, 512, 512 * sector);
405 433
406 ret = read_ecfiforeg(flctl, 434 ecc_result = read_ecfiforeg(flctl,
407 &flctl->done_buff[mtd->writesize + 16 * sector], 435 &flctl->done_buff[mtd->writesize + 16 * sector],
408 sector); 436 sector);
409 437
410 if (ret) 438 switch (ecc_result) {
411 flctl->hwecc_cant_correct[sector] = 1; 439 case FL_REPAIRABLE:
412 440 dev_info(&flctl->pdev->dev,
413 writel(0x0, FL4ECCCR(flctl)); 441 "applied ecc on page 0x%x", page_addr);
414 wait_completion(flctl); 442 flctl->mtd.ecc_stats.corrected++;
443 break;
444 case FL_ERROR:
445 dev_warn(&flctl->pdev->dev,
446 "page 0x%x contains corrupted data\n",
447 page_addr);
448 flctl->mtd.ecc_stats.failed++;
449 break;
450 default:
451 ;
452 }
415 } 453 }
454
455 wait_completion(flctl);
456
416 writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT), 457 writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
417 FLCMNCR(flctl)); 458 FLCMNCR(flctl));
418} 459}
@@ -420,30 +461,20 @@ static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
420static void execmd_read_oob(struct mtd_info *mtd, int page_addr) 461static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
421{ 462{
422 struct sh_flctl *flctl = mtd_to_flctl(mtd); 463 struct sh_flctl *flctl = mtd_to_flctl(mtd);
464 int page_sectors = flctl->page_size ? 4 : 1;
465 int i;
423 466
424 set_cmd_regs(mtd, NAND_CMD_READ0, 467 set_cmd_regs(mtd, NAND_CMD_READ0,
425 (NAND_CMD_READSTART << 8) | NAND_CMD_READ0); 468 (NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
426 469
427 empty_fifo(flctl); 470 empty_fifo(flctl);
428 if (flctl->page_size) {
429 int i;
430 /* In case that the page size is 2k */
431 for (i = 0; i < 16 * 3; i++)
432 flctl->done_buff[i] = 0xFF;
433
434 set_addr(mtd, 3 * 528 + 512, page_addr);
435 writel(16, FLDTCNTR(flctl));
436 471
437 start_translation(flctl); 472 for (i = 0; i < page_sectors; i++) {
438 read_fiforeg(flctl, 16, 16 * 3); 473 set_addr(mtd, (512 + 16) * i + 512 , page_addr);
439 wait_completion(flctl);
440 } else {
441 /* In case that the page size is 512b */
442 set_addr(mtd, 512, page_addr);
443 writel(16, FLDTCNTR(flctl)); 474 writel(16, FLDTCNTR(flctl));
444 475
445 start_translation(flctl); 476 start_translation(flctl);
446 read_fiforeg(flctl, 16, 0); 477 read_fiforeg(flctl, 16, 16 * i);
447 wait_completion(flctl); 478 wait_completion(flctl);
448 } 479 }
449} 480}
@@ -451,34 +482,26 @@ static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
451static void execmd_write_page_sector(struct mtd_info *mtd) 482static void execmd_write_page_sector(struct mtd_info *mtd)
452{ 483{
453 struct sh_flctl *flctl = mtd_to_flctl(mtd); 484 struct sh_flctl *flctl = mtd_to_flctl(mtd);
454 int i, page_addr = flctl->seqin_page_addr; 485 int page_addr = flctl->seqin_page_addr;
455 int sector, page_sectors; 486 int sector, page_sectors;
456 487
457 if (flctl->page_size) 488 page_sectors = flctl->page_size ? 4 : 1;
458 page_sectors = 4;
459 else
460 page_sectors = 1;
461
462 writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
463 489
464 set_cmd_regs(mtd, NAND_CMD_PAGEPROG, 490 set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
465 (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN); 491 (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
466 492
467 for (sector = 0; sector < page_sectors; sector++) { 493 empty_fifo(flctl);
468 empty_fifo(flctl); 494 writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
469 writel(readl(FLCMDCR(flctl)) | 1, FLCMDCR(flctl)); 495 writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
470 writel(page_addr << 2 | sector, FLADR(flctl)); 496 writel(page_addr << 2, FLADR(flctl));
497 start_translation(flctl);
471 498
472 start_translation(flctl); 499 for (sector = 0; sector < page_sectors; sector++) {
473 write_fiforeg(flctl, 512, 512 * sector); 500 write_fiforeg(flctl, 512, 512 * sector);
474 501 write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
475 for (i = 0; i < 4; i++) {
476 wait_wecfifo_ready(flctl); /* wait for write ready */
477 writel(0xFFFFFFFF, FLECFIFO(flctl));
478 }
479 wait_completion(flctl);
480 } 502 }
481 503
504 wait_completion(flctl);
482 writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl)); 505 writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
483} 506}
484 507
@@ -488,18 +511,12 @@ static void execmd_write_oob(struct mtd_info *mtd)
488 int page_addr = flctl->seqin_page_addr; 511 int page_addr = flctl->seqin_page_addr;
489 int sector, page_sectors; 512 int sector, page_sectors;
490 513
491 if (flctl->page_size) { 514 page_sectors = flctl->page_size ? 4 : 1;
492 sector = 3;
493 page_sectors = 4;
494 } else {
495 sector = 0;
496 page_sectors = 1;
497 }
498 515
499 set_cmd_regs(mtd, NAND_CMD_PAGEPROG, 516 set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
500 (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN); 517 (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
501 518
502 for (; sector < page_sectors; sector++) { 519 for (sector = 0; sector < page_sectors; sector++) {
503 empty_fifo(flctl); 520 empty_fifo(flctl);
504 set_addr(mtd, sector * 528 + 512, page_addr); 521 set_addr(mtd, sector * 528 + 512, page_addr);
505 writel(16, FLDTCNTR(flctl)); /* set read size */ 522 writel(16, FLDTCNTR(flctl)); /* set read size */
@@ -731,10 +748,9 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
731static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 748static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
732{ 749{
733 struct sh_flctl *flctl = mtd_to_flctl(mtd); 750 struct sh_flctl *flctl = mtd_to_flctl(mtd);
734 int i, index = flctl->index; 751 int index = flctl->index;
735 752
736 for (i = 0; i < len; i++) 753 memcpy(&flctl->done_buff[index], buf, len);
737 flctl->done_buff[index + i] = buf[i];
738 flctl->index += len; 754 flctl->index += len;
739} 755}
740 756
@@ -763,20 +779,11 @@ static uint16_t flctl_read_word(struct mtd_info *mtd)
763 779
764static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 780static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
765{ 781{
766 int i; 782 struct sh_flctl *flctl = mtd_to_flctl(mtd);
767 783 int index = flctl->index;
768 for (i = 0; i < len; i++)
769 buf[i] = flctl_read_byte(mtd);
770}
771
772static int flctl_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
773{
774 int i;
775 784
776 for (i = 0; i < len; i++) 785 memcpy(buf, &flctl->done_buff[index], len);
777 if (buf[i] != flctl_read_byte(mtd)) 786 flctl->index += len;
778 return -EFAULT;
779 return 0;
780} 787}
781 788
782static int flctl_chip_init_tail(struct mtd_info *mtd) 789static int flctl_chip_init_tail(struct mtd_info *mtd)
@@ -831,7 +838,7 @@ static int flctl_chip_init_tail(struct mtd_info *mtd)
831 chip->ecc.mode = NAND_ECC_HW; 838 chip->ecc.mode = NAND_ECC_HW;
832 839
833 /* 4 symbols ECC enabled */ 840 /* 4 symbols ECC enabled */
834 flctl->flcmncr_base |= _4ECCEN | ECCPOS2 | ECCPOS_02; 841 flctl->flcmncr_base |= _4ECCEN;
835 } else { 842 } else {
836 chip->ecc.mode = NAND_ECC_SOFT; 843 chip->ecc.mode = NAND_ECC_SOFT;
837 } 844 }
@@ -839,6 +846,16 @@ static int flctl_chip_init_tail(struct mtd_info *mtd)
839 return 0; 846 return 0;
840} 847}
841 848
849static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
850{
851 struct sh_flctl *flctl = dev_id;
852
853 dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
854 writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
855
856 return IRQ_HANDLED;
857}
858
842static int __devinit flctl_probe(struct platform_device *pdev) 859static int __devinit flctl_probe(struct platform_device *pdev)
843{ 860{
844 struct resource *res; 861 struct resource *res;
@@ -847,6 +864,7 @@ static int __devinit flctl_probe(struct platform_device *pdev)
847 struct nand_chip *nand; 864 struct nand_chip *nand;
848 struct sh_flctl_platform_data *pdata; 865 struct sh_flctl_platform_data *pdata;
849 int ret = -ENXIO; 866 int ret = -ENXIO;
867 int irq;
850 868
851 pdata = pdev->dev.platform_data; 869 pdata = pdev->dev.platform_data;
852 if (pdata == NULL) { 870 if (pdata == NULL) {
@@ -872,14 +890,27 @@ static int __devinit flctl_probe(struct platform_device *pdev)
872 goto err_iomap; 890 goto err_iomap;
873 } 891 }
874 892
893 irq = platform_get_irq(pdev, 0);
894 if (irq < 0) {
895 dev_err(&pdev->dev, "failed to get flste irq data\n");
896 goto err_flste;
897 }
898
899 ret = request_irq(irq, flctl_handle_flste, IRQF_SHARED, "flste", flctl);
900 if (ret) {
901 dev_err(&pdev->dev, "request interrupt failed.\n");
902 goto err_flste;
903 }
904
875 platform_set_drvdata(pdev, flctl); 905 platform_set_drvdata(pdev, flctl);
876 flctl_mtd = &flctl->mtd; 906 flctl_mtd = &flctl->mtd;
877 nand = &flctl->chip; 907 nand = &flctl->chip;
878 flctl_mtd->priv = nand; 908 flctl_mtd->priv = nand;
879 flctl->pdev = pdev; 909 flctl->pdev = pdev;
880 flctl->flcmncr_base = pdata->flcmncr_val;
881 flctl->hwecc = pdata->has_hwecc; 910 flctl->hwecc = pdata->has_hwecc;
882 flctl->holden = pdata->use_holden; 911 flctl->holden = pdata->use_holden;
912 flctl->flcmncr_base = pdata->flcmncr_val;
913 flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;
883 914
884 /* Set address of hardware control function */ 915 /* Set address of hardware control function */
885 /* 20 us command delay time */ 916 /* 20 us command delay time */
@@ -888,7 +919,6 @@ static int __devinit flctl_probe(struct platform_device *pdev)
888 nand->read_byte = flctl_read_byte; 919 nand->read_byte = flctl_read_byte;
889 nand->write_buf = flctl_write_buf; 920 nand->write_buf = flctl_write_buf;
890 nand->read_buf = flctl_read_buf; 921 nand->read_buf = flctl_read_buf;
891 nand->verify_buf = flctl_verify_buf;
892 nand->select_chip = flctl_select_chip; 922 nand->select_chip = flctl_select_chip;
893 nand->cmdfunc = flctl_cmdfunc; 923 nand->cmdfunc = flctl_cmdfunc;
894 924
@@ -918,6 +948,9 @@ static int __devinit flctl_probe(struct platform_device *pdev)
918 948
919err_chip: 949err_chip:
920 pm_runtime_disable(&pdev->dev); 950 pm_runtime_disable(&pdev->dev);
951 free_irq(irq, flctl);
952err_flste:
953 iounmap(flctl->reg);
921err_iomap: 954err_iomap:
922 kfree(flctl); 955 kfree(flctl);
923 return ret; 956 return ret;
@@ -929,6 +962,8 @@ static int __devexit flctl_remove(struct platform_device *pdev)
929 962
930 nand_release(&flctl->mtd); 963 nand_release(&flctl->mtd);
931 pm_runtime_disable(&pdev->dev); 964 pm_runtime_disable(&pdev->dev);
965 free_irq(platform_get_irq(pdev, 0), flctl);
966 iounmap(flctl->reg);
932 kfree(flctl); 967 kfree(flctl);
933 968
934 return 0; 969 return 0;
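A quick sanity check on the new flctl_4secc_oob_64 layout above, for a 2048-byte page whose 64-byte OOB is split into four 16-byte chunks (one per 512-byte sector): the eccpos offsets 6-15, 22-31, 38-47 and 54-63 account for 4 sectors * 10 ECC bytes = 40 bytes; the oobfree entries contribute 4 + 6 + 6 + 6 = 22 bytes; and the remaining 2 bytes at offsets 0-1 are the bad-block pattern that flctl_4secc_largepage now scans (.offs = 0, .len = 2). 40 + 22 + 2 = 64, so the whole OOB is accounted for. On the read side, wait_recfifo_ready() now reports one of FL_SUCCESS, FL_REPAIRABLE, FL_ERROR or FL_TIMEOUT per sector, and execmd_read_page_sector() folds that into mtd->ecc_stats, which is what lets flctl_read_page_hwecc() shrink to plain read_buf() calls.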
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index e02b08bcf0c0..f3f28fafbf7a 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -98,24 +98,6 @@ static uint16_t socrates_nand_read_word(struct mtd_info *mtd)
98 return word; 98 return word;
99} 99}
100 100
101/**
102 * socrates_nand_verify_buf - Verify chip data against buffer
103 * @mtd: MTD device structure
104 * @buf: buffer containing the data to compare
105 * @len: number of bytes to compare
106 */
107static int socrates_nand_verify_buf(struct mtd_info *mtd, const u8 *buf,
108 int len)
109{
110 int i;
111
112 for (i = 0; i < len; i++) {
113 if (buf[i] != socrates_nand_read_byte(mtd))
114 return -EFAULT;
115 }
116 return 0;
117}
118
119/* 101/*
120 * Hardware specific access to control-lines 102 * Hardware specific access to control-lines
121 */ 103 */
@@ -201,7 +183,6 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
201 nand_chip->read_word = socrates_nand_read_word; 183 nand_chip->read_word = socrates_nand_read_word;
202 nand_chip->write_buf = socrates_nand_write_buf; 184 nand_chip->write_buf = socrates_nand_write_buf;
203 nand_chip->read_buf = socrates_nand_read_buf; 185 nand_chip->read_buf = socrates_nand_read_buf;
204 nand_chip->verify_buf = socrates_nand_verify_buf;
205 nand_chip->dev_ready = socrates_nand_device_ready; 186 nand_chip->dev_ready = socrates_nand_device_ready;
206 187
207 nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */ 188 nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 5aa518081c51..508e9e04b092 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -256,18 +256,6 @@ static void tmio_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
256 tmio_ioread16_rep(tmio->fcr + FCR_DATA, buf, len >> 1); 256 tmio_ioread16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
257} 257}
258 258
259static int
260tmio_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
261{
262 struct tmio_nand *tmio = mtd_to_tmio(mtd);
263 u16 *p = (u16 *) buf;
264
265 for (len >>= 1; len; len--)
266 if (*(p++) != tmio_ioread16(tmio->fcr + FCR_DATA))
267 return -EFAULT;
268 return 0;
269}
270
271static void tmio_nand_enable_hwecc(struct mtd_info *mtd, int mode) 259static void tmio_nand_enable_hwecc(struct mtd_info *mtd, int mode)
272{ 260{
273 struct tmio_nand *tmio = mtd_to_tmio(mtd); 261 struct tmio_nand *tmio = mtd_to_tmio(mtd);
@@ -424,7 +412,6 @@ static int tmio_probe(struct platform_device *dev)
424 nand_chip->read_byte = tmio_nand_read_byte; 412 nand_chip->read_byte = tmio_nand_read_byte;
425 nand_chip->write_buf = tmio_nand_write_buf; 413 nand_chip->write_buf = tmio_nand_write_buf;
426 nand_chip->read_buf = tmio_nand_read_buf; 414 nand_chip->read_buf = tmio_nand_read_buf;
427 nand_chip->verify_buf = tmio_nand_verify_buf;
428 415
429 /* set eccmode using hardware ECC */ 416 /* set eccmode using hardware ECC */
430 nand_chip->ecc.mode = NAND_ECC_HW; 417 nand_chip->ecc.mode = NAND_ECC_HW;
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 26398dcf21cf..e3d7266e256f 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -131,18 +131,6 @@ static void txx9ndfmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
131 *buf++ = __raw_readl(ndfdtr); 131 *buf++ = __raw_readl(ndfdtr);
132} 132}
133 133
134static int txx9ndfmc_verify_buf(struct mtd_info *mtd, const uint8_t *buf,
135 int len)
136{
137 struct platform_device *dev = mtd_to_platdev(mtd);
138 void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
139
140 while (len--)
141 if (*buf++ != (uint8_t)__raw_readl(ndfdtr))
142 return -EFAULT;
143 return 0;
144}
145
146static void txx9ndfmc_cmd_ctrl(struct mtd_info *mtd, int cmd, 134static void txx9ndfmc_cmd_ctrl(struct mtd_info *mtd, int cmd,
147 unsigned int ctrl) 135 unsigned int ctrl)
148{ 136{
@@ -346,7 +334,6 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
346 chip->read_byte = txx9ndfmc_read_byte; 334 chip->read_byte = txx9ndfmc_read_byte;
347 chip->read_buf = txx9ndfmc_read_buf; 335 chip->read_buf = txx9ndfmc_read_buf;
348 chip->write_buf = txx9ndfmc_write_buf; 336 chip->write_buf = txx9ndfmc_write_buf;
349 chip->verify_buf = txx9ndfmc_verify_buf;
350 chip->cmd_ctrl = txx9ndfmc_cmd_ctrl; 337 chip->cmd_ctrl = txx9ndfmc_cmd_ctrl;
351 chip->dev_ready = txx9ndfmc_dev_ready; 338 chip->dev_ready = txx9ndfmc_dev_ready;
352 chip->ecc.calculate = txx9ndfmc_calculate_ecc; 339 chip->ecc.calculate = txx9ndfmc_calculate_ecc;
diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c
new file mode 100644
index 000000000000..3f81dc8f214c
--- /dev/null
+++ b/drivers/mtd/nand/xway_nand.c
@@ -0,0 +1,201 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright © 2012 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/mtd/nand.h>
10#include <linux/of_gpio.h>
11#include <linux/of_platform.h>
12
13#include <lantiq_soc.h>
14
15/* nand registers */
16#define EBU_ADDSEL1 0x24
17#define EBU_NAND_CON 0xB0
18#define EBU_NAND_WAIT 0xB4
19#define EBU_NAND_ECC0 0xB8
20#define EBU_NAND_ECC_AC 0xBC
21
22/* nand commands */
23#define NAND_CMD_ALE (1 << 2)
24#define NAND_CMD_CLE (1 << 3)
25#define NAND_CMD_CS (1 << 4)
26#define NAND_WRITE_CMD_RESET 0xff
27#define NAND_WRITE_CMD (NAND_CMD_CS | NAND_CMD_CLE)
28#define NAND_WRITE_ADDR (NAND_CMD_CS | NAND_CMD_ALE)
29#define NAND_WRITE_DATA (NAND_CMD_CS)
30#define NAND_READ_DATA (NAND_CMD_CS)
31#define NAND_WAIT_WR_C (1 << 3)
32#define NAND_WAIT_RD (0x1)
33
 34/* we need to tell the EBU which address we mapped the nand to */
35#define ADDSEL1_MASK(x) (x << 4)
36#define ADDSEL1_REGEN 1
37
38/* we need to tell the EBU that we have nand attached and set it up properly */
39#define BUSCON1_SETUP (1 << 22)
40#define BUSCON1_BCGEN_RES (0x3 << 12)
41#define BUSCON1_WAITWRC2 (2 << 8)
42#define BUSCON1_WAITRDC2 (2 << 6)
43#define BUSCON1_HOLDC1 (1 << 4)
44#define BUSCON1_RECOVC1 (1 << 2)
45#define BUSCON1_CMULT4 1
46
47#define NAND_CON_CE (1 << 20)
48#define NAND_CON_OUT_CS1 (1 << 10)
49#define NAND_CON_IN_CS1 (1 << 8)
50#define NAND_CON_PRE_P (1 << 7)
51#define NAND_CON_WP_P (1 << 6)
52#define NAND_CON_SE_P (1 << 5)
53#define NAND_CON_CS_P (1 << 4)
54#define NAND_CON_CSMUX (1 << 1)
55#define NAND_CON_NANDM 1
56
57static void xway_reset_chip(struct nand_chip *chip)
58{
59 unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W;
60 unsigned long flags;
61
62 nandaddr &= ~NAND_WRITE_ADDR;
63 nandaddr |= NAND_WRITE_CMD;
64
65 /* finish with a reset */
66 spin_lock_irqsave(&ebu_lock, flags);
67 writeb(NAND_WRITE_CMD_RESET, (void __iomem *) nandaddr);
68 while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
69 ;
70 spin_unlock_irqrestore(&ebu_lock, flags);
71}
72
73static void xway_select_chip(struct mtd_info *mtd, int chip)
74{
75
76 switch (chip) {
77 case -1:
78 ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
79 ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
80 break;
81 case 0:
82 ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
83 ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
84 break;
85 default:
86 BUG();
87 }
88}
89
90static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
91{
92 struct nand_chip *this = mtd->priv;
93 unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
94 unsigned long flags;
95
96 if (ctrl & NAND_CTRL_CHANGE) {
97 nandaddr &= ~(NAND_WRITE_CMD | NAND_WRITE_ADDR);
98 if (ctrl & NAND_CLE)
99 nandaddr |= NAND_WRITE_CMD;
100 else
101 nandaddr |= NAND_WRITE_ADDR;
102 this->IO_ADDR_W = (void __iomem *) nandaddr;
103 }
104
105 if (cmd != NAND_CMD_NONE) {
106 spin_lock_irqsave(&ebu_lock, flags);
107 writeb(cmd, this->IO_ADDR_W);
108 while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
109 ;
110 spin_unlock_irqrestore(&ebu_lock, flags);
111 }
112}
113
114static int xway_dev_ready(struct mtd_info *mtd)
115{
116 return ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD;
117}
118
119static unsigned char xway_read_byte(struct mtd_info *mtd)
120{
121 struct nand_chip *this = mtd->priv;
122 unsigned long nandaddr = (unsigned long) this->IO_ADDR_R;
123 unsigned long flags;
124 int ret;
125
126 spin_lock_irqsave(&ebu_lock, flags);
127 ret = ltq_r8((void __iomem *)(nandaddr + NAND_READ_DATA));
128 spin_unlock_irqrestore(&ebu_lock, flags);
129
130 return ret;
131}
132
133static int xway_nand_probe(struct platform_device *pdev)
134{
135 struct nand_chip *this = platform_get_drvdata(pdev);
136 unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
137 const __be32 *cs = of_get_property(pdev->dev.of_node,
138 "lantiq,cs", NULL);
139 u32 cs_flag = 0;
140
141 /* load our CS from the DT. Either we find a valid 1 or default to 0 */
142 if (cs && (*cs == 1))
143 cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
144
145 /* setup the EBU to run in NAND mode on our base addr */
146 ltq_ebu_w32(CPHYSADDR(nandaddr)
147 | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
148
149 ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
150 | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
151 | BUSCON1_CMULT4, LTQ_EBU_BUSCON1);
152
153 ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
154 | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
155 | cs_flag, EBU_NAND_CON);
156
157 /* finish with a reset */
158 xway_reset_chip(this);
159
160 return 0;
161}
162
163/* allow users to override the partition in DT using the cmdline */
164static const char *part_probes[] = { "cmdlinepart", "ofpart", NULL };
165
166static struct platform_nand_data xway_nand_data = {
167 .chip = {
168 .nr_chips = 1,
169 .chip_delay = 30,
170 .part_probe_types = part_probes,
171 },
172 .ctrl = {
173 .probe = xway_nand_probe,
174 .cmd_ctrl = xway_cmd_ctrl,
175 .dev_ready = xway_dev_ready,
176 .select_chip = xway_select_chip,
177 .read_byte = xway_read_byte,
178 }
179};
180
181/*
 182 * Try to find the node inside the DT. If it is available, attach our
 183 * platform_nand_data.
184 */
185static int __init xway_register_nand(void)
186{
187 struct device_node *node;
188 struct platform_device *pdev;
189
190 node = of_find_compatible_node(NULL, NULL, "lantiq,nand-xway");
191 if (!node)
192 return -ENOENT;
193 pdev = of_find_device_by_node(node);
194 if (!pdev)
195 return -EINVAL;
196 pdev->dev.platform_data = &xway_nand_data;
197 of_node_put(node);
198 return 0;
199}
200
201subsys_initcall(xway_register_nand);
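For readers unfamiliar with the cmd_ctrl() convention used by xway_cmd_ctrl() above: the NAND core passes the byte to latch together with NAND_CLE or NAND_ALE flags, and this driver folds those flags into the I/O address, since the EBU decodes the low address bits (NAND_CMD_CS, NAND_CMD_CLE, NAND_CMD_ALE) into the chip's control strobes. A hedged illustration of the calling side, not taken from the patch or quoted from nand_base.c, only meant to show which of the registered hooks fires when:

/*
 * Illustration only -- roughly the sequence the NAND core drives through the
 * hooks registered in xway_nand_data when it resets the chip: the command
 * byte goes out with CLE asserted (which xway_cmd_ctrl() turns into a write
 * to the NAND_CMD_CLE address), the control lines are then released, and
 * dev_ready() is polled, much as xway_reset_chip() does directly.
 */
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

static void example_reset_sequence(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;

	chip->select_chip(mtd, 0);			/* xway_select_chip */
	chip->cmd_ctrl(mtd, NAND_CMD_RESET,
		       NAND_CTRL_CLE | NAND_CTRL_CHANGE); /* xway_cmd_ctrl */
	chip->cmd_ctrl(mtd, NAND_CMD_NONE,
		       NAND_NCE | NAND_CTRL_CHANGE);
	while (!chip->dev_ready(mtd))			/* xway_dev_ready */
		;
	chip->select_chip(mtd, -1);
}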
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 9e2dfd517aa5..8dd6ba52404a 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -346,7 +346,6 @@ static int sm_write_sector(struct sm_ftl *ftl,
346 ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops); 346 ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
347 347
348 /* Now we assume that hardware will catch write bitflip errors */ 348 /* Now we assume that hardware will catch write bitflip errors */
349 /* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */
350 349
351 if (ret) { 350 if (ret) {
352 dbg("write to block %d at zone %d, failed with error %d", 351 dbg("write to block %d at zone %d, failed with error %d",
diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile
index b44dcab940d8..bd0065c0d359 100644
--- a/drivers/mtd/tests/Makefile
+++ b/drivers/mtd/tests/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o
6obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o 6obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o
7obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o 7obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o
8obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o 8obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o
9obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o
diff --git a/drivers/mtd/tests/mtd_nandbiterrs.c b/drivers/mtd/tests/mtd_nandbiterrs.c
new file mode 100644
index 000000000000..cc8d62cb280c
--- /dev/null
+++ b/drivers/mtd/tests/mtd_nandbiterrs.c
@@ -0,0 +1,460 @@
1/*
2 * Copyright © 2012 NetCommWireless
3 * Iwo Mergler <Iwo.Mergler@netcommwireless.com.au>
4 *
 5 * Test for multi-bit error recovery on a NAND page. This mostly tests the
6 * ECC controller / driver.
7 *
8 * There are two test modes:
9 *
10 * 0 - artificially inserting bit errors until the ECC fails
11 * This is the default method and fairly quick. It should
12 * be independent of the quality of the FLASH.
13 *
14 * 1 - re-writing the same pattern repeatedly until the ECC fails.
15 * This method relies on the physics of NAND FLASH to eventually
16 * generate '0' bits if '1' has been written sufficient times.
17 * Depending on the NAND, the first bit errors will appear after
18 * 1000 or more writes and then will usually snowball, reaching the
19 * limits of the ECC quickly.
20 *
21 * The test stops after 10000 cycles, should your FLASH be
22 * exceptionally good and not generate bit errors before that. Try
23 * a different page in that case.
24 *
25 * Please note that neither of these tests will significantly 'use up' any
26 * FLASH endurance. Only a maximum of two erase operations will be performed.
27 *
28 *
29 * This program is free software; you can redistribute it and/or modify it
30 * under the terms of the GNU General Public License version 2 as published by
31 * the Free Software Foundation.
32 *
33 * This program is distributed in the hope that it will be useful, but WITHOUT
34 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
35 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
36 * more details.
37 *
38 * You should have received a copy of the GNU General Public License along with
39 * this program; see the file COPYING. If not, write to the Free Software
40 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
41 */
42#include <linux/init.h>
43#include <linux/module.h>
44#include <linux/moduleparam.h>
45#include <linux/mtd/mtd.h>
46#include <linux/err.h>
47#include <linux/mtd/nand.h>
48#include <linux/slab.h>
49
50#define msg(FMT, VA...) pr_info("mtd_nandbiterrs: "FMT, ##VA)
51
52static int dev;
53module_param(dev, int, S_IRUGO);
54MODULE_PARM_DESC(dev, "MTD device number to use");
55
56static unsigned page_offset;
57module_param(page_offset, uint, S_IRUGO);
58MODULE_PARM_DESC(page_offset, "Page number relative to dev start");
59
60static unsigned seed;
61module_param(seed, uint, S_IRUGO);
62MODULE_PARM_DESC(seed, "Random seed");
63
64static int mode;
65module_param(mode, int, S_IRUGO);
66MODULE_PARM_DESC(mode, "0=incremental errors, 1=overwrite test");
67
68static unsigned max_overwrite = 10000;
69
70static loff_t offset; /* Offset of the page we're using. */
71static unsigned eraseblock; /* Eraseblock number for our page. */
72
73/* We assume that the ECC can correct up to a certain number
74 * of biterrors per subpage. */
75static unsigned subsize; /* Size of subpages */
76static unsigned subcount; /* Number of subpages per page */
77
78static struct mtd_info *mtd; /* MTD device */
79
80static uint8_t *wbuffer; /* One page write / compare buffer */
81static uint8_t *rbuffer; /* One page read buffer */
82
83/* 'random' bytes from known offsets */
84static uint8_t hash(unsigned offset)
85{
86 unsigned v = offset;
87 unsigned char c;
88 v ^= 0x7f7edfd3;
89 v = v ^ (v >> 3);
90 v = v ^ (v >> 5);
91 v = v ^ (v >> 13);
92 c = v & 0xFF;
93 /* Reverse bits of result. */
94 c = (c & 0x0F) << 4 | (c & 0xF0) >> 4;
95 c = (c & 0x33) << 2 | (c & 0xCC) >> 2;
96 c = (c & 0x55) << 1 | (c & 0xAA) >> 1;
97 return c;
98}
99
100static int erase_block(void)
101{
102 int err;
103 struct erase_info ei;
104 loff_t addr = eraseblock * mtd->erasesize;
105
106 msg("erase_block\n");
107
108 memset(&ei, 0, sizeof(struct erase_info));
109 ei.mtd = mtd;
110 ei.addr = addr;
111 ei.len = mtd->erasesize;
112
113 err = mtd_erase(mtd, &ei);
114 if (err || ei.state == MTD_ERASE_FAILED) {
115 msg("error %d while erasing\n", err);
116 if (!err)
117 err = -EIO;
118 return err;
119 }
120
121 return 0;
122}
123
124/* Writes wbuffer to page */
125static int write_page(int log)
126{
127 int err = 0;
128 size_t written;
129
130 if (log)
131 msg("write_page\n");
132
133 err = mtd_write(mtd, offset, mtd->writesize, &written, wbuffer);
134 if (err || written != mtd->writesize) {
135 msg("error: write failed at %#llx\n", (long long)offset);
136 if (!err)
137 err = -EIO;
138 }
139
140 return err;
141}
142
143/* Re-writes the data area while leaving the OOB alone. */
144static int rewrite_page(int log)
145{
146 int err = 0;
147 struct mtd_oob_ops ops;
148
149 if (log)
150 msg("rewrite page\n");
151
152 ops.mode = MTD_OPS_RAW; /* No ECC */
153 ops.len = mtd->writesize;
154 ops.retlen = 0;
155 ops.ooblen = 0;
156 ops.oobretlen = 0;
157 ops.ooboffs = 0;
158 ops.datbuf = wbuffer;
159 ops.oobbuf = NULL;
160
161 err = mtd_write_oob(mtd, offset, &ops);
162 if (err || ops.retlen != mtd->writesize) {
163 msg("error: write_oob failed (%d)\n", err);
164 if (!err)
165 err = -EIO;
166 }
167
168 return err;
169}
170
171/* Reads page into rbuffer. Returns number of corrected bit errors (>=0)
172 * or error (<0) */
173static int read_page(int log)
174{
175 int err = 0;
176 size_t read;
177 struct mtd_ecc_stats oldstats;
178
179 if (log)
180 msg("read_page\n");
181
182 /* Saving last mtd stats */
183 memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats));
184
185 err = mtd_read(mtd, offset, mtd->writesize, &read, rbuffer);
186 if (err == -EUCLEAN)
187 err = mtd->ecc_stats.corrected - oldstats.corrected;
188
189 if (err < 0 || read != mtd->writesize) {
190 msg("error: read failed at %#llx\n", (long long)offset);
191 if (err >= 0)
192 err = -EIO;
193 }
194
195 return err;
196}
197
198/* Verifies rbuffer against random sequence */
199static int verify_page(int log)
200{
201 unsigned i, errs = 0;
202
203 if (log)
204 msg("verify_page\n");
205
206 for (i = 0; i < mtd->writesize; i++) {
207 if (rbuffer[i] != hash(i+seed)) {
208 msg("Error: page offset %u, expected %02x, got %02x\n",
209 i, hash(i+seed), rbuffer[i]);
210 errs++;
211 }
212 }
213
214 if (errs)
215 return -EIO;
216 else
217 return 0;
218}
219
220#define CBIT(v, n) ((v) & (1 << (n)))
221#define BCLR(v, n) ((v) = (v) & ~(1 << (n)))
222
223/* Finds the first '1' bit in wbuffer starting at offset 'byte'
224 * and sets it to '0'. */
225static int insert_biterror(unsigned byte)
226{
227 int bit;
228
229 while (byte < mtd->writesize) {
230 for (bit = 7; bit >= 0; bit--) {
231 if (CBIT(wbuffer[byte], bit)) {
232 BCLR(wbuffer[byte], bit);
233 msg("Inserted biterror @ %u/%u\n", byte, bit);
234 return 0;
235 }
236 }
237 byte++;
238 }
239 msg("biterror: Failed to find a '1' bit\n");
240 return -EIO;
241}
242
243/* Writes 'random' data to page and then introduces deliberate bit
244 * errors into the page, while verifying each step. */
245static int incremental_errors_test(void)
246{
247 int err = 0;
248 unsigned i;
249 unsigned errs_per_subpage = 0;
250
251 msg("incremental biterrors test\n");
252
253 for (i = 0; i < mtd->writesize; i++)
254 wbuffer[i] = hash(i+seed);
255
256 err = write_page(1);
257 if (err)
258 goto exit;
259
260 while (1) {
261
262 err = rewrite_page(1);
263 if (err)
264 goto exit;
265
266 err = read_page(1);
267 if (err > 0)
268 msg("Read reported %d corrected bit errors\n", err);
269 if (err < 0) {
270 msg("After %d biterrors per subpage, read reported error %d\n",
271 errs_per_subpage, err);
272 err = 0;
273 goto exit;
274 }
275
276 err = verify_page(1);
277 if (err) {
278 msg("ECC failure, read data is incorrect despite read success\n");
279 goto exit;
280 }
281
282 msg("Successfully corrected %d bit errors per subpage\n",
283 errs_per_subpage);
284
285 for (i = 0; i < subcount; i++) {
286 err = insert_biterror(i * subsize);
287 if (err < 0)
288 goto exit;
289 }
290 errs_per_subpage++;
291 }
292
293exit:
294 return err;
295}
296
297
298/* Writes 'random' data to page and then re-writes that same data repeatedly.
299 This eventually develops bit errors (bits written as '1' will slowly become
300 '0'), which are corrected as far as the ECC is capable of. */
301static int overwrite_test(void)
302{
303 int err = 0;
304 unsigned i;
305 unsigned max_corrected = 0;
306 unsigned opno = 0;
307 /* We don't expect more than this many correctable bit errors per
308 * page. */
309 #define MAXBITS 512
310 static unsigned bitstats[MAXBITS]; /* bit error histogram. */
311
312 memset(bitstats, 0, sizeof(bitstats));
313
314 msg("overwrite biterrors test\n");
315
316 for (i = 0; i < mtd->writesize; i++)
317 wbuffer[i] = hash(i+seed);
318
319 err = write_page(1);
320 if (err)
321 goto exit;
322
323 while (opno < max_overwrite) {
324
325 err = rewrite_page(0);
326 if (err)
327 break;
328
329 err = read_page(0);
330 if (err >= 0) {
331 if (err >= MAXBITS) {
332 msg("Implausible number of bit errors corrected\n");
333 err = -EIO;
334 break;
335 }
336 bitstats[err]++;
337 if (err > max_corrected) {
338 max_corrected = err;
339 msg("Read reported %d corrected bit errors\n",
340 err);
341 }
342 } else { /* err < 0 */
343 msg("Read reported error %d\n", err);
344 err = 0;
345 break;
346 }
347
348 err = verify_page(0);
349 if (err) {
350 bitstats[max_corrected] = opno;
351 msg("ECC failure, read data is incorrect despite read success\n");
352 break;
353 }
354
355 opno++;
356 }
357
358 /* At this point bitstats[0] contains the number of ops with no bit
359 * errors, bitstats[1] the number of ops with 1 bit error, etc. */
360 msg("Bit error histogram (%d operations total):\n", opno);
361 for (i = 0; i < max_corrected; i++)
362 msg("Page reads with %3d corrected bit errors: %d\n",
363 i, bitstats[i]);
364
365exit:
366 return err;
367}
368
369static int __init mtd_nandbiterrs_init(void)
370{
371 int err = 0;
372
373 msg("\n");
374 msg("==================================================\n");
375 msg("MTD device: %d\n", dev);
376
377 mtd = get_mtd_device(NULL, dev);
378 if (IS_ERR(mtd)) {
379 err = PTR_ERR(mtd);
380 msg("error: cannot get MTD device\n");
381 goto exit_mtddev;
382 }
383
384 if (mtd->type != MTD_NANDFLASH) {
385 msg("this test requires NAND flash\n");
386 err = -ENODEV;
387 goto exit_nand;
388 }
389
390 msg("MTD device size %llu, eraseblock=%u, page=%u, oob=%u\n",
391 (unsigned long long)mtd->size, mtd->erasesize,
392 mtd->writesize, mtd->oobsize);
393
394 subsize = mtd->writesize >> mtd->subpage_sft;
395 subcount = mtd->writesize / subsize;
396
397 msg("Device uses %d subpages of %d bytes\n", subcount, subsize);
398
399 offset = page_offset * mtd->writesize;
400 eraseblock = mtd_div_by_eb(offset, mtd);
401
402 msg("Using page=%u, offset=%llu, eraseblock=%u\n",
403 page_offset, offset, eraseblock);
404
405 wbuffer = kmalloc(mtd->writesize, GFP_KERNEL);
406 if (!wbuffer) {
407 err = -ENOMEM;
408 goto exit_wbuffer;
409 }
410
411 rbuffer = kmalloc(mtd->writesize, GFP_KERNEL);
412 if (!rbuffer) {
413 err = -ENOMEM;
414 goto exit_rbuffer;
415 }
416
417 err = erase_block();
418 if (err)
419 goto exit_error;
420
421 if (mode == 0)
422 err = incremental_errors_test();
423 else
424 err = overwrite_test();
425
426 if (err)
427 goto exit_error;
428
429 /* We leave the block un-erased in case of test failure. */
430 err = erase_block();
431 if (err)
432 goto exit_error;
433
434 err = -EIO;
435 msg("finished successfully.\n");
436 msg("==================================================\n");
437
438exit_error:
439 kfree(rbuffer);
440exit_rbuffer:
441 kfree(wbuffer);
442exit_wbuffer:
443 /* Nothing */
444exit_nand:
445 put_mtd_device(mtd);
446exit_mtddev:
447 return err;
448}
449
450static void __exit mtd_nandbiterrs_exit(void)
451{
452 return;
453}
454
455module_init(mtd_nandbiterrs_init);
456module_exit(mtd_nandbiterrs_exit);
457
458MODULE_DESCRIPTION("NAND bit error recovery test");
459MODULE_AUTHOR("Iwo Mergler");
460MODULE_LICENSE("GPL");
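
Both the incremental and the overwrite tests above rely on the NAND property that re-programming a page without an intervening erase can only clear bits (1 -> 0), never set them; that is why insert_biterror() flips a '1' bit to '0' and why rewrite_page() is able to inject errors for the ECC to correct. A minimal, hypothetical C sketch of that behaviour (the helper below is invented for illustration and is not part of the test module):

/* Illustration only: after a second program operation with no erase in
 * between, a NAND byte holds the bitwise AND of the old contents and the
 * newly written data, i.e. individual bits can only move from '1' to '0'. */
static unsigned char nand_byte_after_reprogram(unsigned char old_val,
					       unsigned char new_val)
{
	return old_val & new_val;
}
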
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index 70d6d7d0d656..b437fa425077 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -4,60 +4,287 @@
4#include <linux/random.h> 4#include <linux/random.h>
5#include <linux/string.h> 5#include <linux/string.h>
6#include <linux/bitops.h> 6#include <linux/bitops.h>
7#include <linux/jiffies.h> 7#include <linux/slab.h>
8#include <linux/mtd/nand_ecc.h> 8#include <linux/mtd/nand_ecc.h>
9 9
10/*
 11 * Test the software ECC implementation.
12 *
 13 * No actual MTD device is needed, so we don't need to worry about losing
14 * important data by human error.
15 *
16 * This covers possible patterns of corruption which can be reliably corrected
17 * or detected.
18 */
19
10#if defined(CONFIG_MTD_NAND) || defined(CONFIG_MTD_NAND_MODULE) 20#if defined(CONFIG_MTD_NAND) || defined(CONFIG_MTD_NAND_MODULE)
11 21
12static void inject_single_bit_error(void *data, size_t size) 22struct nand_ecc_test {
23 const char *name;
24 void (*prepare)(void *, void *, void *, void *, const size_t);
25 int (*verify)(void *, void *, void *, const size_t);
26};
27
28/*
 29 * The reason for using __change_bit_le() instead of __change_bit() is to
 30 * inject a bit error properly within a region whose size is not a multiple
 31 * of sizeof(unsigned long) on big-endian systems.
32 */
33#ifdef __LITTLE_ENDIAN
34#define __change_bit_le(nr, addr) __change_bit(nr, addr)
35#elif defined(__BIG_ENDIAN)
36#define __change_bit_le(nr, addr) \
37 __change_bit((nr) ^ ((BITS_PER_LONG - 1) & ~0x7), addr)
38#else
39#error "Unknown byte order"
40#endif
41
42static void single_bit_error_data(void *error_data, void *correct_data,
43 size_t size)
13{ 44{
14 unsigned long offset = random32() % (size * BITS_PER_BYTE); 45 unsigned int offset = random32() % (size * BITS_PER_BYTE);
15 46
16 __change_bit(offset, data); 47 memcpy(error_data, correct_data, size);
48 __change_bit_le(offset, error_data);
17} 49}
18 50
19static unsigned char data[512]; 51static void double_bit_error_data(void *error_data, void *correct_data,
20static unsigned char error_data[512]; 52 size_t size)
53{
54 unsigned int offset[2];
55
56 offset[0] = random32() % (size * BITS_PER_BYTE);
57 do {
58 offset[1] = random32() % (size * BITS_PER_BYTE);
59 } while (offset[0] == offset[1]);
21 60
22static int nand_ecc_test(const size_t size) 61 memcpy(error_data, correct_data, size);
62
63 __change_bit_le(offset[0], error_data);
64 __change_bit_le(offset[1], error_data);
65}
66
67static unsigned int random_ecc_bit(size_t size)
23{ 68{
24 unsigned char code[3]; 69 unsigned int offset = random32() % (3 * BITS_PER_BYTE);
25 unsigned char error_code[3]; 70
26 char testname[30]; 71 if (size == 256) {
72 /*
73 * Don't inject a bit error into the insignificant bits (16th
74 * and 17th bit) in ECC code for 256 byte data block
75 */
76 while (offset == 16 || offset == 17)
77 offset = random32() % (3 * BITS_PER_BYTE);
78 }
27 79
28 BUG_ON(sizeof(data) < size); 80 return offset;
81}
29 82
30 sprintf(testname, "nand-ecc-%zu", size); 83static void single_bit_error_ecc(void *error_ecc, void *correct_ecc,
84 size_t size)
85{
86 unsigned int offset = random_ecc_bit(size);
31 87
32 get_random_bytes(data, size); 88 memcpy(error_ecc, correct_ecc, 3);
89 __change_bit_le(offset, error_ecc);
90}
33 91
34 memcpy(error_data, data, size); 92static void double_bit_error_ecc(void *error_ecc, void *correct_ecc,
35 inject_single_bit_error(error_data, size); 93 size_t size)
94{
95 unsigned int offset[2];
36 96
37 __nand_calculate_ecc(data, size, code); 97 offset[0] = random_ecc_bit(size);
38 __nand_calculate_ecc(error_data, size, error_code); 98 do {
39 __nand_correct_data(error_data, code, error_code, size); 99 offset[1] = random_ecc_bit(size);
100 } while (offset[0] == offset[1]);
40 101
41 if (!memcmp(data, error_data, size)) { 102 memcpy(error_ecc, correct_ecc, 3);
42 printk(KERN_INFO "mtd_nandecctest: ok - %s\n", testname); 103 __change_bit_le(offset[0], error_ecc);
104 __change_bit_le(offset[1], error_ecc);
105}
106
107static void no_bit_error(void *error_data, void *error_ecc,
108 void *correct_data, void *correct_ecc, const size_t size)
109{
110 memcpy(error_data, correct_data, size);
111 memcpy(error_ecc, correct_ecc, 3);
112}
113
114static int no_bit_error_verify(void *error_data, void *error_ecc,
115 void *correct_data, const size_t size)
116{
117 unsigned char calc_ecc[3];
118 int ret;
119
120 __nand_calculate_ecc(error_data, size, calc_ecc);
121 ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
122 if (ret == 0 && !memcmp(correct_data, error_data, size))
43 return 0; 123 return 0;
44 }
45 124
46 printk(KERN_ERR "mtd_nandecctest: not ok - %s\n", testname); 125 return -EINVAL;
126}
127
128static void single_bit_error_in_data(void *error_data, void *error_ecc,
129 void *correct_data, void *correct_ecc, const size_t size)
130{
131 single_bit_error_data(error_data, correct_data, size);
132 memcpy(error_ecc, correct_ecc, 3);
133}
134
135static void single_bit_error_in_ecc(void *error_data, void *error_ecc,
136 void *correct_data, void *correct_ecc, const size_t size)
137{
138 memcpy(error_data, correct_data, size);
139 single_bit_error_ecc(error_ecc, correct_ecc, size);
140}
141
142static int single_bit_error_correct(void *error_data, void *error_ecc,
143 void *correct_data, const size_t size)
144{
145 unsigned char calc_ecc[3];
146 int ret;
47 147
48 printk(KERN_DEBUG "hexdump of data:\n"); 148 __nand_calculate_ecc(error_data, size, calc_ecc);
49 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4, 149 ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
50 data, size, false); 150 if (ret == 1 && !memcmp(correct_data, error_data, size))
51 printk(KERN_DEBUG "hexdump of error data:\n"); 151 return 0;
52 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4, 152
153 return -EINVAL;
154}
155
156static void double_bit_error_in_data(void *error_data, void *error_ecc,
157 void *correct_data, void *correct_ecc, const size_t size)
158{
159 double_bit_error_data(error_data, correct_data, size);
160 memcpy(error_ecc, correct_ecc, 3);
161}
162
163static void single_bit_error_in_data_and_ecc(void *error_data, void *error_ecc,
164 void *correct_data, void *correct_ecc, const size_t size)
165{
166 single_bit_error_data(error_data, correct_data, size);
167 single_bit_error_ecc(error_ecc, correct_ecc, size);
168}
169
170static void double_bit_error_in_ecc(void *error_data, void *error_ecc,
171 void *correct_data, void *correct_ecc, const size_t size)
172{
173 memcpy(error_data, correct_data, size);
174 double_bit_error_ecc(error_ecc, correct_ecc, size);
175}
176
177static int double_bit_error_detect(void *error_data, void *error_ecc,
178 void *correct_data, const size_t size)
179{
180 unsigned char calc_ecc[3];
181 int ret;
182
183 __nand_calculate_ecc(error_data, size, calc_ecc);
184 ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
185
186 return (ret == -1) ? 0 : -EINVAL;
187}
188
189static const struct nand_ecc_test nand_ecc_test[] = {
190 {
191 .name = "no-bit-error",
192 .prepare = no_bit_error,
193 .verify = no_bit_error_verify,
194 },
195 {
196 .name = "single-bit-error-in-data-correct",
197 .prepare = single_bit_error_in_data,
198 .verify = single_bit_error_correct,
199 },
200 {
201 .name = "single-bit-error-in-ecc-correct",
202 .prepare = single_bit_error_in_ecc,
203 .verify = single_bit_error_correct,
204 },
205 {
206 .name = "double-bit-error-in-data-detect",
207 .prepare = double_bit_error_in_data,
208 .verify = double_bit_error_detect,
209 },
210 {
211 .name = "single-bit-error-in-data-and-ecc-detect",
212 .prepare = single_bit_error_in_data_and_ecc,
213 .verify = double_bit_error_detect,
214 },
215 {
216 .name = "double-bit-error-in-ecc-detect",
217 .prepare = double_bit_error_in_ecc,
218 .verify = double_bit_error_detect,
219 },
220};
221
222static void dump_data_ecc(void *error_data, void *error_ecc, void *correct_data,
223 void *correct_ecc, const size_t size)
224{
225 pr_info("hexdump of error data:\n");
226 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
53 error_data, size, false); 227 error_data, size, false);
228 print_hex_dump(KERN_INFO, "hexdump of error ecc: ",
229 DUMP_PREFIX_NONE, 16, 1, error_ecc, 3, false);
230
231 pr_info("hexdump of correct data:\n");
232 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
233 correct_data, size, false);
234 print_hex_dump(KERN_INFO, "hexdump of correct ecc: ",
235 DUMP_PREFIX_NONE, 16, 1, correct_ecc, 3, false);
236}
237
238static int nand_ecc_test_run(const size_t size)
239{
240 int i;
241 int err = 0;
242 void *error_data;
243 void *error_ecc;
244 void *correct_data;
245 void *correct_ecc;
54 246
55 return -1; 247 error_data = kmalloc(size, GFP_KERNEL);
248 error_ecc = kmalloc(3, GFP_KERNEL);
249 correct_data = kmalloc(size, GFP_KERNEL);
250 correct_ecc = kmalloc(3, GFP_KERNEL);
251
252 if (!error_data || !error_ecc || !correct_data || !correct_ecc) {
253 err = -ENOMEM;
254 goto error;
255 }
256
257 get_random_bytes(correct_data, size);
258 __nand_calculate_ecc(correct_data, size, correct_ecc);
259
260 for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) {
261 nand_ecc_test[i].prepare(error_data, error_ecc,
262 correct_data, correct_ecc, size);
263 err = nand_ecc_test[i].verify(error_data, error_ecc,
264 correct_data, size);
265
266 if (err) {
267 pr_err("mtd_nandecctest: not ok - %s-%zd\n",
268 nand_ecc_test[i].name, size);
269 dump_data_ecc(error_data, error_ecc,
270 correct_data, correct_ecc, size);
271 break;
272 }
273 pr_info("mtd_nandecctest: ok - %s-%zd\n",
274 nand_ecc_test[i].name, size);
275 }
276error:
277 kfree(error_data);
278 kfree(error_ecc);
279 kfree(correct_data);
280 kfree(correct_ecc);
281
282 return err;
56} 283}
57 284
58#else 285#else
59 286
60static int nand_ecc_test(const size_t size) 287static int nand_ecc_test_run(const size_t size)
61{ 288{
62 return 0; 289 return 0;
63} 290}
@@ -66,12 +293,13 @@ static int nand_ecc_test(const size_t size)
66 293
67static int __init ecc_test_init(void) 294static int __init ecc_test_init(void)
68{ 295{
69 srandom32(jiffies); 296 int err;
70 297
71 nand_ecc_test(256); 298 err = nand_ecc_test_run(256);
72 nand_ecc_test(512); 299 if (err)
300 return err;
73 301
74 return 0; 302 return nand_ecc_test_run(512);
75} 303}
76 304
77static void __exit ecc_test_exit(void) 305static void __exit ecc_test_exit(void)
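
A hypothetical worked example of the __change_bit_le() remapping used in the test above, assuming BITS_PER_LONG == 64, so that (BITS_PER_LONG - 1) & ~0x7 evaluates to 56 (the helper name is invented here for illustration only):

/* A "little-endian" bit number nr is meant to land in memory byte nr / 8,
 * bit nr % 8.  On a 64-bit big-endian machine __change_bit() numbers bits
 * within native unsigned longs, whose bytes sit in memory in reverse order,
 * so XOR-ing the bit number with 56 flips the byte index inside the long
 * (0 <-> 7, 1 <-> 6, ...) while keeping the bit position within the byte.
 * Example: nr = 3 (memory byte 0, bit 3) maps to native bit 59, which lives
 * in the most significant value byte, i.e. memory byte 0 on big-endian. */
static inline unsigned int le_bitnr_to_native64(unsigned int nr)
{
	return nr ^ 56;		/* 56 == (BITS_PER_LONG - 1) & ~0x7 for 64-bit */
}
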
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 2aec4f3b72be..42b0f7456fc4 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -26,6 +26,7 @@
26#include <linux/mtd/mtd.h> 26#include <linux/mtd/mtd.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/sched.h> 28#include <linux/sched.h>
29#include <linux/random.h>
29 30
30#define PRINT_PREF KERN_INFO "mtd_speedtest: " 31#define PRINT_PREF KERN_INFO "mtd_speedtest: "
31 32
@@ -47,25 +48,13 @@ static int ebcnt;
47static int pgcnt; 48static int pgcnt;
48static int goodebcnt; 49static int goodebcnt;
49static struct timeval start, finish; 50static struct timeval start, finish;
50static unsigned long next = 1;
51
52static inline unsigned int simple_rand(void)
53{
54 next = next * 1103515245 + 12345;
55 return (unsigned int)((next / 65536) % 32768);
56}
57
58static inline void simple_srand(unsigned long seed)
59{
60 next = seed;
61}
62 51
63static void set_random_data(unsigned char *buf, size_t len) 52static void set_random_data(unsigned char *buf, size_t len)
64{ 53{
65 size_t i; 54 size_t i;
66 55
67 for (i = 0; i < len; ++i) 56 for (i = 0; i < len; ++i)
68 buf[i] = simple_rand(); 57 buf[i] = random32();
69} 58}
70 59
71static int erase_eraseblock(int ebnum) 60static int erase_eraseblock(int ebnum)
@@ -407,7 +396,6 @@ static int __init mtd_speedtest_init(void)
407 goto out; 396 goto out;
408 } 397 }
409 398
410 simple_srand(1);
411 set_random_data(iobuf, mtd->erasesize); 399 set_random_data(iobuf, mtd->erasesize);
412 400
413 err = scan_for_bad_eraseblocks(); 401 err = scan_for_bad_eraseblocks();
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 7b33f22d0b58..cb268cebf01a 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -27,6 +27,7 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/sched.h> 28#include <linux/sched.h>
29#include <linux/vmalloc.h> 29#include <linux/vmalloc.h>
30#include <linux/random.h>
30 31
31#define PRINT_PREF KERN_INFO "mtd_stresstest: " 32#define PRINT_PREF KERN_INFO "mtd_stresstest: "
32 33
@@ -48,28 +49,13 @@ static int pgsize;
48static int bufsize; 49static int bufsize;
49static int ebcnt; 50static int ebcnt;
50static int pgcnt; 51static int pgcnt;
51static unsigned long next = 1;
52
53static inline unsigned int simple_rand(void)
54{
55 next = next * 1103515245 + 12345;
56 return (unsigned int)((next / 65536) % 32768);
57}
58
59static inline void simple_srand(unsigned long seed)
60{
61 next = seed;
62}
63 52
64static int rand_eb(void) 53static int rand_eb(void)
65{ 54{
66 int eb; 55 unsigned int eb;
67 56
68again: 57again:
69 if (ebcnt < 32768) 58 eb = random32();
70 eb = simple_rand();
71 else
72 eb = (simple_rand() << 15) | simple_rand();
73 /* Read or write up to 2 eraseblocks at a time - hence 'ebcnt - 1' */ 59 /* Read or write up to 2 eraseblocks at a time - hence 'ebcnt - 1' */
74 eb %= (ebcnt - 1); 60 eb %= (ebcnt - 1);
75 if (bbt[eb]) 61 if (bbt[eb])
@@ -79,24 +65,18 @@ again:
79 65
80static int rand_offs(void) 66static int rand_offs(void)
81{ 67{
82 int offs; 68 unsigned int offs;
83 69
84 if (bufsize < 32768) 70 offs = random32();
85 offs = simple_rand();
86 else
87 offs = (simple_rand() << 15) | simple_rand();
88 offs %= bufsize; 71 offs %= bufsize;
89 return offs; 72 return offs;
90} 73}
91 74
92static int rand_len(int offs) 75static int rand_len(int offs)
93{ 76{
94 int len; 77 unsigned int len;
95 78
96 if (bufsize < 32768) 79 len = random32();
97 len = simple_rand();
98 else
99 len = (simple_rand() << 15) | simple_rand();
100 len %= (bufsize - offs); 80 len %= (bufsize - offs);
101 return len; 81 return len;
102} 82}
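
The rand_eb()/rand_offs()/rand_len() locals above change from int to unsigned int because random32() returns a full 32-bit value: pushing it through a signed int and then applying '%' can yield a negative, out-of-range result. A hypothetical stand-alone C sketch of the difference (not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int r = 0xdeadbeefu;	/* a random32()-style value with the top bit set */
	int bad = (int)r % 100;		/* does not fit in int; typically negative, and '%' keeps the sign */
	unsigned int good = r % 100;	/* always within [0, 99] */

	printf("signed: %d, unsigned: %u\n", bad, good);
	return 0;
}
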
@@ -211,7 +191,7 @@ static int do_write(void)
211 191
212static int do_operation(void) 192static int do_operation(void)
213{ 193{
214 if (simple_rand() & 1) 194 if (random32() & 1)
215 return do_read(); 195 return do_read();
216 else 196 else
217 return do_write(); 197 return do_write();
@@ -302,9 +282,8 @@ static int __init mtd_stresstest_init(void)
302 } 282 }
303 for (i = 0; i < ebcnt; i++) 283 for (i = 0; i < ebcnt; i++)
304 offsets[i] = mtd->erasesize; 284 offsets[i] = mtd->erasesize;
305 simple_srand(current->pid);
306 for (i = 0; i < bufsize; i++) 285 for (i = 0; i < bufsize; i++)
307 writebuf[i] = simple_rand(); 286 writebuf[i] = random32();
308 287
309 err = scan_for_bad_eraseblocks(); 288 err = scan_for_bad_eraseblocks();
310 if (err) 289 if (err)