Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/Kconfig                            |   4
-rw-r--r--  drivers/mtd/ar7part.c                          |   6
-rw-r--r--  drivers/mtd/bcm47xxpart.c                      |  49
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c            | 217
-rw-r--r--  drivers/mtd/cmdlinepart.c                      |  47
-rw-r--r--  drivers/mtd/devices/Makefile                   |   4
-rw-r--r--  drivers/mtd/devices/bcm47xxsflash.c            |  55
-rw-r--r--  drivers/mtd/devices/bcm47xxsflash.h            |  15
-rw-r--r--  drivers/mtd/devices/elm.c                      | 404
-rw-r--r--  drivers/mtd/devices/m25p80.c                   | 100
-rw-r--r--  drivers/mtd/maps/Kconfig                       |   2
-rw-r--r--  drivers/mtd/maps/physmap_of.c                  |   9
-rw-r--r--  drivers/mtd/maps/uclinux.c                     |  30
-rw-r--r--  drivers/mtd/nand/atmel_nand.c                  | 141
-rw-r--r--  drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h |   4
-rw-r--r--  drivers/mtd/nand/bcm47xxnflash/main.c          |  14
-rw-r--r--  drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c   |   4
-rw-r--r--  drivers/mtd/nand/davinci_nand.c                |  24
-rw-r--r--  drivers/mtd/nand/fsl_ifc_nand.c                | 233
-rw-r--r--  drivers/mtd/nand/gpmi-nand/bch-regs.h          |  22
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-lib.c          |   9
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-nand.c         |  63
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-nand.h         |   4
-rw-r--r--  drivers/mtd/nand/mxc_nand.c                    |  11
-rw-r--r--  drivers/mtd/nand/nand_base.c                   |   8
-rw-r--r--  drivers/mtd/nand/nand_ecc.c                    |   5
-rw-r--r--  drivers/mtd/nand/nandsim.c                     |   6
-rw-r--r--  drivers/mtd/nand/omap2.c                       | 583
-rw-r--r--  drivers/mtd/ofpart.c                           |   7
-rw-r--r--  drivers/mtd/tests/mtd_nandecctest.c            |  10
-rw-r--r--  drivers/mtd/tests/mtd_stresstest.c             |   8
-rw-r--r--  drivers/mtd/tests/mtd_torturetest.c            |  25
-rw-r--r--  drivers/mtd/ubi/debug.h                        |   6
33 files changed, 1800 insertions(+), 329 deletions(-)
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 03f2eb5627ec..557bec599f4f 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -74,8 +74,8 @@ config MTD_REDBOOT_PARTS_READONLY
 endif # MTD_REDBOOT_PARTS
 
 config MTD_CMDLINE_PARTS
-	bool "Command line partition table parsing"
-	depends on MTD = "y"
+	tristate "Command line partition table parsing"
+	depends on MTD
 	---help---
 	  Allow generic configuration of the MTD partition tables via the kernel
 	  command line. Multiple flash resources are supported for hardware where
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
index 7c057a05adb6..ddc0a4287a4b 100644
--- a/drivers/mtd/ar7part.c
+++ b/drivers/mtd/ar7part.c
@@ -142,7 +142,13 @@ static int __init ar7_parser_init(void)
 	return register_mtd_parser(&ar7_parser);
 }
 
+static void __exit ar7_parser_exit(void)
+{
+	deregister_mtd_parser(&ar7_parser);
+}
+
 module_init(ar7_parser_init);
+module_exit(ar7_parser_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR( "Felix Fietkau <nbd@openwrt.org>, "
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 6a1180502cc1..63feb75cc8e0 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -19,12 +19,6 @@
 /* 10 parts were found on sflash on Netgear WNDR4500 */
 #define BCM47XXPART_MAX_PARTS		12
 
-/*
- * Amount of bytes we read when analyzing each block of flash memory.
- * Set it big enough to allow detecting partition and reading important data.
- */
-#define BCM47XXPART_BYTES_TO_READ	0x404
-
 /* Magics */
 #define BOARD_DATA_MAGIC		0x5246504D	/* MPFR */
 #define POT_MAGIC1			0x54544f50	/* POTT */
@@ -59,13 +53,21 @@ static int bcm47xxpart_parse(struct mtd_info *master,
 	uint32_t *buf;
 	size_t bytes_read;
 	uint32_t offset;
-	uint32_t blocksize = 0x10000;
+	uint32_t blocksize = master->erasesize;
 	struct trx_header *trx;
+	int trx_part = -1;
+	int last_trx_part = -1;
+	int max_bytes_to_read = 0x8004;
+
+	if (blocksize <= 0x10000)
+		blocksize = 0x10000;
+	if (blocksize == 0x20000)
+		max_bytes_to_read = 0x18004;
 
 	/* Alloc */
 	parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
 			GFP_KERNEL);
-	buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL);
+	buf = kzalloc(max_bytes_to_read, GFP_KERNEL);
 
 	/* Parse block by block looking for magics */
 	for (offset = 0; offset <= master->size - blocksize;
@@ -80,7 +82,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
 		}
 
 		/* Read beginning of the block */
-		if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
+		if (mtd_read(master, offset, max_bytes_to_read,
 			     &bytes_read, (uint8_t *)buf) < 0) {
 			pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
 			       offset);
@@ -95,9 +97,16 @@ static int bcm47xxpart_parse(struct mtd_info *master,
 		}
 
 		/* Standard NVRAM */
-		if (buf[0x000 / 4] == NVRAM_HEADER) {
+		if (buf[0x000 / 4] == NVRAM_HEADER ||
+		    buf[0x1000 / 4] == NVRAM_HEADER ||
+		    buf[0x8000 / 4] == NVRAM_HEADER ||
+		    (blocksize == 0x20000 && (
+		     buf[0x10000 / 4] == NVRAM_HEADER ||
+		     buf[0x11000 / 4] == NVRAM_HEADER ||
+		     buf[0x18000 / 4] == NVRAM_HEADER))) {
 			bcm47xxpart_add_part(&parts[curr_part++], "nvram",
 					     offset, 0);
+			offset = rounddown(offset, blocksize);
 			continue;
 		}
 
@@ -131,6 +140,10 @@ static int bcm47xxpart_parse(struct mtd_info *master,
 		if (buf[0x000 / 4] == TRX_MAGIC) {
 			trx = (struct trx_header *)buf;
 
+			trx_part = curr_part;
+			bcm47xxpart_add_part(&parts[curr_part++], "firmware",
+					     offset, 0);
+
 			i = 0;
 			/* We have LZMA loader if offset[2] points to sth */
 			if (trx->offset[2]) {
@@ -154,6 +167,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
 					     offset + trx->offset[i], 0);
 			i++;
 
+			last_trx_part = curr_part - 1;
+
 			/*
 			 * We have whole TRX scanned, skip to the next part. Use
 			 * roundown (not roundup), as the loop will increase
@@ -169,11 +184,15 @@ static int bcm47xxpart_parse(struct mtd_info *master,
 	 * Assume that partitions end at the beginning of the one they are
 	 * followed by.
 	 */
-	for (i = 0; i < curr_part - 1; i++)
-		parts[i].size = parts[i + 1].offset - parts[i].offset;
-	if (curr_part > 0)
-		parts[curr_part - 1].size =
-				master->size - parts[curr_part - 1].offset;
+	for (i = 0; i < curr_part; i++) {
+		u64 next_part_offset = (i < curr_part - 1) ?
+				       parts[i + 1].offset : master->size;
+
+		parts[i].size = next_part_offset - parts[i].offset;
+		if (i == last_trx_part && trx_part >= 0)
+			parts[trx_part].size = next_part_offset -
+					       parts[trx_part].offset;
+	}
 
 	*pparts = parts;
 	return curr_part;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index b86197286f24..fff665d59a0d 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -33,6 +33,8 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/reboot.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
 #include <linux/mtd/map.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/cfi.h>
@@ -74,6 +76,10 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 
+static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+
 static struct mtd_chip_driver cfi_amdstd_chipdrv = {
 	.probe		= NULL, /* Not usable directly */
 	.destroy	= cfi_amdstd_destroy,
@@ -496,6 +502,7 @@ static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
 {
 	struct cfi_private *cfi = map->fldrv_priv;
+	struct device_node __maybe_unused *np = map->device_node;
 	struct mtd_info *mtd;
 	int i;
 
@@ -570,6 +577,17 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
 		cfi_tell_features(extp);
 #endif
 
+#ifdef CONFIG_OF
+		if (np && of_property_read_bool(
+			    np, "use-advanced-sector-protection")
+		    && extp->BlkProtUnprot == 8) {
+			printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n");
+			mtd->_lock = cfi_ppb_lock;
+			mtd->_unlock = cfi_ppb_unlock;
+			mtd->_is_locked = cfi_ppb_is_locked;
+		}
+#endif
+
 		bootloc = extp->TopBottom;
 		if ((bootloc < 2) || (bootloc > 5)) {
 			printk(KERN_WARNING "%s: CFI contains unrecognised boot "
@@ -2172,6 +2190,205 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
 }
 
+/*
+ * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
+ */
+
+struct ppb_lock {
+	struct flchip *chip;
+	loff_t offset;
+	int locked;
+};
+
+#define MAX_SECTORS			512
+
+#define DO_XXLOCK_ONEBLOCK_LOCK		((void *)1)
+#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *)2)
+#define DO_XXLOCK_ONEBLOCK_GETLOCK	((void *)3)
+
+static int __maybe_unused do_ppb_xxlock(struct map_info *map,
+					struct flchip *chip,
+					unsigned long adr, int len, void *thunk)
+{
+	struct cfi_private *cfi = map->fldrv_priv;
+	unsigned long timeo;
+	int ret;
+
+	mutex_lock(&chip->mutex);
+	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
+	if (ret) {
+		mutex_unlock(&chip->mutex);
+		return ret;
+	}
+
+	pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
+
+	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+			 cfi->device_type, NULL);
+	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+			 cfi->device_type, NULL);
+	/* PPB entry command */
+	cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
+			 cfi->device_type, NULL);
+
+	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
+		chip->state = FL_LOCKING;
+		map_write(map, CMD(0xA0), chip->start + adr);
+		map_write(map, CMD(0x00), chip->start + adr);
+	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
+		/*
+		 * Unlocking of one specific sector is not supported, so we
+		 * have to unlock all sectors of this device instead
+		 */
+		chip->state = FL_UNLOCKING;
+		map_write(map, CMD(0x80), chip->start);
+		map_write(map, CMD(0x30), chip->start);
+	} else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
+		chip->state = FL_JEDEC_QUERY;
+		/* Return locked status: 0->locked, 1->unlocked */
+		ret = !cfi_read_query(map, adr);
+	} else
+		BUG();
+
+	/*
+	 * Wait for some time as unlocking of all sectors takes quite long
+	 */
+	timeo = jiffies + msecs_to_jiffies(2000);	/* 2s max (un)locking */
+	for (;;) {
+		if (chip_ready(map, adr))
+			break;
+
+		if (time_after(jiffies, timeo)) {
+			printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
+			ret = -EIO;
+			break;
+		}
+
+		UDELAY(map, chip, adr, 1);
+	}
+
+	/* Exit BC commands */
+	map_write(map, CMD(0x90), chip->start);
+	map_write(map, CMD(0x00), chip->start);
+
+	chip->state = FL_READY;
+	put_chip(map, chip, adr + chip->start);
+	mutex_unlock(&chip->mutex);
+
+	return ret;
+}
+
+static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
+				       uint64_t len)
+{
+	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+				DO_XXLOCK_ONEBLOCK_LOCK);
+}
+
+static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
+					 uint64_t len)
+{
+	struct mtd_erase_region_info *regions = mtd->eraseregions;
+	struct map_info *map = mtd->priv;
+	struct cfi_private *cfi = map->fldrv_priv;
+	struct ppb_lock *sect;
+	unsigned long adr;
+	loff_t offset;
+	uint64_t length;
+	int chipnum;
+	int i;
+	int sectors;
+	int ret;
+
+	/*
+	 * PPB unlocking always unlocks all sectors of the flash chip.
+	 * We need to re-lock all previously locked sectors. So lets
+	 * first check the locking status of all sectors and save
+	 * it for future use.
+	 */
+	sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL);
+	if (!sect)
+		return -ENOMEM;
+
+	/*
+	 * This code to walk all sectors is a slightly modified version
+	 * of the cfi_varsize_frob() code.
+	 */
+	i = 0;
+	chipnum = 0;
+	adr = 0;
+	sectors = 0;
+	offset = 0;
+	length = mtd->size;
+
+	while (length) {
+		int size = regions[i].erasesize;
+
+		/*
+		 * Only test sectors that shall not be unlocked. The other
+		 * sectors shall be unlocked, so lets keep their locking
+		 * status at "unlocked" (locked=0) for the final re-locking.
+		 */
+		if ((adr < ofs) || (adr >= (ofs + len))) {
+			sect[sectors].chip = &cfi->chips[chipnum];
+			sect[sectors].offset = offset;
+			sect[sectors].locked = do_ppb_xxlock(
+				map, &cfi->chips[chipnum], adr, 0,
+				DO_XXLOCK_ONEBLOCK_GETLOCK);
+		}
+
+		adr += size;
+		offset += size;
+		length -= size;
+
+		if (offset == regions[i].offset + size * regions[i].numblocks)
+			i++;
+
+		if (adr >> cfi->chipshift) {
+			adr = 0;
+			chipnum++;
+
+			if (chipnum >= cfi->numchips)
+				break;
+		}
+
+		sectors++;
+		if (sectors >= MAX_SECTORS) {
+			printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
+			       MAX_SECTORS);
+			kfree(sect);
+			return -EINVAL;
+		}
+	}
+
+	/* Now unlock the whole chip */
+	ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+			       DO_XXLOCK_ONEBLOCK_UNLOCK);
+	if (ret) {
+		kfree(sect);
+		return ret;
+	}
+
+	/*
+	 * PPB unlocking always unlocks all sectors of the flash chip.
+	 * We need to re-lock all previously locked sectors.
+	 */
+	for (i = 0; i < sectors; i++) {
+		if (sect[i].locked)
+			do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
+				      DO_XXLOCK_ONEBLOCK_LOCK);
+	}
+
+	kfree(sect);
+	return ret;
+}
+
+static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
+					    uint64_t len)
+{
+	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+				DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
+}
 
 static void cfi_amdstd_sync (struct mtd_info *mtd)
 {
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index c533f27d863f..721caebbc5cc 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -22,11 +22,22 @@
  *
  * mtdparts=<mtddef>[;<mtddef]
  * <mtddef>  := <mtd-id>:<partdef>[,<partdef>]
- * where <mtd-id> is the name from the "cat /proc/mtd" command
- * <partdef> := <size>[@offset][<name>][ro][lk]
+ * <partdef> := <size>[@<offset>][<name>][ro][lk]
  * <mtd-id>  := unique name used in mapping driver/device (mtd->name)
  * <size>    := standard linux memsize OR "-" to denote all remaining space
+ *              size is automatically truncated at end of device
+ *              if specified or trucated size is 0 the part is skipped
+ * <offset>  := standard linux memsize
+ *              if omitted the part will immediately follow the previous part
+ *              or 0 if the first part
  * <name>    := '(' NAME ')'
+ *              NAME will appear in /proc/mtd
+ *
+ * <size> and <offset> can be specified such that the parts are out of order
+ * in physical memory and may even overlap.
+ *
+ * The parts are assigned MTD numbers in the order they are specified in the
+ * command line regardless of their order in physical memory.
  *
  * Examples:
  *
@@ -70,6 +81,7 @@ struct cmdline_mtd_partition {
 static struct cmdline_mtd_partition *partitions;
 
 /* the command line passed to mtdpart_setup() */
+static char *mtdparts;
 static char *cmdline;
 static int cmdline_parsed;
 
@@ -330,6 +342,14 @@ static int parse_cmdline_partitions(struct mtd_info *master,
 		if (part->parts[i].size == SIZE_REMAINING)
 			part->parts[i].size = master->size - offset;
 
+		if (offset + part->parts[i].size > master->size) {
+			printk(KERN_WARNING ERRP
+			       "%s: partitioning exceeds flash size, truncating\n",
+			       part->mtd_id);
+			part->parts[i].size = master->size - offset;
+		}
+		offset += part->parts[i].size;
+
 		if (part->parts[i].size == 0) {
 			printk(KERN_WARNING ERRP
 			       "%s: skipping zero sized partition\n",
@@ -337,16 +357,8 @@ static int parse_cmdline_partitions(struct mtd_info *master,
 			part->num_parts--;
 			memmove(&part->parts[i], &part->parts[i + 1],
 				sizeof(*part->parts) * (part->num_parts - i));
-			continue;
-		}
-
-		if (offset + part->parts[i].size > master->size) {
-			printk(KERN_WARNING ERRP
-			       "%s: partitioning exceeds flash size, truncating\n",
-			       part->mtd_id);
-			part->parts[i].size = master->size - offset;
+			i--;
 		}
-		offset += part->parts[i].size;
 	}
 
 	*pparts = kmemdup(part->parts, sizeof(*part->parts) * part->num_parts,
@@ -365,7 +377,7 @@ static int parse_cmdline_partitions(struct mtd_info *master,
  *
  * This function needs to be visible for bootloaders.
  */
-static int mtdpart_setup(char *s)
+static int __init mtdpart_setup(char *s)
 {
 	cmdline = s;
 	return 1;
@@ -381,10 +393,21 @@ static struct mtd_part_parser cmdline_parser = {
 
 static int __init cmdline_parser_init(void)
 {
+	if (mtdparts)
+		mtdpart_setup(mtdparts);
 	return register_mtd_parser(&cmdline_parser);
 }
 
+static void __exit cmdline_parser_exit(void)
+{
+	deregister_mtd_parser(&cmdline_parser);
+}
+
 module_init(cmdline_parser_init);
+module_exit(cmdline_parser_exit);
+
+MODULE_PARM_DESC(mtdparts, "Partitioning specification");
+module_param(mtdparts, charp, 0);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 395733a30ef4..369a1943ca25 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -17,8 +17,10 @@ obj-$(CONFIG_MTD_LART) += lart.o
 obj-$(CONFIG_MTD_BLOCK2MTD)	+= block2mtd.o
 obj-$(CONFIG_MTD_DATAFLASH)	+= mtd_dataflash.o
 obj-$(CONFIG_MTD_M25P80)	+= m25p80.o
+obj-$(CONFIG_MTD_NAND_OMAP_BCH)	+= elm.o
 obj-$(CONFIG_MTD_SPEAR_SMI)	+= spear_smi.o
 obj-$(CONFIG_MTD_SST25L)	+= sst25l.o
 obj-$(CONFIG_MTD_BCM47XXSFLASH)	+= bcm47xxsflash.o
 
-CFLAGS_docg3.o			+= -I$(src)
\ No newline at end of file
+
+CFLAGS_docg3.o			+= -I$(src)
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index 4714584aa993..95266285acb1 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -5,6 +5,8 @@
 #include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 
+#include "bcm47xxsflash.h"
+
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Serial flash driver for BCMA bus");
 
@@ -13,26 +15,28 @@ static const char *probes[] = { "bcm47xxpart", NULL };
 static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
 			      size_t *retlen, u_char *buf)
 {
-	struct bcma_sflash *sflash = mtd->priv;
+	struct bcm47xxsflash *b47s = mtd->priv;
 
 	/* Check address range */
 	if ((from + len) > mtd->size)
 		return -EINVAL;
 
-	memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(sflash->window + from),
+	memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(b47s->window + from),
 		      len);
+	*retlen = len;
 
 	return len;
 }
 
-static void bcm47xxsflash_fill_mtd(struct bcma_sflash *sflash,
-				   struct mtd_info *mtd)
+static void bcm47xxsflash_fill_mtd(struct bcm47xxsflash *b47s)
 {
-	mtd->priv = sflash;
+	struct mtd_info *mtd = &b47s->mtd;
+
+	mtd->priv = b47s;
 	mtd->name = "bcm47xxsflash";
 	mtd->owner = THIS_MODULE;
 	mtd->type = MTD_ROM;
-	mtd->size = sflash->size;
+	mtd->size = b47s->size;
 	mtd->_read = bcm47xxsflash_read;
 
 	/* TODO: implement writing support and verify/change following code */
@@ -40,19 +44,30 @@ static void bcm47xxsflash_fill_mtd(struct bcma_sflash *sflash,
 	mtd->writebufsize = mtd->writesize = 1;
 }
 
-static int bcm47xxsflash_probe(struct platform_device *pdev)
+/**************************************************
+ * BCMA
+ **************************************************/
+
+static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
 {
 	struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
+	struct bcm47xxsflash *b47s;
 	int err;
 
-	sflash->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
-	if (!sflash->mtd) {
+	b47s = kzalloc(sizeof(*b47s), GFP_KERNEL);
+	if (!b47s) {
 		err = -ENOMEM;
 		goto out;
 	}
-	bcm47xxsflash_fill_mtd(sflash, sflash->mtd);
+	sflash->priv = b47s;
 
-	err = mtd_device_parse_register(sflash->mtd, probes, NULL, NULL, 0);
+	b47s->window = sflash->window;
+	b47s->blocksize = sflash->blocksize;
+	b47s->numblocks = sflash->numblocks;
+	b47s->size = sflash->size;
+	bcm47xxsflash_fill_mtd(b47s);
+
+	err = mtd_device_parse_register(&b47s->mtd, probes, NULL, NULL, 0);
 	if (err) {
 		pr_err("Failed to register MTD device: %d\n", err);
 		goto err_dev_reg;
@@ -61,34 +76,40 @@ static int bcm47xxsflash_probe(struct platform_device *pdev)
 	return 0;
 
 err_dev_reg:
-	kfree(sflash->mtd);
+	kfree(&b47s->mtd);
 out:
 	return err;
 }
 
-static int bcm47xxsflash_remove(struct platform_device *pdev)
+static int bcm47xxsflash_bcma_remove(struct platform_device *pdev)
 {
 	struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
+	struct bcm47xxsflash *b47s = sflash->priv;
 
-	mtd_device_unregister(sflash->mtd);
-	kfree(sflash->mtd);
+	mtd_device_unregister(&b47s->mtd);
+	kfree(b47s);
 
 	return 0;
 }
 
 static struct platform_driver bcma_sflash_driver = {
-	.remove = bcm47xxsflash_remove,
+	.probe	= bcm47xxsflash_bcma_probe,
+	.remove	= bcm47xxsflash_bcma_remove,
 	.driver = {
 		.name = "bcma_sflash",
 		.owner = THIS_MODULE,
 	},
 };
 
+/**************************************************
+ * Init
+ **************************************************/
+
 static int __init bcm47xxsflash_init(void)
 {
 	int err;
 
-	err = platform_driver_probe(&bcma_sflash_driver, bcm47xxsflash_probe);
+	err = platform_driver_register(&bcma_sflash_driver);
 	if (err)
 		pr_err("Failed to register BCMA serial flash driver: %d\n",
 		       err);
diff --git a/drivers/mtd/devices/bcm47xxsflash.h b/drivers/mtd/devices/bcm47xxsflash.h
new file mode 100644
index 000000000000..ebf6f710e23c
--- /dev/null
+++ b/drivers/mtd/devices/bcm47xxsflash.h
@@ -0,0 +1,15 @@
+#ifndef __BCM47XXSFLASH_H
+#define __BCM47XXSFLASH_H
+
+#include <linux/mtd/mtd.h>
+
+struct bcm47xxsflash {
+	u32 window;
+	u32 blocksize;
+	u16 numblocks;
+	u32 size;
+
+	struct mtd_info mtd;
+};
+
+#endif /* BCM47XXSFLASH */
diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
new file mode 100644
index 000000000000..2ec5da9ee248
--- /dev/null
+++ b/drivers/mtd/devices/elm.c
@@ -0,0 +1,404 @@
+/*
+ * Error Location Module
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_data/elm.h>
+
+#define ELM_IRQSTATUS			0x018
+#define ELM_IRQENABLE			0x01c
+#define ELM_LOCATION_CONFIG		0x020
+#define ELM_PAGE_CTRL			0x080
+#define ELM_SYNDROME_FRAGMENT_0		0x400
+#define ELM_SYNDROME_FRAGMENT_6		0x418
+#define ELM_LOCATION_STATUS		0x800
+#define ELM_ERROR_LOCATION_0		0x880
+
+/* ELM Interrupt Status Register */
+#define INTR_STATUS_PAGE_VALID		BIT(8)
+
+/* ELM Interrupt Enable Register */
+#define INTR_EN_PAGE_MASK		BIT(8)
+
+/* ELM Location Configuration Register */
+#define ECC_BCH_LEVEL_MASK		0x3
+
+/* ELM syndrome */
+#define ELM_SYNDROME_VALID		BIT(16)
+
+/* ELM_LOCATION_STATUS Register */
+#define ECC_CORRECTABLE_MASK		BIT(8)
+#define ECC_NB_ERRORS_MASK		0x1f
+
+/* ELM_ERROR_LOCATION_0-15 Registers */
+#define ECC_ERROR_LOCATION_MASK		0x1fff
+
+#define ELM_ECC_SIZE			0x7ff
+
+#define SYNDROME_FRAGMENT_REG_SIZE	0x40
+#define ERROR_LOCATION_SIZE		0x100
+
+struct elm_info {
+	struct device *dev;
+	void __iomem *elm_base;
+	struct completion elm_completion;
+	struct list_head list;
+	enum bch_ecc bch_type;
+};
+
+static LIST_HEAD(elm_devices);
+
+static void elm_write_reg(struct elm_info *info, int offset, u32 val)
+{
+	writel(val, info->elm_base + offset);
+}
+
+static u32 elm_read_reg(struct elm_info *info, int offset)
+{
+	return readl(info->elm_base + offset);
+}
+
+/**
+ * elm_config - Configure ELM module
+ * @dev: ELM device
+ * @bch_type: Type of BCH ecc
+ */
+void elm_config(struct device *dev, enum bch_ecc bch_type)
+{
+	u32 reg_val;
+	struct elm_info *info = dev_get_drvdata(dev);
+
+	reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16);
+	elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val);
+	info->bch_type = bch_type;
+}
+EXPORT_SYMBOL(elm_config);
+
+/**
+ * elm_configure_page_mode - Enable/Disable page mode
+ * @info: elm info
+ * @index: index number of syndrome fragment vector
+ * @enable: enable/disable flag for page mode
+ *
+ * Enable page mode for syndrome fragment index
+ */
+static void elm_configure_page_mode(struct elm_info *info, int index,
+				    bool enable)
+{
+	u32 reg_val;
+
+	reg_val = elm_read_reg(info, ELM_PAGE_CTRL);
+	if (enable)
+		reg_val |= BIT(index);	/* enable page mode */
+	else
+		reg_val &= ~BIT(index);	/* disable page mode */
+
+	elm_write_reg(info, ELM_PAGE_CTRL, reg_val);
+}
+
+/**
+ * elm_load_syndrome - Load ELM syndrome reg
+ * @info: elm info
+ * @err_vec: elm error vectors
+ * @ecc: buffer with calculated ecc
+ *
+ * Load syndrome fragment registers with calculated ecc in reverse order.
+ */
+static void elm_load_syndrome(struct elm_info *info,
+			      struct elm_errorvec *err_vec, u8 *ecc)
+{
+	int i, offset;
+	u32 val;
+
+	for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+
+		/* Check error reported */
+		if (err_vec[i].error_reported) {
+			elm_configure_page_mode(info, i, true);
+			offset = ELM_SYNDROME_FRAGMENT_0 +
+				SYNDROME_FRAGMENT_REG_SIZE * i;
+
+			/* BCH8 */
+			if (info->bch_type) {
+
+				/* syndrome fragment 0 = ecc[9-12B] */
+				val = cpu_to_be32(*(u32 *) &ecc[9]);
+				elm_write_reg(info, offset, val);
+
+				/* syndrome fragment 1 = ecc[5-8B] */
+				offset += 4;
+				val = cpu_to_be32(*(u32 *) &ecc[5]);
+				elm_write_reg(info, offset, val);
+
+				/* syndrome fragment 2 = ecc[1-4B] */
+				offset += 4;
+				val = cpu_to_be32(*(u32 *) &ecc[1]);
+				elm_write_reg(info, offset, val);
+
+				/* syndrome fragment 3 = ecc[0B] */
+				offset += 4;
+				val = ecc[0];
+				elm_write_reg(info, offset, val);
+			} else {
+				/* syndrome fragment 0 = ecc[20-52b] bits */
+				val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) |
+					((ecc[2] & 0xf) << 28);
+				elm_write_reg(info, offset, val);
+
+				/* syndrome fragment 1 = ecc[0-20b] bits */
+				offset += 4;
+				val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
+				elm_write_reg(info, offset, val);
+			}
+		}
+
+		/* Update ecc pointer with ecc byte size */
+		ecc += info->bch_type ? BCH8_SIZE : BCH4_SIZE;
+	}
+}
+
+/**
+ * elm_start_processing - start elm syndrome processing
+ * @info: elm info
+ * @err_vec: elm error vectors
+ *
+ * Set syndrome valid bit for syndrome fragment registers for which
+ * elm syndrome fragment registers are loaded. This enables elm module
+ * to start processing syndrome vectors.
+ */
+static void elm_start_processing(struct elm_info *info,
+				 struct elm_errorvec *err_vec)
+{
+	int i, offset;
+	u32 reg_val;
+
+	/*
+	 * Set syndrome vector valid, so that ELM module
+	 * will process it for vectors error is reported
+	 */
+	for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+		if (err_vec[i].error_reported) {
+			offset = ELM_SYNDROME_FRAGMENT_6 +
+				SYNDROME_FRAGMENT_REG_SIZE * i;
+			reg_val = elm_read_reg(info, offset);
+			reg_val |= ELM_SYNDROME_VALID;
+			elm_write_reg(info, offset, reg_val);
+		}
+	}
+}
+
+/**
+ * elm_error_correction - locate correctable error position
+ * @info: elm info
+ * @err_vec: elm error vectors
+ *
+ * On completion of processing by elm module, error location status
+ * register updated with correctable/uncorrectable error information.
+ * In case of correctable errors, number of errors located from
+ * elm location status register & read the positions from
+ * elm error location register.
+ */
+static void elm_error_correction(struct elm_info *info,
+				 struct elm_errorvec *err_vec)
+{
+	int i, j, errors = 0;
+	int offset;
+	u32 reg_val;
+
+	for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+
+		/* Check error reported */
+		if (err_vec[i].error_reported) {
+			offset = ELM_LOCATION_STATUS + ERROR_LOCATION_SIZE * i;
+			reg_val = elm_read_reg(info, offset);
+
+			/* Check correctable error or not */
+			if (reg_val & ECC_CORRECTABLE_MASK) {
+				offset = ELM_ERROR_LOCATION_0 +
+					ERROR_LOCATION_SIZE * i;
+
+				/* Read count of correctable errors */
+				err_vec[i].error_count = reg_val &
+					ECC_NB_ERRORS_MASK;
+
+				/* Update the error locations in error vector */
+				for (j = 0; j < err_vec[i].error_count; j++) {
+
+					reg_val = elm_read_reg(info, offset);
+					err_vec[i].error_loc[j] = reg_val &
+						ECC_ERROR_LOCATION_MASK;
+
+					/* Update error location register */
+					offset += 4;
+				}
+
+				errors += err_vec[i].error_count;
+			} else {
+				err_vec[i].error_uncorrectable = true;
+			}
+
+			/* Clearing interrupts for processed error vectors */
+			elm_write_reg(info, ELM_IRQSTATUS, BIT(i));
+
+			/* Disable page mode */
+			elm_configure_page_mode(info, i, false);
+		}
+	}
+}
+
+/**
+ * elm_decode_bch_error_page - Locate error position
+ * @dev: device pointer
+ * @ecc_calc: calculated ECC bytes from GPMC
+ * @err_vec: elm error vectors
+ *
+ * Called with one or more error reported vectors & vectors with
+ * error reported is updated in err_vec[].error_reported
+ */
+void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
+			       struct elm_errorvec *err_vec)
+{
+	struct elm_info *info = dev_get_drvdata(dev);
+	u32 reg_val;
+
+	/* Enable page mode interrupt */
+	reg_val = elm_read_reg(info, ELM_IRQSTATUS);
+	elm_write_reg(info, ELM_IRQSTATUS, reg_val & INTR_STATUS_PAGE_VALID);
+	elm_write_reg(info, ELM_IRQENABLE, INTR_EN_PAGE_MASK);
+
+	/* Load valid ecc byte to syndrome fragment register */
+	elm_load_syndrome(info, err_vec, ecc_calc);
+
+	/* Enable syndrome processing for which syndrome fragment is updated */
+	elm_start_processing(info, err_vec);
+
+	/* Wait for ELM module to finish locating error correction */
+	wait_for_completion(&info->elm_completion);
+
+	/* Disable page mode interrupt */
+	reg_val = elm_read_reg(info, ELM_IRQENABLE);
+	elm_write_reg(info, ELM_IRQENABLE, reg_val & ~INTR_EN_PAGE_MASK);
+	elm_error_correction(info, err_vec);
+}
+EXPORT_SYMBOL(elm_decode_bch_error_page);
+
+static irqreturn_t elm_isr(int this_irq, void *dev_id)
+{
+	u32 reg_val;
+	struct elm_info *info = dev_id;
+
+	reg_val = elm_read_reg(info, ELM_IRQSTATUS);
+
+	/* All error vectors processed */
+	if (reg_val & INTR_STATUS_PAGE_VALID) {
+		elm_write_reg(info, ELM_IRQSTATUS,
+			      reg_val & INTR_STATUS_PAGE_VALID);
+		complete(&info->elm_completion);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static int elm_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct resource *res, *irq;
+	struct elm_info *info;
+
+	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		dev_err(&pdev->dev, "failed to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	info->dev = &pdev->dev;
+
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq) {
+		dev_err(&pdev->dev, "no irq resource defined\n");
+		return -ENODEV;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "no memory resource defined\n");
+		return -ENODEV;
+	}
+
+	info->elm_base = devm_request_and_ioremap(&pdev->dev, res);
+	if (!info->elm_base)
+		return -EADDRNOTAVAIL;
+
+	ret = devm_request_irq(&pdev->dev, irq->start, elm_isr, 0,
+			       pdev->name, info);
+	if (ret) {
+		dev_err(&pdev->dev, "failure requesting irq %i\n", irq->start);
+		return ret;
+	}
+
+	pm_runtime_enable(&pdev->dev);
+	if (pm_runtime_get_sync(&pdev->dev)) {
+		ret = -EINVAL;
+		pm_runtime_disable(&pdev->dev);
+		dev_err(&pdev->dev, "can't enable clock\n");
+		return ret;
+	}
+
+	init_completion(&info->elm_completion);
+	INIT_LIST_HEAD(&info->list);
+	list_add(&info->list, &elm_devices);
+	platform_set_drvdata(pdev, info);
+	return ret;
+}
+
+static int elm_remove(struct platform_device *pdev)
+{
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id elm_of_match[] = {
+	{ .compatible = "ti,am3352-elm" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, elm_of_match);
+#endif
+
+static struct platform_driver elm_driver = {
+	.driver	= {
+		.name	= "elm",
+		.owner	= THIS_MODULE,
+		.of_match_table = of_match_ptr(elm_of_match),
+	},
+	.probe	= elm_probe,
+	.remove	= elm_remove,
+};
+
+module_platform_driver(elm_driver);
+
+MODULE_DESCRIPTION("ELM driver for BCH error correction");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_ALIAS("platform: elm");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 4eeeb2d7f6ea..5b6b0728be21 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -565,6 +565,96 @@ time_out:
 	return ret;
 }
 
+static int m25p80_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	struct m25p *flash = mtd_to_m25p(mtd);
+	uint32_t offset = ofs;
+	uint8_t status_old, status_new;
+	int res = 0;
+
+	mutex_lock(&flash->lock);
+	/* Wait until finished previous command */
+	if (wait_till_ready(flash)) {
+		res = 1;
+		goto err;
+	}
+
+	status_old = read_sr(flash);
+
+	if (offset < flash->mtd.size-(flash->mtd.size/2))
+		status_new = status_old | SR_BP2 | SR_BP1 | SR_BP0;
+	else if (offset < flash->mtd.size-(flash->mtd.size/4))
+		status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
+	else if (offset < flash->mtd.size-(flash->mtd.size/8))
+		status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
+	else if (offset < flash->mtd.size-(flash->mtd.size/16))
+		status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2;
+	else if (offset < flash->mtd.size-(flash->mtd.size/32))
+		status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
+	else if (offset < flash->mtd.size-(flash->mtd.size/64))
+		status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1;
+	else
+		status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0;
+
+	/* Only modify protection if it will not unlock other areas */
+	if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) >
+	    (status_old&(SR_BP2|SR_BP1|SR_BP0))) {
+		write_enable(flash);
+		if (write_sr(flash, status_new) < 0) {
+			res = 1;
+			goto err;
+		}
+	}
+
+err:	mutex_unlock(&flash->lock);
+	return res;
+}
+
+static int m25p80_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	struct m25p *flash = mtd_to_m25p(mtd);
+	uint32_t offset = ofs;
+	uint8_t status_old, status_new;
+	int res = 0;
+
+	mutex_lock(&flash->lock);
+	/* Wait until finished previous command */
+	if (wait_till_ready(flash)) {
+		res = 1;
+		goto err;
+	}
+
+	status_old = read_sr(flash);
+
+	if (offset+len > flash->mtd.size-(flash->mtd.size/64))
+		status_new = status_old & ~(SR_BP2|SR_BP1|SR_BP0);
+	else if (offset+len > flash->mtd.size-(flash->mtd.size/32))
+		status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0;
+	else if (offset+len > flash->mtd.size-(flash->mtd.size/16))
+		status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1;
+	else if (offset+len > flash->mtd.size-(flash->mtd.size/8))
+		status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
+	else if (offset+len > flash->mtd.size-(flash->mtd.size/4))
+		status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2;
+	else if (offset+len > flash->mtd.size-(flash->mtd.size/2))
+		status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
+	else
+		status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
+
+	/* Only modify protection if it will not lock other areas */
+	if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) <
+	    (status_old&(SR_BP2|SR_BP1|SR_BP0))) {
+		write_enable(flash);
+		if (write_sr(flash, status_new) < 0) {
+			res = 1;
+			goto err;
+		}
+	}
+
+err:	mutex_unlock(&flash->lock);
+	return res;
+}
+
 /****************************************************************************/
 
 /*
@@ -642,6 +732,10 @@ static const struct spi_device_id m25p_ids[] = {
 	/* Everspin */
 	{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) },
 
+	/* GigaDevice */
+	{ "gd25q32", INFO(0xc84016, 0, 64 * 1024,  64, SECT_4K) },
+	{ "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
+
 	/* Intel/Numonyx -- xxxs33b */
 	{ "160s33b",  INFO(0x898911, 0, 64 * 1024,  32, 0) },
 	{ "320s33b",  INFO(0x898912, 0, 64 * 1024,  64, 0) },
@@ -899,6 +993,12 @@ static int m25p_probe(struct spi_device *spi)
 	flash->mtd._erase = m25p80_erase;
 	flash->mtd._read = m25p80_read;
 
+	/* flash protection support for STmicro chips */
+	if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ST) {
+		flash->mtd._lock = m25p80_lock;
+		flash->mtd._unlock = m25p80_unlock;
+	}
+
 	/* sst flash chips use AAI word program */
 	if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
 		flash->mtd._write = sst_write;
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 62ba82c396c2..3ed17c4d4358 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -429,7 +429,7 @@ config MTD_GPIO_ADDR
 
 config MTD_UCLINUX
 	bool "Generic uClinux RAM/ROM filesystem support"
-	depends on MTD_RAM=y && (!MMU || COLDFIRE)
+	depends on (MTD_RAM=y || MTD_ROM=y) && (!MMU || COLDFIRE)
 	help
 	  Map driver to support image based filesystems for uClinux.
 
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 7901d72c9242..363939dfad05 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -68,9 +68,6 @@ static int of_flash_remove(struct platform_device *dev)
 			kfree(info->list[i].res);
 		}
 	}
-
-	kfree(info);
-
 	return 0;
 }
 
@@ -199,8 +196,9 @@ static int of_flash_probe(struct platform_device *dev)
 	map_indirect = of_property_read_bool(dp, "no-unaligned-direct-access");
 
 	err = -ENOMEM;
-	info = kzalloc(sizeof(struct of_flash) +
-		       sizeof(struct of_flash_list) * count, GFP_KERNEL);
+	info = devm_kzalloc(&dev->dev,
+			    sizeof(struct of_flash) +
+			    sizeof(struct of_flash_list) * count, GFP_KERNEL);
 	if (!info)
 		goto err_flash_remove;
 
@@ -241,6 +239,7 @@ static int of_flash_probe(struct platform_device *dev)
 		info->list[i].map.phys = res.start;
 		info->list[i].map.size = res_size;
 		info->list[i].map.bankwidth = be32_to_cpup(width);
+		info->list[i].map.device_node = dp;
 
 		err = -ENOMEM;
 		info->list[i].map.virt = ioremap(info->list[i].map.phys,
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 299bf88a6f41..c1af83db5202 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -23,12 +23,26 @@
 
 /****************************************************************************/
 
+#ifdef CONFIG_MTD_ROM
+#define MAP_NAME "rom"
+#else
+#define MAP_NAME "ram"
+#endif
+
+/*
+ * Blackfin uses uclinux_ram_map during startup, so it must not be static.
+ * Provide a dummy declaration to make sparse happy.
+ */
+extern struct map_info uclinux_ram_map;
+
 struct map_info uclinux_ram_map = {
-	.name = "RAM",
-	.phys = (unsigned long)__bss_stop,
+	.name = MAP_NAME,
 	.size = 0,
 };
 
+static unsigned long physaddr = -1;
+module_param(physaddr, ulong, S_IRUGO);
+
 static struct mtd_info *uclinux_ram_mtdinfo;
 
 /****************************************************************************/
@@ -60,11 +74,17 @@ static int __init uclinux_mtd_init(void)
 	struct map_info *mapp;
 
 	mapp = &uclinux_ram_map;
+
+	if (physaddr == -1)
+		mapp->phys = (resource_size_t)__bss_stop;
+	else
+		mapp->phys = physaddr;
+
 	if (!mapp->size)
 		mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(mapp->phys + 8))));
 	mapp->bankwidth = 4;
 
-	printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n",
+	printk("uclinux[mtd]: probe address=0x%x size=0x%x\n",
 	       (int) mapp->phys, (int) mapp->size);
 
 	/*
@@ -82,7 +102,7 @@ static int __init uclinux_mtd_init(void)
 
 	simple_map_init(mapp);
 
-	mtd = do_map_probe("map_ram", mapp);
+	mtd = do_map_probe("map_" MAP_NAME, mapp);
 	if (!mtd) {
 		printk("uclinux[mtd]: failed to find a mapping?\n");
 		return(-ENXIO);
@@ -118,6 +138,6 @@ module_exit(uclinux_mtd_cleanup);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>");
-MODULE_DESCRIPTION("Generic RAM based MTD for uClinux");
+MODULE_DESCRIPTION("Generic MTD for uClinux");
 
 /****************************************************************************/
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index c516a9408087..ffcbcca2fd2d 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -101,6 +101,8 @@ struct atmel_nand_host {
 	u8		pmecc_corr_cap;
 	u16		pmecc_sector_size;
 	u32		pmecc_lookup_table_offset;
+	u32		pmecc_lookup_table_offset_512;
+	u32		pmecc_lookup_table_offset_1024;
 
 	int		pmecc_bytes_per_sector;
 	int		pmecc_sector_number;
@@ -908,6 +910,84 @@ static void atmel_pmecc_core_init(struct mtd_info *mtd)
 	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
 }
 
+/*
+ * Get ECC requirement in ONFI parameters, returns -1 if ONFI
+ * parameters is not supported.
+ * return 0 if success to get the ECC requirement.
+ */
+static int get_onfi_ecc_param(struct nand_chip *chip,
+		int *ecc_bits, int *sector_size)
+{
+	*ecc_bits = *sector_size = 0;
+
+	if (chip->onfi_params.ecc_bits == 0xff)
+		/* TODO: the sector_size and ecc_bits need to be find in
+		 * extended ecc parameter, currently we don't support it.
+		 */
+		return -1;
+
+	*ecc_bits = chip->onfi_params.ecc_bits;
+
+	/* The default sector size (ecc codeword size) is 512 */
+	*sector_size = 512;
+
+	return 0;
+}
+
+/*
+ * Get ecc requirement from ONFI parameters ecc requirement.
+ * If pmecc-cap, pmecc-sector-size in DTS are not specified, this function
+ * will set them according to ONFI ecc requirement. Otherwise, use the
+ * value in DTS file.
+ * return 0 if success. otherwise return error code.
+ */
+static int pmecc_choose_ecc(struct atmel_nand_host *host,
+		int *cap, int *sector_size)
+{
+	/* Get ECC requirement from ONFI parameters */
+	*cap = *sector_size = 0;
+	if (host->nand_chip.onfi_version) {
+		if (!get_onfi_ecc_param(&host->nand_chip, cap, sector_size))
+			dev_info(host->dev, "ONFI params, minimum required ECC: %d bits in %d bytes\n",
+				*cap, *sector_size);
+		else
+			dev_info(host->dev, "NAND chip ECC reqirement is in Extended ONFI parameter, we don't support yet.\n");
+	} else {
+		dev_info(host->dev, "NAND chip is not ONFI compliant, assume ecc_bits is 2 in 512 bytes");
+	}
+	if (*cap == 0 && *sector_size == 0) {
+		*cap = 2;
+		*sector_size = 512;
+	}
+
+	/* If dts file doesn't specify then use the one in ONFI parameters */
+	if (host->pmecc_corr_cap == 0) {
+		/* use the most fitable ecc bits (the near bigger one ) */
+		if (*cap <= 2)
+			host->pmecc_corr_cap = 2;
+		else if (*cap <= 4)
+			host->pmecc_corr_cap = 4;
+		else if (*cap < 8)
+			host->pmecc_corr_cap = 8;
+		else if (*cap < 12)
+			host->pmecc_corr_cap = 12;
+		else if (*cap < 24)
+			host->pmecc_corr_cap = 24;
+		else
+			return -EINVAL;
+	}
+	if (host->pmecc_sector_size == 0) {
+		/* use the most fitable sector size (the near smaller one ) */
+		if (*sector_size >= 1024)
+			host->pmecc_sector_size = 1024;
+		else if (*sector_size >= 512)
+			host->pmecc_sector_size = 512;
+		else
+			return -EINVAL;
+	}
+	return 0;
+}
+
 static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
 					       struct atmel_nand_host *host)
 {
@@ -916,8 +996,22 @@ static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
 	struct resource *regs, *regs_pmerr, *regs_rom;
 	int cap, sector_size, err_no;
 
+	err_no = pmecc_choose_ecc(host, &cap, &sector_size);
+	if (err_no) {
+		dev_err(host->dev, "The NAND flash's ECC requirement are not support!");
+		return err_no;
+	}
+
+	if (cap != host->pmecc_corr_cap ||
+	    sector_size != host->pmecc_sector_size)
+		dev_info(host->dev, "WARNING: Be Caution! Using different PMECC parameters from Nand ONFI ECC reqirement.\n");
+
 	cap = host->pmecc_corr_cap;
 	sector_size = host->pmecc_sector_size;
+	host->pmecc_lookup_table_offset = (sector_size == 512) ?
+			host->pmecc_lookup_table_offset_512 :
+			host->pmecc_lookup_table_offset_1024;
+
 	dev_info(host->dev, "Initialize PMECC params, cap: %d, sector: %d\n",
 		 cap, sector_size);
 
@@ -1215,7 +1309,7 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
 static int atmel_of_init_port(struct atmel_nand_host *host,
 			      struct device_node *np)
 {
-	u32 val, table_offset;
+	u32 val;
 	u32 offset[2];
 	int ecc_mode;
 	struct atmel_nand_data *board = &host->board;
@@ -1259,42 +1353,41 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
1259 1353
1260 /* use PMECC, get correction capability, sector size and lookup 1354 /* use PMECC, get correction capability, sector size and lookup
1261 * table offset. 1355 * table offset.
1356 * If correction bits and sector size are not specified, then find
1357 * them from NAND ONFI parameters.
1262 */ 1358 */
1263 if (of_property_read_u32(np, "atmel,pmecc-cap", &val) != 0) { 1359 if (of_property_read_u32(np, "atmel,pmecc-cap", &val) == 0) {
1264 dev_err(host->dev, "Cannot decide PMECC Capability\n"); 1360 if ((val != 2) && (val != 4) && (val != 8) && (val != 12) &&
1265 return -EINVAL; 1361 (val != 24)) {
1266 } else if ((val != 2) && (val != 4) && (val != 8) && (val != 12) && 1362 dev_err(host->dev,
1267 (val != 24)) { 1363 "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n",
1268 dev_err(host->dev, 1364 val);
1269 "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n", 1365 return -EINVAL;
1270 val); 1366 }
1271 return -EINVAL; 1367 host->pmecc_corr_cap = (u8)val;
1272 } 1368 }
1273 host->pmecc_corr_cap = (u8)val;
1274 1369
1275 if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) != 0) { 1370 if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) == 0) {
1276 dev_err(host->dev, "Cannot decide PMECC Sector Size\n"); 1371 if ((val != 512) && (val != 1024)) {
1277 return -EINVAL; 1372 dev_err(host->dev,
1278 } else if ((val != 512) && (val != 1024)) { 1373 "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n",
1279 dev_err(host->dev, 1374 val);
1280 "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n", 1375 return -EINVAL;
1281 val); 1376 }
1282 return -EINVAL; 1377 host->pmecc_sector_size = (u16)val;
1283 } 1378 }
1284 host->pmecc_sector_size = (u16)val;
1285 1379
1286 if (of_property_read_u32_array(np, "atmel,pmecc-lookup-table-offset", 1380 if (of_property_read_u32_array(np, "atmel,pmecc-lookup-table-offset",
1287 offset, 2) != 0) { 1381 offset, 2) != 0) {
1288 dev_err(host->dev, "Cannot get PMECC lookup table offset\n"); 1382 dev_err(host->dev, "Cannot get PMECC lookup table offset\n");
1289 return -EINVAL; 1383 return -EINVAL;
1290 } 1384 }
1291 table_offset = host->pmecc_sector_size == 512 ? offset[0] : offset[1]; 1385 if (!offset[0] && !offset[1]) {
1292
1293 if (!table_offset) {
1294 dev_err(host->dev, "Invalid PMECC lookup table offset\n"); 1386 dev_err(host->dev, "Invalid PMECC lookup table offset\n");
1295 return -EINVAL; 1387 return -EINVAL;
1296 } 1388 }
1297 host->pmecc_lookup_table_offset = table_offset; 1389 host->pmecc_lookup_table_offset_512 = offset[0];
1390 host->pmecc_lookup_table_offset_1024 = offset[1];
1298 1391
1299 return 0; 1392 return 0;
1300} 1393}
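
A side note on the device-tree parsing change above: a failed property read no longer aborts the probe, and the value is validated only when the property is actually present. Below is a minimal, illustrative sketch of that optional-property idiom; the helper name is made up and not part of the patch.

#include <linux/of.h>
#include <linux/types.h>

/* Illustrative only: a missing property keeps the default, and the value
 * is validated/applied only when the device tree actually provides it. */
static void example_optional_u32(struct device_node *np, u8 *cap)
{
        u32 val;

        if (of_property_read_u32(np, "atmel,pmecc-cap", &val) == 0) {
                /* range checking would go here, as in the hunk above */
                *cap = (u8)val;
        }
        /* otherwise *cap keeps its default; the driver can later fall
         * back to the NAND ONFI ECC requirement, as the new comment says */
}
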
diff --git a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
index 0bdb2ce4da75..c005a62330b1 100644
--- a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
+++ b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
@@ -1,6 +1,10 @@
1#ifndef __BCM47XXNFLASH_H 1#ifndef __BCM47XXNFLASH_H
2#define __BCM47XXNFLASH_H 2#define __BCM47XXNFLASH_H
3 3
4#ifndef pr_fmt
5#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6#endif
7
4#include <linux/mtd/mtd.h> 8#include <linux/mtd/mtd.h>
5#include <linux/mtd/nand.h> 9#include <linux/mtd/nand.h>
6 10
diff --git a/drivers/mtd/nand/bcm47xxnflash/main.c b/drivers/mtd/nand/bcm47xxnflash/main.c
index 8363a9a5fa3f..7bae569fdc79 100644
--- a/drivers/mtd/nand/bcm47xxnflash/main.c
+++ b/drivers/mtd/nand/bcm47xxnflash/main.c
@@ -9,14 +9,14 @@
9 * 9 *
10 */ 10 */
11 11
12#include "bcm47xxnflash.h"
13
12#include <linux/module.h> 14#include <linux/module.h>
13#include <linux/kernel.h> 15#include <linux/kernel.h>
14#include <linux/slab.h> 16#include <linux/slab.h>
15#include <linux/platform_device.h> 17#include <linux/platform_device.h>
16#include <linux/bcma/bcma.h> 18#include <linux/bcma/bcma.h>
17 19
18#include "bcm47xxnflash.h"
19
20MODULE_DESCRIPTION("NAND flash driver for BCMA bus"); 20MODULE_DESCRIPTION("NAND flash driver for BCMA bus");
21MODULE_LICENSE("GPL"); 21MODULE_LICENSE("GPL");
22MODULE_AUTHOR("Rafał Miłecki"); 22MODULE_AUTHOR("Rafał Miłecki");
@@ -77,6 +77,7 @@ static int bcm47xxnflash_remove(struct platform_device *pdev)
77} 77}
78 78
79static struct platform_driver bcm47xxnflash_driver = { 79static struct platform_driver bcm47xxnflash_driver = {
80 .probe = bcm47xxnflash_probe,
80 .remove = bcm47xxnflash_remove, 81 .remove = bcm47xxnflash_remove,
81 .driver = { 82 .driver = {
82 .name = "bcma_nflash", 83 .name = "bcma_nflash",
@@ -88,13 +89,10 @@ static int __init bcm47xxnflash_init(void)
88{ 89{
89 int err; 90 int err;
90 91
91 /* 92 err = platform_driver_register(&bcm47xxnflash_driver);
92 * Platform device "bcma_nflash" exists on SoCs and is registered very
93 * early, it won't be added during runtime (use platform_driver_probe).
94 */
95 err = platform_driver_probe(&bcm47xxnflash_driver, bcm47xxnflash_probe);
96 if (err) 93 if (err)
97 pr_err("Failed to register serial flash driver: %d\n", err); 94 pr_err("Failed to register bcm47xx nand flash driver: %d\n",
95 err);
98 96
99 return err; 97 return err;
100} 98}
diff --git a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
index 595de4012e71..b2ab373c9eef 100644
--- a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
@@ -9,13 +9,13 @@
9 * 9 *
10 */ 10 */
11 11
12#include "bcm47xxnflash.h"
13
12#include <linux/module.h> 14#include <linux/module.h>
13#include <linux/kernel.h> 15#include <linux/kernel.h>
14#include <linux/slab.h> 16#include <linux/slab.h>
15#include <linux/bcma/bcma.h> 17#include <linux/bcma/bcma.h>
16 18
17#include "bcm47xxnflash.h"
18
19/* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 have 19/* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 have
20 * shown ~1000 retries as maximum. */ 20 * shown ~1000 retries as maximum. */
21#define NFLASH_READY_RETRIES 10000 21#define NFLASH_READY_RETRIES 10000
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index feae55c7b880..94e17af8e450 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -606,7 +606,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
606 if (pdev->id < 0 || pdev->id > 3) 606 if (pdev->id < 0 || pdev->id > 3)
607 return -ENODEV; 607 return -ENODEV;
608 608
609 info = kzalloc(sizeof(*info), GFP_KERNEL); 609 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
610 if (!info) { 610 if (!info) {
611 dev_err(&pdev->dev, "unable to allocate memory\n"); 611 dev_err(&pdev->dev, "unable to allocate memory\n");
612 ret = -ENOMEM; 612 ret = -ENOMEM;
@@ -623,11 +623,11 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
623 goto err_nomem; 623 goto err_nomem;
624 } 624 }
625 625
626 vaddr = ioremap(res1->start, resource_size(res1)); 626 vaddr = devm_request_and_ioremap(&pdev->dev, res1);
627 base = ioremap(res2->start, resource_size(res2)); 627 base = devm_request_and_ioremap(&pdev->dev, res2);
628 if (!vaddr || !base) { 628 if (!vaddr || !base) {
629 dev_err(&pdev->dev, "ioremap failed\n"); 629 dev_err(&pdev->dev, "ioremap failed\n");
630 ret = -EINVAL; 630 ret = -EADDRNOTAVAIL;
631 goto err_ioremap; 631 goto err_ioremap;
632 } 632 }
633 633
@@ -717,7 +717,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
717 } 717 }
718 info->chip.ecc.mode = ecc_mode; 718 info->chip.ecc.mode = ecc_mode;
719 719
720 info->clk = clk_get(&pdev->dev, "aemif"); 720 info->clk = devm_clk_get(&pdev->dev, "aemif");
721 if (IS_ERR(info->clk)) { 721 if (IS_ERR(info->clk)) {
722 ret = PTR_ERR(info->clk); 722 ret = PTR_ERR(info->clk);
723 dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret); 723 dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
@@ -845,8 +845,6 @@ err_timing:
845 clk_disable_unprepare(info->clk); 845 clk_disable_unprepare(info->clk);
846 846
847err_clk_enable: 847err_clk_enable:
848 clk_put(info->clk);
849
850 spin_lock_irq(&davinci_nand_lock); 848 spin_lock_irq(&davinci_nand_lock);
851 if (ecc_mode == NAND_ECC_HW_SYNDROME) 849 if (ecc_mode == NAND_ECC_HW_SYNDROME)
852 ecc4_busy = false; 850 ecc4_busy = false;
@@ -855,13 +853,7 @@ err_clk_enable:
855err_ecc: 853err_ecc:
856err_clk: 854err_clk:
857err_ioremap: 855err_ioremap:
858 if (base)
859 iounmap(base);
860 if (vaddr)
861 iounmap(vaddr);
862
863err_nomem: 856err_nomem:
864 kfree(info);
865 return ret; 857 return ret;
866} 858}
867 859
@@ -874,15 +866,9 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
874 ecc4_busy = false; 866 ecc4_busy = false;
875 spin_unlock_irq(&davinci_nand_lock); 867 spin_unlock_irq(&davinci_nand_lock);
876 868
877 iounmap(info->base);
878 iounmap(info->vaddr);
879
880 nand_release(&info->mtd); 869 nand_release(&info->mtd);
881 870
882 clk_disable_unprepare(info->clk); 871 clk_disable_unprepare(info->clk);
883 clk_put(info->clk);
884
885 kfree(info);
886 872
887 return 0; 873 return 0;
888} 874}
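
The davinci changes above convert the driver to device-managed (devm_*) resources, which is why the explicit iounmap()/clk_put()/kfree() calls disappear from the error and remove paths. A minimal sketch of the pattern, using a made-up example_probe():

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/err.h>

/* Sketch only: devm_* allocations are tied to the device and freed
 * automatically on probe failure and on driver detach. */
static int example_probe(struct platform_device *pdev)
{
        struct clk *clk;
        void *priv;

        priv = devm_kzalloc(&pdev->dev, 128, GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        clk = devm_clk_get(&pdev->dev, "aemif");
        if (IS_ERR(clk))
                return PTR_ERR(clk);    /* nothing to undo by hand */

        platform_set_drvdata(pdev, priv);
        return 0;
}
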
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index ad6222627fed..f1f7f12ab501 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -176,8 +176,8 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
176 176
177 ifc_nand_ctrl->page = page_addr; 177 ifc_nand_ctrl->page = page_addr;
178 /* Program ROW0/COL0 */ 178 /* Program ROW0/COL0 */
179 out_be32(&ifc->ifc_nand.row0, page_addr); 179 iowrite32be(page_addr, &ifc->ifc_nand.row0);
180 out_be32(&ifc->ifc_nand.col0, (oob ? IFC_NAND_COL_MS : 0) | column); 180 iowrite32be((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
181 181
182 buf_num = page_addr & priv->bufnum_mask; 182 buf_num = page_addr & priv->bufnum_mask;
183 183
@@ -239,18 +239,19 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
239 int i; 239 int i;
240 240
241 /* set the chip select for NAND Transaction */ 241 /* set the chip select for NAND Transaction */
242 out_be32(&ifc->ifc_nand.nand_csel, priv->bank << IFC_NAND_CSEL_SHIFT); 242 iowrite32be(priv->bank << IFC_NAND_CSEL_SHIFT,
243 &ifc->ifc_nand.nand_csel);
243 244
244 dev_vdbg(priv->dev, 245 dev_vdbg(priv->dev,
245 "%s: fir0=%08x fcr0=%08x\n", 246 "%s: fir0=%08x fcr0=%08x\n",
246 __func__, 247 __func__,
247 in_be32(&ifc->ifc_nand.nand_fir0), 248 ioread32be(&ifc->ifc_nand.nand_fir0),
248 in_be32(&ifc->ifc_nand.nand_fcr0)); 249 ioread32be(&ifc->ifc_nand.nand_fcr0));
249 250
250 ctrl->nand_stat = 0; 251 ctrl->nand_stat = 0;
251 252
252 /* start read/write seq */ 253 /* start read/write seq */
253 out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT); 254 iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
254 255
255 /* wait for command complete flag or timeout */ 256 /* wait for command complete flag or timeout */
256 wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat, 257 wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -273,7 +274,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
273 int sector_end = sector + chip->ecc.steps - 1; 274 int sector_end = sector + chip->ecc.steps - 1;
274 275
275 for (i = sector / 4; i <= sector_end / 4; i++) 276 for (i = sector / 4; i <= sector_end / 4; i++)
276 eccstat[i] = in_be32(&ifc->ifc_nand.nand_eccstat[i]); 277 eccstat[i] = ioread32be(&ifc->ifc_nand.nand_eccstat[i]);
277 278
278 for (i = sector; i <= sector_end; i++) { 279 for (i = sector; i <= sector_end; i++) {
279 errors = check_read_ecc(mtd, ctrl, eccstat, i); 280 errors = check_read_ecc(mtd, ctrl, eccstat, i);
@@ -313,31 +314,33 @@ static void fsl_ifc_do_read(struct nand_chip *chip,
313 314
314 /* Program FIR/IFC_NAND_FCR0 for Small/Large page */ 315 /* Program FIR/IFC_NAND_FCR0 for Small/Large page */
315 if (mtd->writesize > 512) { 316 if (mtd->writesize > 512) {
316 out_be32(&ifc->ifc_nand.nand_fir0, 317 iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
317 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 318 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
318 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | 319 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
319 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | 320 (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
320 (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) | 321 (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
321 (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT)); 322 &ifc->ifc_nand.nand_fir0);
322 out_be32(&ifc->ifc_nand.nand_fir1, 0x0); 323 iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
323 324
324 out_be32(&ifc->ifc_nand.nand_fcr0, 325 iowrite32be((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
325 (NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) | 326 (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
326 (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT)); 327 &ifc->ifc_nand.nand_fcr0);
327 } else { 328 } else {
328 out_be32(&ifc->ifc_nand.nand_fir0, 329 iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
329 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 330 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
330 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | 331 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
331 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | 332 (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
332 (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT)); 333 &ifc->ifc_nand.nand_fir0);
333 out_be32(&ifc->ifc_nand.nand_fir1, 0x0); 334 iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
334 335
335 if (oob) 336 if (oob)
336 out_be32(&ifc->ifc_nand.nand_fcr0, 337 iowrite32be(NAND_CMD_READOOB <<
337 NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT); 338 IFC_NAND_FCR0_CMD0_SHIFT,
339 &ifc->ifc_nand.nand_fcr0);
338 else 340 else
339 out_be32(&ifc->ifc_nand.nand_fcr0, 341 iowrite32be(NAND_CMD_READ0 <<
340 NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT); 342 IFC_NAND_FCR0_CMD0_SHIFT,
343 &ifc->ifc_nand.nand_fcr0);
341 } 344 }
342} 345}
343 346
@@ -357,7 +360,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
357 switch (command) { 360 switch (command) {
358 /* READ0 read the entire buffer to use hardware ECC. */ 361 /* READ0 read the entire buffer to use hardware ECC. */
359 case NAND_CMD_READ0: 362 case NAND_CMD_READ0:
360 out_be32(&ifc->ifc_nand.nand_fbcr, 0); 363 iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
361 set_addr(mtd, 0, page_addr, 0); 364 set_addr(mtd, 0, page_addr, 0);
362 365
363 ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize; 366 ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
@@ -372,7 +375,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
372 375
373 /* READOOB reads only the OOB because no ECC is performed. */ 376 /* READOOB reads only the OOB because no ECC is performed. */
374 case NAND_CMD_READOOB: 377 case NAND_CMD_READOOB:
375 out_be32(&ifc->ifc_nand.nand_fbcr, mtd->oobsize - column); 378 iowrite32be(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
376 set_addr(mtd, column, page_addr, 1); 379 set_addr(mtd, column, page_addr, 1);
377 380
378 ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize; 381 ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
@@ -388,19 +391,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
388 if (command == NAND_CMD_PARAM) 391 if (command == NAND_CMD_PARAM)
389 timing = IFC_FIR_OP_RBCD; 392 timing = IFC_FIR_OP_RBCD;
390 393
391 out_be32(&ifc->ifc_nand.nand_fir0, 394 iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
392 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 395 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
393 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | 396 (timing << IFC_NAND_FIR0_OP2_SHIFT),
394 (timing << IFC_NAND_FIR0_OP2_SHIFT)); 397 &ifc->ifc_nand.nand_fir0);
395 out_be32(&ifc->ifc_nand.nand_fcr0, 398 iowrite32be(command << IFC_NAND_FCR0_CMD0_SHIFT,
396 command << IFC_NAND_FCR0_CMD0_SHIFT); 399 &ifc->ifc_nand.nand_fcr0);
397 out_be32(&ifc->ifc_nand.row3, column); 400 iowrite32be(column, &ifc->ifc_nand.row3);
398 401
399 /* 402 /*
400 * although currently it's 8 bytes for READID, we always read 403 * although currently it's 8 bytes for READID, we always read
401 * the maximum 256 bytes(for PARAM) 404 * the maximum 256 bytes(for PARAM)
402 */ 405 */
403 out_be32(&ifc->ifc_nand.nand_fbcr, 256); 406 iowrite32be(256, &ifc->ifc_nand.nand_fbcr);
404 ifc_nand_ctrl->read_bytes = 256; 407 ifc_nand_ctrl->read_bytes = 256;
405 408
406 set_addr(mtd, 0, 0, 0); 409 set_addr(mtd, 0, 0, 0);
@@ -415,16 +418,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
415 418
416 /* ERASE2 uses the block and page address from ERASE1 */ 419 /* ERASE2 uses the block and page address from ERASE1 */
417 case NAND_CMD_ERASE2: 420 case NAND_CMD_ERASE2:
418 out_be32(&ifc->ifc_nand.nand_fir0, 421 iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
419 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 422 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
420 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) | 423 (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
421 (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT)); 424 &ifc->ifc_nand.nand_fir0);
422 425
423 out_be32(&ifc->ifc_nand.nand_fcr0, 426 iowrite32be((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
424 (NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) | 427 (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
425 (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT)); 428 &ifc->ifc_nand.nand_fcr0);
426 429
427 out_be32(&ifc->ifc_nand.nand_fbcr, 0); 430 iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
428 ifc_nand_ctrl->read_bytes = 0; 431 ifc_nand_ctrl->read_bytes = 0;
429 fsl_ifc_run_command(mtd); 432 fsl_ifc_run_command(mtd);
430 return; 433 return;
@@ -440,26 +443,28 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
440 (NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) | 443 (NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
441 (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD1_SHIFT); 444 (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD1_SHIFT);
442 445
443 out_be32(&ifc->ifc_nand.nand_fir0, 446 iowrite32be(
444 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 447 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
445 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | 448 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
446 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | 449 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
447 (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) | 450 (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
448 (IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT)); 451 (IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT),
452 &ifc->ifc_nand.nand_fir0);
449 } else { 453 } else {
450 nand_fcr0 = ((NAND_CMD_PAGEPROG << 454 nand_fcr0 = ((NAND_CMD_PAGEPROG <<
451 IFC_NAND_FCR0_CMD1_SHIFT) | 455 IFC_NAND_FCR0_CMD1_SHIFT) |
452 (NAND_CMD_SEQIN << 456 (NAND_CMD_SEQIN <<
453 IFC_NAND_FCR0_CMD2_SHIFT)); 457 IFC_NAND_FCR0_CMD2_SHIFT));
454 458
455 out_be32(&ifc->ifc_nand.nand_fir0, 459 iowrite32be(
456 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 460 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
457 (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) | 461 (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
458 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) | 462 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
459 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) | 463 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
460 (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT)); 464 (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
461 out_be32(&ifc->ifc_nand.nand_fir1, 465 &ifc->ifc_nand.nand_fir0);
462 (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT)); 466 iowrite32be(IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT,
467 &ifc->ifc_nand.nand_fir1);
463 468
464 if (column >= mtd->writesize) 469 if (column >= mtd->writesize)
465 nand_fcr0 |= 470 nand_fcr0 |=
@@ -474,7 +479,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
474 column -= mtd->writesize; 479 column -= mtd->writesize;
475 ifc_nand_ctrl->oob = 1; 480 ifc_nand_ctrl->oob = 1;
476 } 481 }
477 out_be32(&ifc->ifc_nand.nand_fcr0, nand_fcr0); 482 iowrite32be(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
478 set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob); 483 set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
479 return; 484 return;
480 } 485 }
@@ -482,10 +487,11 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
482 /* PAGEPROG reuses all of the setup from SEQIN and adds the length */ 487 /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
483 case NAND_CMD_PAGEPROG: { 488 case NAND_CMD_PAGEPROG: {
484 if (ifc_nand_ctrl->oob) { 489 if (ifc_nand_ctrl->oob) {
485 out_be32(&ifc->ifc_nand.nand_fbcr, 490 iowrite32be(ifc_nand_ctrl->index -
486 ifc_nand_ctrl->index - ifc_nand_ctrl->column); 491 ifc_nand_ctrl->column,
492 &ifc->ifc_nand.nand_fbcr);
487 } else { 493 } else {
488 out_be32(&ifc->ifc_nand.nand_fbcr, 0); 494 iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
489 } 495 }
490 496
491 fsl_ifc_run_command(mtd); 497 fsl_ifc_run_command(mtd);
@@ -493,12 +499,12 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
493 } 499 }
494 500
495 case NAND_CMD_STATUS: 501 case NAND_CMD_STATUS:
496 out_be32(&ifc->ifc_nand.nand_fir0, 502 iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
497 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 503 (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
498 (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT)); 504 &ifc->ifc_nand.nand_fir0);
499 out_be32(&ifc->ifc_nand.nand_fcr0, 505 iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
500 NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT); 506 &ifc->ifc_nand.nand_fcr0);
501 out_be32(&ifc->ifc_nand.nand_fbcr, 1); 507 iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
502 set_addr(mtd, 0, 0, 0); 508 set_addr(mtd, 0, 0, 0);
503 ifc_nand_ctrl->read_bytes = 1; 509 ifc_nand_ctrl->read_bytes = 1;
504 510
@@ -512,10 +518,10 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
512 return; 518 return;
513 519
514 case NAND_CMD_RESET: 520 case NAND_CMD_RESET:
515 out_be32(&ifc->ifc_nand.nand_fir0, 521 iowrite32be(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
516 IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT); 522 &ifc->ifc_nand.nand_fir0);
517 out_be32(&ifc->ifc_nand.nand_fcr0, 523 iowrite32be(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
518 NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT); 524 &ifc->ifc_nand.nand_fcr0);
519 fsl_ifc_run_command(mtd); 525 fsl_ifc_run_command(mtd);
520 return; 526 return;
521 527
@@ -639,18 +645,18 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
639 u32 nand_fsr; 645 u32 nand_fsr;
640 646
641 /* Use READ_STATUS command, but wait for the device to be ready */ 647 /* Use READ_STATUS command, but wait for the device to be ready */
642 out_be32(&ifc->ifc_nand.nand_fir0, 648 iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
643 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 649 (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
644 (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT)); 650 &ifc->ifc_nand.nand_fir0);
645 out_be32(&ifc->ifc_nand.nand_fcr0, NAND_CMD_STATUS << 651 iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
646 IFC_NAND_FCR0_CMD0_SHIFT); 652 &ifc->ifc_nand.nand_fcr0);
647 out_be32(&ifc->ifc_nand.nand_fbcr, 1); 653 iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
648 set_addr(mtd, 0, 0, 0); 654 set_addr(mtd, 0, 0, 0);
649 ifc_nand_ctrl->read_bytes = 1; 655 ifc_nand_ctrl->read_bytes = 1;
650 656
651 fsl_ifc_run_command(mtd); 657 fsl_ifc_run_command(mtd);
652 658
653 nand_fsr = in_be32(&ifc->ifc_nand.nand_fsr); 659 nand_fsr = ioread32be(&ifc->ifc_nand.nand_fsr);
654 660
655 /* 661 /*
656 * The chip always seems to report that it is 662 * The chip always seems to report that it is
@@ -744,34 +750,34 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
744 uint32_t cs = priv->bank; 750 uint32_t cs = priv->bank;
745 751
746 /* Save CSOR and CSOR_ext */ 752 /* Save CSOR and CSOR_ext */
747 csor = in_be32(&ifc->csor_cs[cs].csor); 753 csor = ioread32be(&ifc->csor_cs[cs].csor);
748 csor_ext = in_be32(&ifc->csor_cs[cs].csor_ext); 754 csor_ext = ioread32be(&ifc->csor_cs[cs].csor_ext);
749 755
750 /* change PageSize 8K and SpareSize 1K */ 756 /* change PageSize 8K and SpareSize 1K */
751 csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000; 757 csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
752 out_be32(&ifc->csor_cs[cs].csor, csor_8k); 758 iowrite32be(csor_8k, &ifc->csor_cs[cs].csor);
753 out_be32(&ifc->csor_cs[cs].csor_ext, 0x0000400); 759 iowrite32be(0x0000400, &ifc->csor_cs[cs].csor_ext);
754 760
755 /* READID */ 761 /* READID */
756 out_be32(&ifc->ifc_nand.nand_fir0, 762 iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
757 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 763 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
758 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | 764 (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
759 (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT)); 765 &ifc->ifc_nand.nand_fir0);
760 out_be32(&ifc->ifc_nand.nand_fcr0, 766 iowrite32be(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
761 NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT); 767 &ifc->ifc_nand.nand_fcr0);
762 out_be32(&ifc->ifc_nand.row3, 0x0); 768 iowrite32be(0x0, &ifc->ifc_nand.row3);
763 769
764 out_be32(&ifc->ifc_nand.nand_fbcr, 0x0); 770 iowrite32be(0x0, &ifc->ifc_nand.nand_fbcr);
765 771
766 /* Program ROW0/COL0 */ 772 /* Program ROW0/COL0 */
767 out_be32(&ifc->ifc_nand.row0, 0x0); 773 iowrite32be(0x0, &ifc->ifc_nand.row0);
768 out_be32(&ifc->ifc_nand.col0, 0x0); 774 iowrite32be(0x0, &ifc->ifc_nand.col0);
769 775
770 /* set the chip select for NAND Transaction */ 776 /* set the chip select for NAND Transaction */
771 out_be32(&ifc->ifc_nand.nand_csel, cs << IFC_NAND_CSEL_SHIFT); 777 iowrite32be(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel);
772 778
773 /* start read seq */ 779 /* start read seq */
774 out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT); 780 iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
775 781
776 /* wait for command complete flag or timeout */ 782 /* wait for command complete flag or timeout */
777 wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat, 783 wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -781,8 +787,8 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
781 printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n"); 787 printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
782 788
783 /* Restore CSOR and CSOR_ext */ 789 /* Restore CSOR and CSOR_ext */
784 out_be32(&ifc->csor_cs[cs].csor, csor); 790 iowrite32be(csor, &ifc->csor_cs[cs].csor);
785 out_be32(&ifc->csor_cs[cs].csor_ext, csor_ext); 791 iowrite32be(csor_ext, &ifc->csor_cs[cs].csor_ext);
786} 792}
787 793
788static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) 794static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
@@ -799,7 +805,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
799 805
800 /* fill in nand_chip structure */ 806 /* fill in nand_chip structure */
801 /* set up function call table */ 807 /* set up function call table */
802 if ((in_be32(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16) 808 if ((ioread32be(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
803 chip->read_byte = fsl_ifc_read_byte16; 809 chip->read_byte = fsl_ifc_read_byte16;
804 else 810 else
805 chip->read_byte = fsl_ifc_read_byte; 811 chip->read_byte = fsl_ifc_read_byte;
@@ -813,13 +819,13 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
813 chip->bbt_td = &bbt_main_descr; 819 chip->bbt_td = &bbt_main_descr;
814 chip->bbt_md = &bbt_mirror_descr; 820 chip->bbt_md = &bbt_mirror_descr;
815 821
816 out_be32(&ifc->ifc_nand.ncfgr, 0x0); 822 iowrite32be(0x0, &ifc->ifc_nand.ncfgr);
817 823
818 /* set up nand options */ 824 /* set up nand options */
819 chip->bbt_options = NAND_BBT_USE_FLASH; 825 chip->bbt_options = NAND_BBT_USE_FLASH;
820 826
821 827
822 if (in_be32(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) { 828 if (ioread32be(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
823 chip->read_byte = fsl_ifc_read_byte16; 829 chip->read_byte = fsl_ifc_read_byte16;
824 chip->options |= NAND_BUSWIDTH_16; 830 chip->options |= NAND_BUSWIDTH_16;
825 } else { 831 } else {
@@ -832,7 +838,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
832 chip->ecc.read_page = fsl_ifc_read_page; 838 chip->ecc.read_page = fsl_ifc_read_page;
833 chip->ecc.write_page = fsl_ifc_write_page; 839 chip->ecc.write_page = fsl_ifc_write_page;
834 840
835 csor = in_be32(&ifc->csor_cs[priv->bank].csor); 841 csor = ioread32be(&ifc->csor_cs[priv->bank].csor);
836 842
837 /* Hardware generates ECC per 512 Bytes */ 843 /* Hardware generates ECC per 512 Bytes */
838 chip->ecc.size = 512; 844 chip->ecc.size = 512;
@@ -884,7 +890,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
884 chip->ecc.mode = NAND_ECC_SOFT; 890 chip->ecc.mode = NAND_ECC_SOFT;
885 } 891 }
886 892
887 ver = in_be32(&ifc->ifc_rev); 893 ver = ioread32be(&ifc->ifc_rev);
888 if (ver == FSL_IFC_V1_1_0) 894 if (ver == FSL_IFC_V1_1_0)
889 fsl_ifc_sram_init(priv); 895 fsl_ifc_sram_init(priv);
890 896
@@ -910,7 +916,7 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
910static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank, 916static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
911 phys_addr_t addr) 917 phys_addr_t addr)
912{ 918{
913 u32 cspr = in_be32(&ifc->cspr_cs[bank].cspr); 919 u32 cspr = ioread32be(&ifc->cspr_cs[bank].cspr);
914 920
915 if (!(cspr & CSPR_V)) 921 if (!(cspr & CSPR_V))
916 return 0; 922 return 0;
@@ -997,17 +1003,16 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
997 1003
998 dev_set_drvdata(priv->dev, priv); 1004 dev_set_drvdata(priv->dev, priv);
999 1005
1000 out_be32(&ifc->ifc_nand.nand_evter_en, 1006 iowrite32be(IFC_NAND_EVTER_EN_OPC_EN |
1001 IFC_NAND_EVTER_EN_OPC_EN | 1007 IFC_NAND_EVTER_EN_FTOER_EN |
1002 IFC_NAND_EVTER_EN_FTOER_EN | 1008 IFC_NAND_EVTER_EN_WPER_EN,
1003 IFC_NAND_EVTER_EN_WPER_EN); 1009 &ifc->ifc_nand.nand_evter_en);
1004 1010
1005 /* enable NAND Machine Interrupts */ 1011 /* enable NAND Machine Interrupts */
1006 out_be32(&ifc->ifc_nand.nand_evter_intr_en, 1012 iowrite32be(IFC_NAND_EVTER_INTR_OPCIR_EN |
1007 IFC_NAND_EVTER_INTR_OPCIR_EN | 1013 IFC_NAND_EVTER_INTR_FTOERIR_EN |
1008 IFC_NAND_EVTER_INTR_FTOERIR_EN | 1014 IFC_NAND_EVTER_INTR_WPERIR_EN,
1009 IFC_NAND_EVTER_INTR_WPERIR_EN); 1015 &ifc->ifc_nand.nand_evter_intr_en);
1010
1011 priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start); 1016 priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start);
1012 if (!priv->mtd.name) { 1017 if (!priv->mtd.name) {
1013 ret = -ENOMEM; 1018 ret = -ENOMEM;
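
The fsl_ifc_nand conversion above is mechanical: the PowerPC-only in_be32()/out_be32() accessors are replaced by ioread32be()/iowrite32be(), which exist on all architectures; note that the write accessor also swaps the argument order (value first, register address second). A two-line sketch of the equivalence:

#include <linux/io.h>

/* Sketch: portable big-endian MMIO accessors. */
static void example_big_endian_mmio(void __iomem *reg)
{
        u32 v;

        iowrite32be(0x1, reg);  /* was: out_be32(reg, 0x1); */
        v = ioread32be(reg);    /* was: v = in_be32(reg);   */
        (void)v;
}
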
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h
index a0924515c396..588f5374047c 100644
--- a/drivers/mtd/nand/gpmi-nand/bch-regs.h
+++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h
@@ -61,6 +61,16 @@
61 & BM_BCH_FLASH0LAYOUT0_ECC0) \ 61 & BM_BCH_FLASH0LAYOUT0_ECC0) \
62 ) 62 )
63 63
64#define MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14 10
65#define MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14 \
66 (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14)
67#define BF_BCH_FLASH0LAYOUT0_GF(v, x) \
68 ((GPMI_IS_MX6Q(x) && ((v) == 14)) \
69 ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14) \
70 & MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14) \
71 : 0 \
72 )
73
64#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0 74#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0
65#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \ 75#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
66 (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE) 76 (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
@@ -93,6 +103,16 @@
93 & BM_BCH_FLASH0LAYOUT1_ECCN) \ 103 & BM_BCH_FLASH0LAYOUT1_ECCN) \
94 ) 104 )
95 105
106#define MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14 10
107#define MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14 \
108 (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14)
109#define BF_BCH_FLASH0LAYOUT1_GF(v, x) \
110 ((GPMI_IS_MX6Q(x) && ((v) == 14)) \
111 ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14) \
112 & MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14) \
113 : 0 \
114 )
115
96#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0 116#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0
97#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \ 117#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
98 (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) 118 (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
@@ -103,4 +123,6 @@
103 ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \ 123 ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
104 : ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \ 124 : ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
105 ) 125 )
126
127#define HW_BCH_VERSION 0x00000160
106#endif 128#endif
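
A short note on why the new GF_13_14 bit exists: a BCH code over GF(2^13) can form codewords of at most 2^13 - 1 = 8191 bits, which comfortably covers a 512-byte ECC chunk plus its parity and metadata, but a 1024-byte chunk already carries 8192 data bits on its own. When the geometry code further down in this diff grows the chunk size beyond 512 bytes it therefore switches gf_len to 14, and this register bit tells the BCH engine to use the larger field. The bound itself is standard BCH arithmetic rather than something spelled out in the patch.
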
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index d84699c7968e..4f8857fa48a7 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -208,6 +208,11 @@ void gpmi_dump_info(struct gpmi_nand_data *this)
208 } 208 }
209 209
210 /* start to print out the BCH info */ 210 /* start to print out the BCH info */
211 pr_err("Show BCH registers :\n");
212 for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
213 reg = readl(r->bch_regs + i * 0x10);
214 pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
215 }
211 pr_err("BCH Geometry :\n"); 216 pr_err("BCH Geometry :\n");
212 pr_err("GF length : %u\n", geo->gf_len); 217 pr_err("GF length : %u\n", geo->gf_len);
213 pr_err("ECC Strength : %u\n", geo->ecc_strength); 218 pr_err("ECC Strength : %u\n", geo->ecc_strength);
@@ -232,6 +237,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
232 unsigned int metadata_size; 237 unsigned int metadata_size;
233 unsigned int ecc_strength; 238 unsigned int ecc_strength;
234 unsigned int page_size; 239 unsigned int page_size;
240 unsigned int gf_len;
235 int ret; 241 int ret;
236 242
237 if (common_nfc_set_geometry(this)) 243 if (common_nfc_set_geometry(this))
@@ -242,6 +248,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
242 metadata_size = bch_geo->metadata_size; 248 metadata_size = bch_geo->metadata_size;
243 ecc_strength = bch_geo->ecc_strength >> 1; 249 ecc_strength = bch_geo->ecc_strength >> 1;
244 page_size = bch_geo->page_size; 250 page_size = bch_geo->page_size;
251 gf_len = bch_geo->gf_len;
245 252
246 ret = gpmi_enable_clk(this); 253 ret = gpmi_enable_clk(this);
247 if (ret) 254 if (ret)
@@ -263,11 +270,13 @@ int bch_set_geometry(struct gpmi_nand_data *this)
263 writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count) 270 writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
264 | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size) 271 | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
265 | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) 272 | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
273 | BF_BCH_FLASH0LAYOUT0_GF(gf_len, this)
266 | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this), 274 | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
267 r->bch_regs + HW_BCH_FLASH0LAYOUT0); 275 r->bch_regs + HW_BCH_FLASH0LAYOUT0);
268 276
269 writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) 277 writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
270 | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) 278 | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
279 | BF_BCH_FLASH0LAYOUT1_GF(gf_len, this)
271 | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this), 280 | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
272 r->bch_regs + HW_BCH_FLASH0LAYOUT1); 281 r->bch_regs + HW_BCH_FLASH0LAYOUT1);
273 282
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index e9b1c47e3cf9..717881a3d1b8 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -94,6 +94,25 @@ static inline int get_ecc_strength(struct gpmi_nand_data *this)
94 return round_down(ecc_strength, 2); 94 return round_down(ecc_strength, 2);
95} 95}
96 96
97static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
98{
99 struct bch_geometry *geo = &this->bch_geometry;
100
101 /* Do the sanity check. */
102 if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) {
103 /* The mx23/mx28 only support the GF13. */
104 if (geo->gf_len == 14)
105 return false;
106
107 if (geo->ecc_strength > MXS_ECC_STRENGTH_MAX)
108 return false;
109 } else if (GPMI_IS_MX6Q(this)) {
110 if (geo->ecc_strength > MX6_ECC_STRENGTH_MAX)
111 return false;
112 }
113 return true;
114}
115
97int common_nfc_set_geometry(struct gpmi_nand_data *this) 116int common_nfc_set_geometry(struct gpmi_nand_data *this)
98{ 117{
99 struct bch_geometry *geo = &this->bch_geometry; 118 struct bch_geometry *geo = &this->bch_geometry;
@@ -112,17 +131,24 @@ int common_nfc_set_geometry(struct gpmi_nand_data *this)
112 /* The default for the length of Galois Field. */ 131 /* The default for the length of Galois Field. */
113 geo->gf_len = 13; 132 geo->gf_len = 13;
114 133
115 /* The default for chunk size. There is no oobsize greater then 512. */ 134 /* The default for chunk size. */
116 geo->ecc_chunk_size = 512; 135 geo->ecc_chunk_size = 512;
117 while (geo->ecc_chunk_size < mtd->oobsize) 136 while (geo->ecc_chunk_size < mtd->oobsize) {
118 geo->ecc_chunk_size *= 2; /* keep C >= O */ 137 geo->ecc_chunk_size *= 2; /* keep C >= O */
138 geo->gf_len = 14;
139 }
119 140
120 geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size; 141 geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
121 142
122 /* We use the same ECC strength for all chunks. */ 143 /* We use the same ECC strength for all chunks. */
123 geo->ecc_strength = get_ecc_strength(this); 144 geo->ecc_strength = get_ecc_strength(this);
124 if (!geo->ecc_strength) { 145 if (!gpmi_check_ecc(this)) {
125 pr_err("wrong ECC strength.\n"); 146 dev_err(this->dev,
147 "We can not support this nand chip."
148 " Its required ecc strength(%d) is beyond our"
149 " capability(%d).\n", geo->ecc_strength,
150 (GPMI_IS_MX6Q(this) ? MX6_ECC_STRENGTH_MAX
151 : MXS_ECC_STRENGTH_MAX));
126 return -EINVAL; 152 return -EINVAL;
127 } 153 }
128 154
@@ -920,8 +946,7 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
920 dma_addr_t auxiliary_phys; 946 dma_addr_t auxiliary_phys;
921 unsigned int i; 947 unsigned int i;
922 unsigned char *status; 948 unsigned char *status;
923 unsigned int failed; 949 unsigned int max_bitflips = 0;
924 unsigned int corrected;
925 int ret; 950 int ret;
926 951
927 pr_debug("page number is : %d\n", page); 952 pr_debug("page number is : %d\n", page);
@@ -945,35 +970,25 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
945 payload_virt, payload_phys); 970 payload_virt, payload_phys);
946 if (ret) { 971 if (ret) {
947 pr_err("Error in ECC-based read: %d\n", ret); 972 pr_err("Error in ECC-based read: %d\n", ret);
948 goto exit_nfc; 973 return ret;
949 } 974 }
950 975
951 /* handle the block mark swapping */ 976 /* handle the block mark swapping */
952 block_mark_swapping(this, payload_virt, auxiliary_virt); 977 block_mark_swapping(this, payload_virt, auxiliary_virt);
953 978
954 /* Loop over status bytes, accumulating ECC status. */ 979 /* Loop over status bytes, accumulating ECC status. */
955 failed = 0; 980 status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
956 corrected = 0;
957 status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
958 981
959 for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) { 982 for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
960 if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED)) 983 if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
961 continue; 984 continue;
962 985
963 if (*status == STATUS_UNCORRECTABLE) { 986 if (*status == STATUS_UNCORRECTABLE) {
964 failed++; 987 mtd->ecc_stats.failed++;
965 continue; 988 continue;
966 } 989 }
967 corrected += *status; 990 mtd->ecc_stats.corrected += *status;
968 } 991 max_bitflips = max_t(unsigned int, max_bitflips, *status);
969
970 /*
971 * Propagate ECC status to the owning MTD only when failed or
972 * corrected times nearly reaches our ECC correction threshold.
973 */
974 if (failed || corrected >= (nfc_geo->ecc_strength - 1)) {
975 mtd->ecc_stats.failed += failed;
976 mtd->ecc_stats.corrected += corrected;
977 } 992 }
978 993
979 if (oob_required) { 994 if (oob_required) {
@@ -995,8 +1010,8 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
995 this->payload_virt, this->payload_phys, 1010 this->payload_virt, this->payload_phys,
996 nfc_geo->payload_size, 1011 nfc_geo->payload_size,
997 payload_virt, payload_phys); 1012 payload_virt, payload_phys);
998exit_nfc: 1013
999 return ret; 1014 return max_bitflips;
1000} 1015}
1001 1016
1002static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip, 1017static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -1668,8 +1683,8 @@ exit_nfc_init:
1668 release_resources(this); 1683 release_resources(this);
1669exit_acquire_resources: 1684exit_acquire_resources:
1670 platform_set_drvdata(pdev, NULL); 1685 platform_set_drvdata(pdev, NULL);
1671 kfree(this);
1672 dev_err(this->dev, "driver registration failed: %d\n", ret); 1686 dev_err(this->dev, "driver registration failed: %d\n", ret);
1687 kfree(this);
1673 1688
1674 return ret; 1689 return ret;
1675} 1690}
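
The read-path rework above follows the MTD convention that ecc.read_page() updates mtd->ecc_stats itself and returns the largest number of bitflips seen in any ECC chunk, which the core then compares against the bitflip threshold. A condensed, illustrative sketch of that accumulation (status values mirror the driver's STATUS_* defines; the helper is hypothetical):

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>

/* Sketch: per-chunk ECC status folded into ecc_stats and a max_bitflips
 * return value, as the patched gpmi_ecc_read_page() now does. */
static int example_fold_ecc_status(struct mtd_info *mtd,
                                   const u8 *status, int chunks)
{
        unsigned int max_bitflips = 0;
        int i;

        for (i = 0; i < chunks; i++) {
                if (status[i] == 0x00 || status[i] == 0xff)
                        continue;               /* good or erased chunk */
                if (status[i] == 0xfe) {
                        mtd->ecc_stats.failed++;        /* uncorrectable */
                        continue;
                }
                mtd->ecc_stats.corrected += status[i];
                max_bitflips = max_t(unsigned int, max_bitflips, status[i]);
        }
        return max_bitflips;
}
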
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index 3d93a5e39090..072947731277 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -284,6 +284,10 @@ extern int gpmi_read_page(struct gpmi_nand_data *,
284#define STATUS_ERASED 0xff 284#define STATUS_ERASED 0xff
285#define STATUS_UNCORRECTABLE 0xfe 285#define STATUS_UNCORRECTABLE 0xfe
286 286
287/* BCH's bit correction capability. */
288#define MXS_ECC_STRENGTH_MAX 20 /* mx23 and mx28 */
289#define MX6_ECC_STRENGTH_MAX 40
290
287/* Use the platform_id to distinguish different Archs. */ 291/* Use the platform_id to distinguish different Archs. */
288#define IS_MX23 0x0 292#define IS_MX23 0x0
289#define IS_MX28 0x1 293#define IS_MX28 0x1
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 60ac5b98b718..07e5784e5cd3 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -530,12 +530,23 @@ static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
530 530
531static void send_read_id_v3(struct mxc_nand_host *host) 531static void send_read_id_v3(struct mxc_nand_host *host)
532{ 532{
533 struct nand_chip *this = &host->nand;
534
533 /* Read ID into main buffer */ 535 /* Read ID into main buffer */
534 writel(NFC_ID, NFC_V3_LAUNCH); 536 writel(NFC_ID, NFC_V3_LAUNCH);
535 537
536 wait_op_done(host, true); 538 wait_op_done(host, true);
537 539
538 memcpy32_fromio(host->data_buf, host->main_area0, 16); 540 memcpy32_fromio(host->data_buf, host->main_area0, 16);
541
542 if (this->options & NAND_BUSWIDTH_16) {
543 /* compress the ID info */
544 host->data_buf[1] = host->data_buf[2];
545 host->data_buf[2] = host->data_buf[4];
546 host->data_buf[3] = host->data_buf[6];
547 host->data_buf[4] = host->data_buf[8];
548 host->data_buf[5] = host->data_buf[10];
549 }
539} 550}
540 551
541/* Request the NANDFC to perform a read of the NAND device ID. */ 552/* Request the NANDFC to perform a read of the NAND device ID. */
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 3766682a0289..43214151b882 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -825,13 +825,8 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
825static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) 825static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
826{ 826{
827 827
828 unsigned long timeo = jiffies;
829 int status, state = chip->state; 828 int status, state = chip->state;
830 829 unsigned long timeo = (state == FL_ERASING ? 400 : 20);
831 if (state == FL_ERASING)
832 timeo += (HZ * 400) / 1000;
833 else
834 timeo += (HZ * 20) / 1000;
835 830
836 led_trigger_event(nand_led_trigger, LED_FULL); 831 led_trigger_event(nand_led_trigger, LED_FULL);
837 832
@@ -849,6 +844,7 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
849 if (in_interrupt() || oops_in_progress) 844 if (in_interrupt() || oops_in_progress)
850 panic_nand_wait(mtd, chip, timeo); 845 panic_nand_wait(mtd, chip, timeo);
851 else { 846 else {
847 timeo = jiffies + msecs_to_jiffies(timeo);
852 while (time_before(jiffies, timeo)) { 848 while (time_before(jiffies, timeo)) {
853 if (chip->dev_ready) { 849 if (chip->dev_ready) {
854 if (chip->dev_ready(mtd)) 850 if (chip->dev_ready(mtd))
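
The nand_wait() change above keeps the timeout in milliseconds (400 ms for erase, 20 ms otherwise), hands that value to the panic path as-is, and converts it to a jiffies deadline only for the normal polling loop. A small sketch of that idiom, with a hypothetical ready() callback:

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

/* Sketch: millisecond timeout converted to a jiffies deadline just
 * before polling, instead of carrying a jiffies value around. */
static bool example_wait_ready(bool (*ready)(void), unsigned int timeout_ms)
{
        unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

        do {
                if (ready())
                        return true;
                cond_resched();
        } while (time_before(jiffies, deadline));

        return false;
}
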
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index b7cfe0d37121..053c9a2d47c3 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -55,8 +55,7 @@ struct mtd_info;
55#define MODULE_AUTHOR(x) /* x */ 55#define MODULE_AUTHOR(x) /* x */
56#define MODULE_DESCRIPTION(x) /* x */ 56#define MODULE_DESCRIPTION(x) /* x */
57 57
58#define printk printf 58#define pr_err printf
59#define KERN_ERR ""
60#endif 59#endif
61 60
62/* 61/*
@@ -507,7 +506,7 @@ int __nand_correct_data(unsigned char *buf,
507 if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1) 506 if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
508 return 1; /* error in ECC data; no action needed */ 507 return 1; /* error in ECC data; no action needed */
509 508
510 printk(KERN_ERR "uncorrectable error : "); 509 pr_err("%s: uncorrectable ECC error", __func__);
511 return -1; 510 return -1;
512} 511}
513EXPORT_SYMBOL(__nand_correct_data); 512EXPORT_SYMBOL(__nand_correct_data);
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 8f30d385bfa3..891c52a30e6a 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -1468,12 +1468,12 @@ int do_read_error(struct nandsim *ns, int num)
1468 1468
1469void do_bit_flips(struct nandsim *ns, int num) 1469void do_bit_flips(struct nandsim *ns, int num)
1470{ 1470{
1471 if (bitflips && random32() < (1 << 22)) { 1471 if (bitflips && prandom_u32() < (1 << 22)) {
1472 int flips = 1; 1472 int flips = 1;
1473 if (bitflips > 1) 1473 if (bitflips > 1)
1474 flips = (random32() % (int) bitflips) + 1; 1474 flips = (prandom_u32() % (int) bitflips) + 1;
1475 while (flips--) { 1475 while (flips--) {
1476 int pos = random32() % (num * 8); 1476 int pos = prandom_u32() % (num * 8);
1477 ns->buf.byte[pos / 8] ^= (1 << (pos % 8)); 1477 ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
1478 NS_WARN("read_page: flipping bit %d in page %d " 1478 NS_WARN("read_page: flipping bit %d in page %d "
1479 "reading from %d ecc: corrected=%u failed=%u\n", 1479 "reading from %d ecc: corrected=%u failed=%u\n",
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 1d333497cfcb..8e820ddf4e08 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -22,9 +22,12 @@
22#include <linux/omap-dma.h> 22#include <linux/omap-dma.h>
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/of.h>
26#include <linux/of_device.h>
25 27
26#ifdef CONFIG_MTD_NAND_OMAP_BCH 28#ifdef CONFIG_MTD_NAND_OMAP_BCH
27#include <linux/bch.h> 29#include <linux/bch.h>
30#include <linux/platform_data/elm.h>
28#endif 31#endif
29 32
30#include <linux/platform_data/mtd-nand-omap2.h> 33#include <linux/platform_data/mtd-nand-omap2.h>
@@ -117,6 +120,33 @@
117 120
118#define OMAP24XX_DMA_GPMC 4 121#define OMAP24XX_DMA_GPMC 4
119 122
123#define BCH8_MAX_ERROR 8 /* up to 8 bits correctable */
124#define BCH4_MAX_ERROR 4 /* up to 4 bits correctable */
125
126#define SECTOR_BYTES 512
127/* 4 bit padding to make byte aligned, 56 = 52 + 4 */
128#define BCH4_BIT_PAD 4
129#define BCH8_ECC_MAX ((SECTOR_BYTES + BCH8_ECC_OOB_BYTES) * 8)
130#define BCH4_ECC_MAX ((SECTOR_BYTES + BCH4_ECC_OOB_BYTES) * 8)
131
132/* GPMC ecc engine settings for read */
133#define BCH_WRAPMODE_1 1 /* BCH wrap mode 1 */
134#define BCH8R_ECC_SIZE0 0x1a /* ecc_size0 = 26 */
135#define BCH8R_ECC_SIZE1 0x2 /* ecc_size1 = 2 */
136#define BCH4R_ECC_SIZE0 0xd /* ecc_size0 = 13 */
137#define BCH4R_ECC_SIZE1 0x3 /* ecc_size1 = 3 */
138
139/* GPMC ecc engine settings for write */
140#define BCH_WRAPMODE_6 6 /* BCH wrap mode 6 */
141#define BCH_ECC_SIZE0 0x0 /* ecc_size0 = 0, no oob protection */
142#define BCH_ECC_SIZE1 0x20 /* ecc_size1 = 32 */
143
144#ifdef CONFIG_MTD_NAND_OMAP_BCH
145static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
146 0xac, 0x6b, 0xff, 0x99, 0x7b};
147static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
148#endif
149
120/* oob info generated runtime depending on ecc algorithm and layout selected */ 150/* oob info generated runtime depending on ecc algorithm and layout selected */
121static struct nand_ecclayout omap_oobinfo; 151static struct nand_ecclayout omap_oobinfo;
122/* Define some generic bad / good block scan pattern which are used 152/* Define some generic bad / good block scan pattern which are used
@@ -156,6 +186,9 @@ struct omap_nand_info {
156#ifdef CONFIG_MTD_NAND_OMAP_BCH 186#ifdef CONFIG_MTD_NAND_OMAP_BCH
157 struct bch_control *bch; 187 struct bch_control *bch;
158 struct nand_ecclayout ecclayout; 188 struct nand_ecclayout ecclayout;
189 bool is_elm_used;
190 struct device *elm_dev;
191 struct device_node *of_node;
159#endif 192#endif
160}; 193};
161 194
@@ -1031,6 +1064,13 @@ static int omap_dev_ready(struct mtd_info *mtd)
1031 * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction 1064 * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
1032 * @mtd: MTD device structure 1065 * @mtd: MTD device structure
1033 * @mode: Read/Write mode 1066 * @mode: Read/Write mode
1067 *
1068 * When using BCH, sector size is hardcoded to 512 bytes.
1069 * Wrapping mode 6 is used for both reading and writing when the ELM
1070 * module is not used for error correction.
1071 * On writing,
1072 * eccsize0 = 0 (no additional protected byte in spare area)
1073 * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
1034 */ 1074 */
1035static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode) 1075static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
1036{ 1076{
@@ -1039,32 +1079,57 @@ static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
1039 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1079 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1040 mtd); 1080 mtd);
1041 struct nand_chip *chip = mtd->priv; 1081 struct nand_chip *chip = mtd->priv;
1042 u32 val; 1082 u32 val, wr_mode;
1083 unsigned int ecc_size1, ecc_size0;
1084
1085 /* Using wrapping mode 6 for writing */
1086 wr_mode = BCH_WRAPMODE_6;
1043 1087
1044 nerrors = (info->nand.ecc.bytes == 13) ? 8 : 4;
1045 dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
1046 nsectors = 1;
1047 /* 1088 /*
1048 * Program GPMC to perform correction on one 512-byte sector at a time. 1089 * ECC engine enabled for valid ecc_size0 nibbles
1049 * Using 4 sectors at a time (i.e. ecc.size = 2048) is also possible and 1090 * and disabled for ecc_size1 nibbles.
1050 * gives a slight (5%) performance gain (but requires additional code).
1051 */ 1091 */
1092 ecc_size0 = BCH_ECC_SIZE0;
1093 ecc_size1 = BCH_ECC_SIZE1;
1094
1095 /* Perform ecc calculation on 512-byte sector */
1096 nsectors = 1;
1097
1098 /* Update number of error correction */
1099 nerrors = info->nand.ecc.strength;
1100
1101 /* Multi sector reading/writing for NAND flash with page size < 4096 */
1102 if (info->is_elm_used && (mtd->writesize <= 4096)) {
1103 if (mode == NAND_ECC_READ) {
1104 /* Using wrapping mode 1 for reading */
1105 wr_mode = BCH_WRAPMODE_1;
1106
1107 /*
1108 * ECC engine enabled for ecc_size0 nibbles
1109 * and disabled for ecc_size1 nibbles.
1110 */
1111 ecc_size0 = (nerrors == 8) ?
1112 BCH8R_ECC_SIZE0 : BCH4R_ECC_SIZE0;
1113 ecc_size1 = (nerrors == 8) ?
1114 BCH8R_ECC_SIZE1 : BCH4R_ECC_SIZE1;
1115 }
1116
1117 /* Perform ecc calculation for one page (< 4096) */
1118 nsectors = info->nand.ecc.steps;
1119 }
1052 1120
1053 writel(ECC1, info->reg.gpmc_ecc_control); 1121 writel(ECC1, info->reg.gpmc_ecc_control);
1054 1122
1055 /* 1123 /* Configure ecc size for BCH */
1056 * When using BCH, sector size is hardcoded to 512 bytes. 1124 val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT);
1057 * Here we are using wrapping mode 6 both for reading and writing, with:
1058 * size0 = 0 (no additional protected byte in spare area)
1059 * size1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
1060 */
1061 val = (32 << ECCSIZE1_SHIFT) | (0 << ECCSIZE0_SHIFT);
1062 writel(val, info->reg.gpmc_ecc_size_config); 1125 writel(val, info->reg.gpmc_ecc_size_config);
1063 1126
1127 dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
1128
1064 /* BCH configuration */ 1129 /* BCH configuration */
1065 val = ((1 << 16) | /* enable BCH */ 1130 val = ((1 << 16) | /* enable BCH */
1066 (((nerrors == 8) ? 1 : 0) << 12) | /* 8 or 4 bits */ 1131 (((nerrors == 8) ? 1 : 0) << 12) | /* 8 or 4 bits */
1067 (0x06 << 8) | /* wrap mode = 6 */ 1132 (wr_mode << 8) | /* wrap mode */
1068 (dev_width << 7) | /* bus width */ 1133 (dev_width << 7) | /* bus width */
1069 (((nsectors-1) & 0x7) << 4) | /* number of sectors */ 1134 (((nsectors-1) & 0x7) << 4) | /* number of sectors */
1070 (info->gpmc_cs << 1) | /* ECC CS */ 1135 (info->gpmc_cs << 1) | /* ECC CS */
@@ -1072,7 +1137,7 @@ static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
1072 1137
1073 writel(val, info->reg.gpmc_ecc_config); 1138 writel(val, info->reg.gpmc_ecc_config);
1074 1139
1075 /* clear ecc and enable bits */ 1140 /* Clear ecc and enable bits */
1076 writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control); 1141 writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
1077} 1142}
1078 1143
@@ -1162,6 +1227,298 @@ static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
1162} 1227}
1163 1228
1164/** 1229/**
1230 * omap3_calculate_ecc_bch - Generate bytes of ECC bytes
1231 * @mtd: MTD device structure
1232 * @dat: The pointer to data on which ecc is computed
1233 * @ecc_code: The ecc_code buffer
1234 *
1235 * Supports calculating BCH4/BCH8 ECC vectors for the page
1236 */
1237static int omap3_calculate_ecc_bch(struct mtd_info *mtd, const u_char *dat,
1238 u_char *ecc_code)
1239{
1240 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1241 mtd);
1242 unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
1243 int i, eccbchtsel;
1244
1245 nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
1246 /*
1247 * find BCH scheme used
1248 * 0 -> BCH4
1249 * 1 -> BCH8
1250 */
1251 eccbchtsel = ((readl(info->reg.gpmc_ecc_config) >> 12) & 0x3);
1252
1253 for (i = 0; i < nsectors; i++) {
1254
1255 /* Read hw-computed remainder */
1256 bch_val1 = readl(info->reg.gpmc_bch_result0[i]);
1257 bch_val2 = readl(info->reg.gpmc_bch_result1[i]);
1258 if (eccbchtsel) {
1259 bch_val3 = readl(info->reg.gpmc_bch_result2[i]);
1260 bch_val4 = readl(info->reg.gpmc_bch_result3[i]);
1261 }
1262
1263 if (eccbchtsel) {
1264 /* BCH8 ecc scheme */
1265 *ecc_code++ = (bch_val4 & 0xFF);
1266 *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
1267 *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
1268 *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
1269 *ecc_code++ = (bch_val3 & 0xFF);
1270 *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
1271 *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
1272 *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
1273 *ecc_code++ = (bch_val2 & 0xFF);
1274 *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
1275 *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
1276 *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
1277 *ecc_code++ = (bch_val1 & 0xFF);
1278 /*
1279 * Setting 14th byte to zero to handle
1280 * erased page & maintain compatibility
1281 * with RBL
1282 */
1283 *ecc_code++ = 0x0;
1284 } else {
1285 /* BCH4 ecc scheme */
1286 *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
1287 *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
1288 *ecc_code++ = ((bch_val2 & 0xF) << 4) |
1289 ((bch_val1 >> 28) & 0xF);
1290 *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
1291 *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
1292 *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
1293 *ecc_code++ = ((bch_val1 & 0xF) << 4);
1294 /*
1295 * Setting 8th byte to zero to handle
1296 * erased page
1297 */
1298 *ecc_code++ = 0x0;
1299 }
1300 }
1301
1302 return 0;
1303}
1304
1305/**
1306 * erased_sector_bitflips - count bit flips
1307 * @data: data sector buffer
1308 * @oob: oob buffer
1309 * @info: omap_nand_info
1310 *
1311 * Check whether the bit flips in an erased page fall below the
1312 * correctable level. If they do, report the page as erased with
1313 * correctable bit flips; otherwise report the page as uncorrectable.
1314 */
1315static int erased_sector_bitflips(u_char *data, u_char *oob,
1316 struct omap_nand_info *info)
1317{
1318 int flip_bits = 0, i;
1319
1320 for (i = 0; i < info->nand.ecc.size; i++) {
1321 flip_bits += hweight8(~data[i]);
1322 if (flip_bits > info->nand.ecc.strength)
1323 return 0;
1324 }
1325
1326 for (i = 0; i < info->nand.ecc.bytes - 1; i++) {
1327 flip_bits += hweight8(~oob[i]);
1328 if (flip_bits > info->nand.ecc.strength)
1329 return 0;
1330 }
1331
1332 /*
1333 * Bit flips falls in correctable level.
1334 * Fill data area with 0xFF
1335 */
1336 if (flip_bits) {
1337 memset(data, 0xFF, info->nand.ecc.size);
1338 memset(oob, 0xFF, info->nand.ecc.bytes);
1339 }
1340
1341 return flip_bits;
1342}
1343
1344/**
1345 * omap_elm_correct_data - corrects page data area in case error reported
1346 * @mtd: MTD device structure
1347 * @data: page data
1348 * @read_ecc: ecc read from nand flash
1349 * @calc_ecc: ecc read from HW ECC registers
1350 *
1351 * The calculated ECC vector is reported as zero for error-free pages.
1352 * For erroneous or erased pages a non-zero ECC vector is reported.
1353 * In that case, check read_ecc at a fixed offset x (x = 13 for BCH8,
1354 * x = 7 for BCH4) to find out whether the page is programmed or not.
1355 * To tolerate bit flips in that byte, count the number of 0 bits in
1356 * read_ecc[x]: if the count reaches the threshold the page is treated
1357 * as programmed, otherwise as erased.
1358 *
1359 * 1. If page is erased, check with standard ecc vector (ecc vector
1360 * for erased page to find any bit flip). If check fails, bit flip
1361 * is present in erased page. Count the bit flips in erased page and
1362 * if it falls under correctable level, report page with 0xFF and
1363 * update the correctable bit information.
1364 * 2. If error is reported on programmed page, update elm error
1365 * vector and correct the page with ELM error correction routine.
1366 *
1367 */
1368static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
1369 u_char *read_ecc, u_char *calc_ecc)
1370{
1371 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1372 mtd);
1373 int eccsteps = info->nand.ecc.steps;
1374 int i , j, stat = 0;
1375 int eccsize, eccflag, ecc_vector_size;
1376 struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
1377 u_char *ecc_vec = calc_ecc;
1378 u_char *spare_ecc = read_ecc;
1379 u_char *erased_ecc_vec;
1380 enum bch_ecc type;
1381 bool is_error_reported = false;
1382
1383 /* Initialize elm error vector to zero */
1384 memset(err_vec, 0, sizeof(err_vec));
1385
1386 if (info->nand.ecc.strength == BCH8_MAX_ERROR) {
1387 type = BCH8_ECC;
1388 erased_ecc_vec = bch8_vector;
1389 } else {
1390 type = BCH4_ECC;
1391 erased_ecc_vec = bch4_vector;
1392 }
1393
1394 ecc_vector_size = info->nand.ecc.bytes;
1395
1396 /*
1397 * Exclude the extra pad byte added for BCH8 RBL
1398 * compatibility and erased page handling
1399 */
1400 eccsize = ecc_vector_size - 1;
1401
1402 for (i = 0; i < eccsteps ; i++) {
1403 eccflag = 0; /* initialize eccflag */
1404
1405 /*
1406 * Check whether any error was reported;
1407 * a non-zero ecc vector indicates an error.
1408 */
1409
1410 for (j = 0; (j < eccsize); j++) {
1411 if (calc_ecc[j] != 0) {
1412 eccflag = 1; /* non zero ecc, error present */
1413 break;
1414 }
1415 }
1416
1417 if (eccflag == 1) {
1418 /*
1419 * Set the threshold to the smaller of 4 and ecc.strength / 2,
1420 * bounding the number of bit flips tolerated in the marker byte
1421 */
1422 unsigned int threshold = min_t(unsigned int, 4,
1423 info->nand.ecc.strength / 2);
1424
1425 /*
1426 * Determine whether the data area is programmed by
1427 * counting the number of 0 bits at a fixed offset in
1428 * the spare area and comparing that count against the
1429 * threshold: a programmed page is expected to have at
1430 * least 'threshold' zero bits in this byte, an erased
1431 * page fewer. If a programmed page has fewer zeros than
1432 * the threshold, or an erased page more, the page ends
1433 * up being reported as uncorrectable.
1434 */
1435 if (hweight8(~read_ecc[eccsize]) >= threshold) {
1436 /*
1437 * Update elm error vector as
1438 * data area is programmed
1439 */
1440 err_vec[i].error_reported = true;
1441 is_error_reported = true;
1442 } else {
1443 /* Error reported in erased page */
1444 int bitflip_count;
1445 u_char *buf = &data[info->nand.ecc.size * i];
1446
1447 if (memcmp(calc_ecc, erased_ecc_vec, eccsize)) {
1448 bitflip_count = erased_sector_bitflips(
1449 buf, read_ecc, info);
1450
1451 if (bitflip_count)
1452 stat += bitflip_count;
1453 else
1454 return -EINVAL;
1455 }
1456 }
1457 }
1458
1459 /* Update the ecc vector */
1460 calc_ecc += ecc_vector_size;
1461 read_ecc += ecc_vector_size;
1462 }
1463
1464 /* Check if any error reported */
1465 if (!is_error_reported)
1466 return 0;
1467
1468 /* Decode BCH error using ELM module */
1469 elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
1470
1471 for (i = 0; i < eccsteps; i++) {
1472 if (err_vec[i].error_reported) {
1473 for (j = 0; j < err_vec[i].error_count; j++) {
1474 u32 bit_pos, byte_pos, error_max, pos;
1475
1476 if (type == BCH8_ECC)
1477 error_max = BCH8_ECC_MAX;
1478 else
1479 error_max = BCH4_ECC_MAX;
1480
1481 if (info->nand.ecc.strength == BCH8_MAX_ERROR)
1482 pos = err_vec[i].error_loc[j];
1483 else
1484 /* Add 4 to account for the 4-bit padding */
1485 pos = err_vec[i].error_loc[j] +
1486 BCH4_BIT_PAD;
1487
1488 /* Calculate bit position of error */
1489 bit_pos = pos % 8;
1490
1491 /* Calculate byte position of error */
1492 byte_pos = (error_max - pos - 1) / 8;
1493
1494 if (pos < error_max) {
1495 if (byte_pos < 512)
1496 data[byte_pos] ^= 1 << bit_pos;
1497 else
1498 spare_ecc[byte_pos - 512] ^=
1499 1 << bit_pos;
1500 }
1501 /* else, the error position is outside the corrected area */
1502 }
1503 }
1504
1505 /* Update number of correctable errors */
1506 stat += err_vec[i].error_count;
1507
1508 /* Update page data with sector size */
1509 data += info->nand.ecc.size;
1510 spare_ecc += ecc_vector_size;
1511 }
1512
1513 for (i = 0; i < eccsteps; i++)
1514 /* Return error if uncorrectable error present */
1515 if (err_vec[i].error_uncorrectable)
1516 return -EINVAL;
1517
1518 return stat;
1519}
1520
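The programmed-versus-erased decision above hinges on the pad byte read back from the spare area (byte 13 of the ecc vector for BCH8, byte 7 for BCH4): it is written as 0x00 on programmed pages and reads as 0xFF on erased ones. A small user-space sketch of that check, using __builtin_popcount as a stand-in for hweight8() (illustrative only, not driver code):

#include <stdbool.h>
#include <stdint.h>

/* Decide whether a sector was programmed from its ecc pad byte. */
static bool sector_is_programmed(uint8_t pad_byte, unsigned int strength)
{
        unsigned int threshold = strength / 2;

        if (threshold > 4)
                threshold = 4;          /* same cap as min_t(4, strength / 2) */

        /* A programmed pad byte is 0x00, so it should have many 0 bits. */
        return __builtin_popcount((uint8_t)~pad_byte) >= threshold;
}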
1521/**
1165 * omap3_correct_data_bch - Decode received data and correct errors 1522 * omap3_correct_data_bch - Decode received data and correct errors
1166 * @mtd: MTD device structure 1523 * @mtd: MTD device structure
1167 * @data: page data 1524 * @data: page data
@@ -1194,6 +1551,92 @@ static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
1194} 1551}
1195 1552
1196/** 1553/**
1554 * omap_write_page_bch - BCH ecc based write page function for entire page
1555 * @mtd: mtd info structure
1556 * @chip: nand chip info structure
1557 * @buf: data buffer
1558 * @oob_required: must write chip->oob_poi to OOB
1559 *
1560 * Custom write page method developed to support multi-sector writes in one shot
1561 */
1562static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
1563 const uint8_t *buf, int oob_required)
1564{
1565 int i;
1566 uint8_t *ecc_calc = chip->buffers->ecccalc;
1567 uint32_t *eccpos = chip->ecc.layout->eccpos;
1568
1569 /* Enable GPMC ecc engine */
1570 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
1571
1572 /* Write data */
1573 chip->write_buf(mtd, buf, mtd->writesize);
1574
1575 /* Update ecc vector from GPMC result registers */
1576 chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
1577
1578 for (i = 0; i < chip->ecc.total; i++)
1579 chip->oob_poi[eccpos[i]] = ecc_calc[i];
1580
1581 /* Write ecc vector to OOB area */
1582 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1583 return 0;
1584}
1585
1586/**
1587 * omap_read_page_bch - BCH ecc based page read function for entire page
1588 * @mtd: mtd info structure
1589 * @chip: nand chip info structure
1590 * @buf: buffer to store read data
1591 * @oob_required: caller requires OOB data read to chip->oob_poi
1592 * @page: page number to read
1593 *
1594 * For the BCH ecc scheme, the GPMC is used for syndrome calculation and
1595 * the ELM module for error correction.
1596 * Custom method developed to support ELM error correction and multi-sector
1597 * reads: the page data area is read together with the OOB data while the
1598 * ecc engine is enabled, and the ecc vector is updated after the OOB read.
1599 * For error-free pages the ecc vector is reported as zero.
1600 */
1601static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
1602 uint8_t *buf, int oob_required, int page)
1603{
1604 uint8_t *ecc_calc = chip->buffers->ecccalc;
1605 uint8_t *ecc_code = chip->buffers->ecccode;
1606 uint32_t *eccpos = chip->ecc.layout->eccpos;
1607 uint8_t *oob = &chip->oob_poi[eccpos[0]];
1608 uint32_t oob_pos = mtd->writesize + chip->ecc.layout->eccpos[0];
1609 int stat;
1610 unsigned int max_bitflips = 0;
1611
1612 /* Enable GPMC ecc engine */
1613 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1614
1615 /* Read data */
1616 chip->read_buf(mtd, buf, mtd->writesize);
1617
1618 /* Read oob bytes */
1619 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, -1);
1620 chip->read_buf(mtd, oob, chip->ecc.total);
1621
1622 /* Calculate ecc bytes */
1623 chip->ecc.calculate(mtd, buf, ecc_calc);
1624
1625 memcpy(ecc_code, &chip->oob_poi[eccpos[0]], chip->ecc.total);
1626
1627 stat = chip->ecc.correct(mtd, buf, ecc_code, ecc_calc);
1628
1629 if (stat < 0) {
1630 mtd->ecc_stats.failed++;
1631 } else {
1632 mtd->ecc_stats.corrected += stat;
1633 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1634 }
1635
1636 return max_bitflips;
1637}
1638
1639/**
1197 * omap3_free_bch - Release BCH ecc resources 1640 * omap3_free_bch - Release BCH ecc resources
1198 * @mtd: MTD device structure 1641 * @mtd: MTD device structure
1199 */ 1642 */
@@ -1218,43 +1661,86 @@ static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
1218 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1661 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1219 mtd); 1662 mtd);
1220#ifdef CONFIG_MTD_NAND_OMAP_BCH8 1663#ifdef CONFIG_MTD_NAND_OMAP_BCH8
1221 const int hw_errors = 8; 1664 const int hw_errors = BCH8_MAX_ERROR;
1222#else 1665#else
1223 const int hw_errors = 4; 1666 const int hw_errors = BCH4_MAX_ERROR;
1224#endif 1667#endif
1668 enum bch_ecc bch_type;
1669 const __be32 *parp;
1670 int lenp;
1671 struct device_node *elm_node;
1672
1225 info->bch = NULL; 1673 info->bch = NULL;
1226 1674
1227 max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? 8 : 4; 1675 max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ?
1676 BCH8_MAX_ERROR : BCH4_MAX_ERROR;
1228 if (max_errors != hw_errors) { 1677 if (max_errors != hw_errors) {
1229 pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported", 1678 pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported",
1230 max_errors, hw_errors); 1679 max_errors, hw_errors);
1231 goto fail; 1680 goto fail;
1232 } 1681 }
1233 1682
1234 /* software bch library is only used to detect and locate errors */ 1683 info->nand.ecc.size = 512;
1235 info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */); 1684 info->nand.ecc.hwctl = omap3_enable_hwecc_bch;
1236 if (!info->bch) 1685 info->nand.ecc.mode = NAND_ECC_HW;
1237 goto fail; 1686 info->nand.ecc.strength = max_errors;
1238 1687
1239 info->nand.ecc.size = 512; 1688 if (hw_errors == BCH8_MAX_ERROR)
1240 info->nand.ecc.hwctl = omap3_enable_hwecc_bch; 1689 bch_type = BCH8_ECC;
1241 info->nand.ecc.correct = omap3_correct_data_bch; 1690 else
1242 info->nand.ecc.mode = NAND_ECC_HW; 1691 bch_type = BCH4_ECC;
1243 1692
1244 /* 1693 /* Detect availability of ELM module */
1245 * The number of corrected errors in an ecc block that will trigger 1694 parp = of_get_property(info->of_node, "elm_id", &lenp);
1246 * block scrubbing defaults to the ecc strength (4 or 8). 1695 if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) {
1247 * Set mtd->bitflip_threshold here to define a custom threshold. 1696 pr_err("Missing elm_id property, fall back to Software BCH\n");
1248 */ 1697 info->is_elm_used = false;
1698 } else {
1699 struct platform_device *pdev;
1249 1700
1250 if (max_errors == 8) { 1701 elm_node = of_find_node_by_phandle(be32_to_cpup(parp));
1251 info->nand.ecc.strength = 8; 1702 pdev = of_find_device_by_node(elm_node);
1252 info->nand.ecc.bytes = 13; 1703 info->elm_dev = &pdev->dev;
1253 info->nand.ecc.calculate = omap3_calculate_ecc_bch8; 1704 elm_config(info->elm_dev, bch_type);
1705 info->is_elm_used = true;
1706 }
1707
1708 if (info->is_elm_used && (mtd->writesize <= 4096)) {
1709
1710 if (hw_errors == BCH8_MAX_ERROR)
1711 info->nand.ecc.bytes = BCH8_SIZE;
1712 else
1713 info->nand.ecc.bytes = BCH4_SIZE;
1714
1715 info->nand.ecc.correct = omap_elm_correct_data;
1716 info->nand.ecc.calculate = omap3_calculate_ecc_bch;
1717 info->nand.ecc.read_page = omap_read_page_bch;
1718 info->nand.ecc.write_page = omap_write_page_bch;
1254 } else { 1719 } else {
1255 info->nand.ecc.strength = 4; 1720 /*
1256 info->nand.ecc.bytes = 7; 1721 * software bch library is only used to detect and
1257 info->nand.ecc.calculate = omap3_calculate_ecc_bch4; 1722 * locate errors
1723 */
1724 info->bch = init_bch(13, max_errors,
1725 0x201b /* hw polynomial */);
1726 if (!info->bch)
1727 goto fail;
1728
1729 info->nand.ecc.correct = omap3_correct_data_bch;
1730
1731 /*
1732 * The number of corrected errors in an ecc block that will
1733 * trigger block scrubbing defaults to the ecc strength (4 or 8)
1734 * Set mtd->bitflip_threshold here to define a custom threshold.
1735 */
1736
1737 if (max_errors == 8) {
1738 info->nand.ecc.bytes = 13;
1739 info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
1740 } else {
1741 info->nand.ecc.bytes = 7;
1742 info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
1743 }
1258 } 1744 }
1259 1745
1260 pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors); 1746 pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
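The ELM detection above resolves the "elm_id" phandle from the controller's device-tree node into the ELM platform device. A hedged sketch of that lookup pattern (the helper name is illustrative and the error handling is stricter than in the hunk above):

#include <linux/of.h>
#include <linux/of_platform.h>

/* Sketch: resolve a single-phandle property into the device it names. */
static struct device *find_dev_by_phandle(struct device_node *np,
                                          const char *prop)
{
        const __be32 *parp;
        struct device_node *node;
        struct platform_device *pdev;
        int lenp;

        parp = of_get_property(np, prop, &lenp);
        if (!parp || lenp < (int)sizeof(*parp))
                return NULL;

        node = of_find_node_by_phandle(be32_to_cpup(parp));
        if (!node)
                return NULL;

        pdev = of_find_device_by_node(node);
        return pdev ? &pdev->dev : NULL;
}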
@@ -1270,7 +1756,7 @@ fail:
1270 */ 1756 */
1271static int omap3_init_bch_tail(struct mtd_info *mtd) 1757static int omap3_init_bch_tail(struct mtd_info *mtd)
1272{ 1758{
1273 int i, steps; 1759 int i, steps, offset;
1274 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1760 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1275 mtd); 1761 mtd);
1276 struct nand_ecclayout *layout = &info->ecclayout; 1762 struct nand_ecclayout *layout = &info->ecclayout;
@@ -1292,11 +1778,21 @@ static int omap3_init_bch_tail(struct mtd_info *mtd)
1292 goto fail; 1778 goto fail;
1293 } 1779 }
1294 1780
1781 /* ECC layout compatible with RBL for BCH8 */
1782 if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE))
1783 offset = 2;
1784 else
1785 offset = mtd->oobsize - layout->eccbytes;
1786
1295 /* put ecc bytes at oob tail */ 1787 /* put ecc bytes at oob tail */
1296 for (i = 0; i < layout->eccbytes; i++) 1788 for (i = 0; i < layout->eccbytes; i++)
1297 layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i; 1789 layout->eccpos[i] = offset + i;
1790
1791 if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE))
1792 layout->oobfree[0].offset = 2 + layout->eccbytes * steps;
1793 else
1794 layout->oobfree[0].offset = 2;
1298 1795
1299 layout->oobfree[0].offset = 2;
1300 layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes; 1796 layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
1301 info->nand.ecc.layout = layout; 1797 info->nand.ecc.layout = layout;
1302 1798
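The layout code above places the ecc bytes either right after the two bad-block marker bytes (ELM with BCH8, for RBL compatibility) or at the OOB tail. A simplified sketch of the resulting eccpos computation, assuming struct nand_ecclayout from <linux/mtd/mtd.h> with eccbytes holding the total ecc byte count (an illustration, not a drop-in replacement for the hunk above):

/* Sketch: fill eccpos and the free OOB region for the two layouts. */
static void fill_bch_ecclayout(struct nand_ecclayout *layout, int oobsize,
                               bool rbl_compatible)
{
        int offset = rbl_compatible ? 2 : oobsize - layout->eccbytes;
        int i;

        for (i = 0; i < layout->eccbytes; i++)
                layout->eccpos[i] = offset + i;

        /* Free OOB space starts after the ecc bytes (RBL layout) or after
         * the bad-block markers (tail layout). */
        layout->oobfree[0].offset = rbl_compatible ?
                                    2 + layout->eccbytes : 2;
        layout->oobfree[0].length = oobsize - 2 - layout->eccbytes;
}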
@@ -1360,6 +1856,9 @@ static int omap_nand_probe(struct platform_device *pdev)
1360 1856
1361 info->nand.options = pdata->devsize; 1857 info->nand.options = pdata->devsize;
1362 info->nand.options |= NAND_SKIP_BBTSCAN; 1858 info->nand.options |= NAND_SKIP_BBTSCAN;
1859#ifdef CONFIG_MTD_NAND_OMAP_BCH
1860 info->of_node = pdata->of_node;
1861#endif
1363 1862
1364 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1863 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1365 if (res == NULL) { 1864 if (res == NULL) {
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index dbd3aa574eaf..30bd907a260a 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -174,7 +174,14 @@ out:
174 return rc; 174 return rc;
175} 175}
176 176
177static void __exit ofpart_parser_exit(void)
178{
179 deregister_mtd_parser(&ofpart_parser);
180 deregister_mtd_parser(&ofoldpart_parser);
181}
182
177module_init(ofpart_parser_init); 183module_init(ofpart_parser_init);
184module_exit(ofpart_parser_exit);
178 185
179MODULE_LICENSE("GPL"); 186MODULE_LICENSE("GPL");
180MODULE_DESCRIPTION("Parser for MTD partitioning information in device tree"); 187MODULE_DESCRIPTION("Parser for MTD partitioning information in device tree");
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index db2f22e7966a..70106607c247 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -44,7 +44,7 @@ struct nand_ecc_test {
44static void single_bit_error_data(void *error_data, void *correct_data, 44static void single_bit_error_data(void *error_data, void *correct_data,
45 size_t size) 45 size_t size)
46{ 46{
47 unsigned int offset = random32() % (size * BITS_PER_BYTE); 47 unsigned int offset = prandom_u32() % (size * BITS_PER_BYTE);
48 48
49 memcpy(error_data, correct_data, size); 49 memcpy(error_data, correct_data, size);
50 __change_bit_le(offset, error_data); 50 __change_bit_le(offset, error_data);
@@ -55,9 +55,9 @@ static void double_bit_error_data(void *error_data, void *correct_data,
55{ 55{
56 unsigned int offset[2]; 56 unsigned int offset[2];
57 57
58 offset[0] = random32() % (size * BITS_PER_BYTE); 58 offset[0] = prandom_u32() % (size * BITS_PER_BYTE);
59 do { 59 do {
60 offset[1] = random32() % (size * BITS_PER_BYTE); 60 offset[1] = prandom_u32() % (size * BITS_PER_BYTE);
61 } while (offset[0] == offset[1]); 61 } while (offset[0] == offset[1]);
62 62
63 memcpy(error_data, correct_data, size); 63 memcpy(error_data, correct_data, size);
@@ -68,7 +68,7 @@ static void double_bit_error_data(void *error_data, void *correct_data,
68 68
69static unsigned int random_ecc_bit(size_t size) 69static unsigned int random_ecc_bit(size_t size)
70{ 70{
71 unsigned int offset = random32() % (3 * BITS_PER_BYTE); 71 unsigned int offset = prandom_u32() % (3 * BITS_PER_BYTE);
72 72
73 if (size == 256) { 73 if (size == 256) {
74 /* 74 /*
@@ -76,7 +76,7 @@ static unsigned int random_ecc_bit(size_t size)
76 * and 17th bit) in ECC code for 256 byte data block 76 * and 17th bit) in ECC code for 256 byte data block
77 */ 77 */
78 while (offset == 16 || offset == 17) 78 while (offset == 16 || offset == 17)
79 offset = random32() % (3 * BITS_PER_BYTE); 79 offset = prandom_u32() % (3 * BITS_PER_BYTE);
80 } 80 }
81 81
82 return offset; 82 return offset;
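These hunks only swap random32() for prandom_u32(); both return a pseudo-random u32, so bounded values still come from a plain modulo. A tiny sketch of the idiom the test relies on (the helper name is illustrative):

#include <linux/bitops.h>
#include <linux/random.h>

/* Sketch: pick a random bit offset inside a buffer of 'size' bytes. */
static unsigned int random_bit_offset(size_t size)
{
        return prandom_u32() % (size * BITS_PER_BYTE);
}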
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 2d7e6cffd6d4..787f539d16ca 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -55,7 +55,7 @@ static int rand_eb(void)
55 unsigned int eb; 55 unsigned int eb;
56 56
57again: 57again:
58 eb = random32(); 58 eb = prandom_u32();
59 /* Read or write up 2 eraseblocks at a time - hence 'ebcnt - 1' */ 59 /* Read or write up 2 eraseblocks at a time - hence 'ebcnt - 1' */
60 eb %= (ebcnt - 1); 60 eb %= (ebcnt - 1);
61 if (bbt[eb]) 61 if (bbt[eb])
@@ -67,7 +67,7 @@ static int rand_offs(void)
67{ 67{
68 unsigned int offs; 68 unsigned int offs;
69 69
70 offs = random32(); 70 offs = prandom_u32();
71 offs %= bufsize; 71 offs %= bufsize;
72 return offs; 72 return offs;
73} 73}
@@ -76,7 +76,7 @@ static int rand_len(int offs)
76{ 76{
77 unsigned int len; 77 unsigned int len;
78 78
79 len = random32(); 79 len = prandom_u32();
80 len %= (bufsize - offs); 80 len %= (bufsize - offs);
81 return len; 81 return len;
82} 82}
@@ -191,7 +191,7 @@ static int do_write(void)
191 191
192static int do_operation(void) 192static int do_operation(void)
193{ 193{
194 if (random32() & 1) 194 if (prandom_u32() & 1)
195 return do_read(); 195 return do_read();
196 else 196 else
197 return do_write(); 197 return do_write();
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/mtd_torturetest.c
index c4cde1e9eddb..3a9f6a6a79f9 100644
--- a/drivers/mtd/tests/mtd_torturetest.c
+++ b/drivers/mtd/tests/mtd_torturetest.c
@@ -208,7 +208,7 @@ static inline int write_pattern(int ebnum, void *buf)
208static int __init tort_init(void) 208static int __init tort_init(void)
209{ 209{
210 int err = 0, i, infinite = !cycles_count; 210 int err = 0, i, infinite = !cycles_count;
211 int bad_ebs[ebcnt]; 211 int *bad_ebs;
212 212
213 printk(KERN_INFO "\n"); 213 printk(KERN_INFO "\n");
214 printk(KERN_INFO "=================================================\n"); 214 printk(KERN_INFO "=================================================\n");
@@ -250,28 +250,24 @@ static int __init tort_init(void)
250 250
251 err = -ENOMEM; 251 err = -ENOMEM;
252 patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL); 252 patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL);
253 if (!patt_5A5) { 253 if (!patt_5A5)
254 pr_err("error: cannot allocate memory\n");
255 goto out_mtd; 254 goto out_mtd;
256 }
257 255
258 patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL); 256 patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL);
259 if (!patt_A5A) { 257 if (!patt_A5A)
260 pr_err("error: cannot allocate memory\n");
261 goto out_patt_5A5; 258 goto out_patt_5A5;
262 }
263 259
264 patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL); 260 patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL);
265 if (!patt_FF) { 261 if (!patt_FF)
266 pr_err("error: cannot allocate memory\n");
267 goto out_patt_A5A; 262 goto out_patt_A5A;
268 }
269 263
270 check_buf = kmalloc(mtd->erasesize, GFP_KERNEL); 264 check_buf = kmalloc(mtd->erasesize, GFP_KERNEL);
271 if (!check_buf) { 265 if (!check_buf)
272 pr_err("error: cannot allocate memory\n");
273 goto out_patt_FF; 266 goto out_patt_FF;
274 } 267
268 bad_ebs = kcalloc(ebcnt, sizeof(*bad_ebs), GFP_KERNEL);
269 if (!bad_ebs)
270 goto out_check_buf;
275 271
276 err = 0; 272 err = 0;
277 273
@@ -290,7 +286,6 @@ static int __init tort_init(void)
290 /* 286 /*
291 * Check if there is a bad eraseblock among those we are going to test. 287 * Check if there is a bad eraseblock among those we are going to test.
292 */ 288 */
293 memset(&bad_ebs[0], 0, sizeof(int) * ebcnt);
294 if (mtd_can_have_bb(mtd)) { 289 if (mtd_can_have_bb(mtd)) {
295 for (i = eb; i < eb + ebcnt; i++) { 290 for (i = eb; i < eb + ebcnt; i++) {
296 err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize); 291 err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize);
@@ -394,6 +389,8 @@ out:
394 389
395 pr_info("finished after %u erase cycles\n", 390 pr_info("finished after %u erase cycles\n",
396 erase_cycles); 391 erase_cycles);
392 kfree(bad_ebs);
393out_check_buf:
397 kfree(check_buf); 394 kfree(check_buf);
398out_patt_FF: 395out_patt_FF:
399 kfree(patt_FF); 396 kfree(patt_FF);
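The torture-test hunk replaces the on-stack bad_ebs[ebcnt] array (a variable-length array) with kcalloc() and drops the pr_err() calls, since the allocator already warns on failure. A sketch of the resulting goto-unwind pattern (names and structure are illustrative, not the test's actual code):

#include <linux/slab.h>

static int alloc_test_buffers(size_t erasesize, int ebcnt)
{
        void *patt, *check;
        int *bad_ebs;
        int err = -ENOMEM;

        patt = kmalloc(erasesize, GFP_KERNEL);
        if (!patt)
                goto out;

        check = kmalloc(erasesize, GFP_KERNEL);
        if (!check)
                goto out_patt;

        bad_ebs = kcalloc(ebcnt, sizeof(*bad_ebs), GFP_KERNEL);
        if (!bad_ebs)
                goto out_check;

        err = 0;
        /* ... run the test using the buffers ... */

        kfree(bad_ebs);
out_check:
        kfree(check);
out_patt:
        kfree(patt);
out:
        return err;
}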
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 33f8f3b2c9b2..cba89fcd1587 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -86,7 +86,7 @@ static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
86static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi) 86static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
87{ 87{
88 if (ubi->dbg.emulate_bitflips) 88 if (ubi->dbg.emulate_bitflips)
89 return !(random32() % 200); 89 return !(prandom_u32() % 200);
90 return 0; 90 return 0;
91} 91}
92 92
@@ -100,7 +100,7 @@ static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
100static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi) 100static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
101{ 101{
102 if (ubi->dbg.emulate_io_failures) 102 if (ubi->dbg.emulate_io_failures)
103 return !(random32() % 500); 103 return !(prandom_u32() % 500);
104 return 0; 104 return 0;
105} 105}
106 106
@@ -114,7 +114,7 @@ static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
114static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi) 114static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
115{ 115{
116 if (ubi->dbg.emulate_io_failures) 116 if (ubi->dbg.emulate_io_failures)
117 return !(random32() % 400); 117 return !(prandom_u32() % 400);
118 return 0; 118 return 0;
119} 119}
120 120
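The ubi debug helpers emulate rare faults by testing prandom_u32() modulo a constant, i.e. a roughly 1-in-N chance per call (1/200, 1/400 and 1/500 above). In sketch form (the helper name is illustrative):

#include <linux/random.h>
#include <linux/types.h>

/* Sketch: return true with probability roughly 1/n. */
static inline bool one_in(unsigned int n)
{
        return !(prandom_u32() % n);
}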