Diffstat (limited to 'drivers/mtd')
-rw-r--r-- drivers/mtd/Kconfig | 3
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0001.c | 83
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0002.c | 283
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0020.c | 33
-rw-r--r-- drivers/mtd/chips/cfi_util.c | 6
-rw-r--r-- drivers/mtd/chips/fwh_lock.h | 4
-rw-r--r-- drivers/mtd/chips/map_absent.c | 10
-rw-r--r-- drivers/mtd/chips/map_ram.c | 14
-rw-r--r-- drivers/mtd/chips/map_rom.c | 13
-rw-r--r-- drivers/mtd/devices/Kconfig | 7
-rw-r--r-- drivers/mtd/devices/Makefile | 1
-rw-r--r-- drivers/mtd/devices/block2mtd.c | 28
-rw-r--r-- drivers/mtd/devices/doc2000.c | 25
-rw-r--r-- drivers/mtd/devices/doc2001.c | 22
-rw-r--r-- drivers/mtd/devices/doc2001plus.c | 22
-rw-r--r-- drivers/mtd/devices/docg3.c | 201
-rw-r--r-- drivers/mtd/devices/docg3.h | 20
-rw-r--r-- drivers/mtd/devices/lart.c | 17
-rw-r--r-- drivers/mtd/devices/m25p80.c | 56
-rw-r--r-- drivers/mtd/devices/ms02-nv.c | 12
-rw-r--r-- drivers/mtd/devices/mtd_dataflash.c | 50
-rw-r--r-- drivers/mtd/devices/mtdram.c | 35
-rw-r--r-- drivers/mtd/devices/phram.c | 76
-rw-r--r-- drivers/mtd/devices/pmc551.c | 99
-rw-r--r-- drivers/mtd/devices/slram.c | 41
-rw-r--r-- drivers/mtd/devices/spear_smi.c | 1147
-rw-r--r-- drivers/mtd/devices/sst25l.c | 46
-rw-r--r-- drivers/mtd/inftlcore.c | 2
-rw-r--r-- drivers/mtd/lpddr/lpddr_cmds.c | 37
-rw-r--r-- drivers/mtd/maps/bfin-async-flash.c | 4
-rw-r--r-- drivers/mtd/maps/dc21285.c | 2
-rw-r--r-- drivers/mtd/maps/gpio-addr-flash.c | 4
-rw-r--r-- drivers/mtd/maps/h720x-flash.c | 4
-rw-r--r-- drivers/mtd/maps/impa7.c | 2
-rw-r--r-- drivers/mtd/maps/intel_vr_nor.c | 2
-rw-r--r-- drivers/mtd/maps/ixp2000.c | 2
-rw-r--r-- drivers/mtd/maps/ixp4xx.c | 5
-rw-r--r-- drivers/mtd/maps/l440gx.c | 14
-rw-r--r-- drivers/mtd/maps/lantiq-flash.c | 6
-rw-r--r-- drivers/mtd/maps/latch-addr-flash.c | 5
-rw-r--r-- drivers/mtd/maps/pcmciamtd.c | 13
-rw-r--r-- drivers/mtd/maps/physmap.c | 24
-rw-r--r-- drivers/mtd/maps/plat-ram.c | 5
-rw-r--r-- drivers/mtd/maps/pxa2xx-flash.c | 3
-rw-r--r-- drivers/mtd/maps/rbtx4939-flash.c | 4
-rw-r--r-- drivers/mtd/maps/sa1100-flash.c | 18
-rw-r--r-- drivers/mtd/maps/solutionengine.c | 4
-rw-r--r-- drivers/mtd/maps/uclinux.c | 2
-rw-r--r-- drivers/mtd/maps/vmu-flash.c | 14
-rw-r--r-- drivers/mtd/maps/wr_sbc82xx_flash.c | 2
-rw-r--r-- drivers/mtd/mtd_blkdevs.c | 1
-rw-r--r-- drivers/mtd/mtdblock.c | 8
-rw-r--r-- drivers/mtd/mtdchar.c | 4
-rw-r--r-- drivers/mtd/mtdconcat.c | 106
-rw-r--r-- drivers/mtd/mtdcore.c | 271
-rw-r--r-- drivers/mtd/mtdoops.c | 9
-rw-r--r-- drivers/mtd/mtdpart.c | 200
-rw-r--r-- drivers/mtd/nand/Kconfig | 21
-rw-r--r-- drivers/mtd/nand/Makefile | 1
-rw-r--r-- drivers/mtd/nand/alauda.c | 9
-rw-r--r-- drivers/mtd/nand/atmel_nand.c | 1
-rw-r--r-- drivers/mtd/nand/bcm_umi_nand.c | 10
-rw-r--r-- drivers/mtd/nand/bf5xx_nand.c | 2
-rw-r--r-- drivers/mtd/nand/cafe_nand.c | 3
-rw-r--r-- drivers/mtd/nand/cmx270_nand.c | 2
-rw-r--r-- drivers/mtd/nand/cs553x_nand.c | 4
-rw-r--r-- drivers/mtd/nand/davinci_nand.c | 5
-rw-r--r-- drivers/mtd/nand/denali.c | 3
-rw-r--r-- drivers/mtd/nand/diskonchip.c | 1
-rw-r--r-- drivers/mtd/nand/docg4.c | 1377
-rw-r--r-- drivers/mtd/nand/fsl_elbc_nand.c | 6
-rw-r--r-- drivers/mtd/nand/fsmc_nand.c | 924
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-lib.c | 26
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-nand.c | 14
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-nand.h | 2
-rw-r--r-- drivers/mtd/nand/h1910.c | 4
-rw-r--r-- drivers/mtd/nand/jz4740_nand.c | 11
-rw-r--r-- drivers/mtd/nand/mxc_nand.c | 11
-rw-r--r-- drivers/mtd/nand/nand_base.c | 194
-rw-r--r-- drivers/mtd/nand/ndfc.c | 1
-rw-r--r-- drivers/mtd/nand/omap2.c | 5
-rw-r--r-- drivers/mtd/nand/orion_nand.c | 4
-rw-r--r-- drivers/mtd/nand/plat_nand.c | 5
-rw-r--r-- drivers/mtd/nand/ppchameleonevb.c | 18
-rw-r--r-- drivers/mtd/nand/pxa3xx_nand.c | 6
-rw-r--r-- drivers/mtd/nand/r852.c | 1
-rw-r--r-- drivers/mtd/nand/rtc_from4.c | 1
-rw-r--r-- drivers/mtd/nand/s3c2410.c | 5
-rw-r--r-- drivers/mtd/nand/sh_flctl.c | 106
-rw-r--r-- drivers/mtd/nand/sharpsl.c | 5
-rw-r--r-- drivers/mtd/nand/tmio_nand.c | 7
-rw-r--r-- drivers/mtd/nand/txx9ndfmc.c | 3
-rw-r--r-- drivers/mtd/nftlcore.c | 7
-rw-r--r-- drivers/mtd/onenand/generic.c | 6
-rw-r--r-- drivers/mtd/onenand/omap2.c | 6
-rw-r--r-- drivers/mtd/onenand/onenand_base.c | 68
-rw-r--r-- drivers/mtd/onenand/samsung.c | 6
-rw-r--r-- drivers/mtd/redboot.c | 6
-rw-r--r-- drivers/mtd/sm_ftl.c | 2
-rw-r--r-- drivers/mtd/ubi/gluebi.c | 29
100 files changed, 4662 insertions, 1397 deletions
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 284cf3433720..5760c1a4b3f6 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -304,9 +304,6 @@ config MTD_OOPS
304 buffer in a flash partition where it can be read back at some 304 buffer in a flash partition where it can be read back at some
305 later point. 305 later point.
306 306
307 To use, add console=ttyMTDx to the kernel command line,
308 where x is the MTD device number to use.
309
310config MTD_SWAP 307config MTD_SWAP
311 tristate "Swap on MTD device support" 308 tristate "Swap on MTD device support"
312 depends on MTD && SWAP 309 depends on MTD && SWAP
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 9bcd1f415f43..dbbd2edfb812 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -87,7 +87,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **
87 87
88static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, 88static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
89 size_t *retlen, void **virt, resource_size_t *phys); 89 size_t *retlen, void **virt, resource_size_t *phys);
90static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len); 90static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
91 91
92static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode); 92static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
93static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode); 93static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
@@ -262,9 +262,9 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd)
262static void fixup_use_point(struct mtd_info *mtd) 262static void fixup_use_point(struct mtd_info *mtd)
263{ 263{
264 struct map_info *map = mtd->priv; 264 struct map_info *map = mtd->priv;
265 if (!mtd->point && map_is_linear(map)) { 265 if (!mtd->_point && map_is_linear(map)) {
266 mtd->point = cfi_intelext_point; 266 mtd->_point = cfi_intelext_point;
267 mtd->unpoint = cfi_intelext_unpoint; 267 mtd->_unpoint = cfi_intelext_unpoint;
268 } 268 }
269} 269}
270 270
@@ -274,8 +274,8 @@ static void fixup_use_write_buffers(struct mtd_info *mtd)
274 struct cfi_private *cfi = map->fldrv_priv; 274 struct cfi_private *cfi = map->fldrv_priv;
275 if (cfi->cfiq->BufWriteTimeoutTyp) { 275 if (cfi->cfiq->BufWriteTimeoutTyp) {
276 printk(KERN_INFO "Using buffer write method\n" ); 276 printk(KERN_INFO "Using buffer write method\n" );
277 mtd->write = cfi_intelext_write_buffers; 277 mtd->_write = cfi_intelext_write_buffers;
278 mtd->writev = cfi_intelext_writev; 278 mtd->_writev = cfi_intelext_writev;
279 } 279 }
280} 280}
281 281
@@ -443,15 +443,15 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
443 mtd->type = MTD_NORFLASH; 443 mtd->type = MTD_NORFLASH;
444 444
445 /* Fill in the default mtd operations */ 445 /* Fill in the default mtd operations */
446 mtd->erase = cfi_intelext_erase_varsize; 446 mtd->_erase = cfi_intelext_erase_varsize;
447 mtd->read = cfi_intelext_read; 447 mtd->_read = cfi_intelext_read;
448 mtd->write = cfi_intelext_write_words; 448 mtd->_write = cfi_intelext_write_words;
449 mtd->sync = cfi_intelext_sync; 449 mtd->_sync = cfi_intelext_sync;
450 mtd->lock = cfi_intelext_lock; 450 mtd->_lock = cfi_intelext_lock;
451 mtd->unlock = cfi_intelext_unlock; 451 mtd->_unlock = cfi_intelext_unlock;
452 mtd->is_locked = cfi_intelext_is_locked; 452 mtd->_is_locked = cfi_intelext_is_locked;
453 mtd->suspend = cfi_intelext_suspend; 453 mtd->_suspend = cfi_intelext_suspend;
454 mtd->resume = cfi_intelext_resume; 454 mtd->_resume = cfi_intelext_resume;
455 mtd->flags = MTD_CAP_NORFLASH; 455 mtd->flags = MTD_CAP_NORFLASH;
456 mtd->name = map->name; 456 mtd->name = map->name;
457 mtd->writesize = 1; 457 mtd->writesize = 1;
@@ -600,12 +600,12 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
600 } 600 }
601 601
602#ifdef CONFIG_MTD_OTP 602#ifdef CONFIG_MTD_OTP
603 mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg; 603 mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
604 mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg; 604 mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
605 mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg; 605 mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
606 mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg; 606 mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
607 mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info; 607 mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
608 mtd->get_user_prot_info = cfi_intelext_get_user_prot_info; 608 mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
609#endif 609#endif
610 610
611 /* This function has the potential to distort the reality 611 /* This function has the potential to distort the reality
@@ -1017,8 +1017,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
1017 case FL_READY: 1017 case FL_READY:
1018 case FL_STATUS: 1018 case FL_STATUS:
1019 case FL_JEDEC_QUERY: 1019 case FL_JEDEC_QUERY:
1020 /* We should really make set_vpp() count, rather than doing this */
1021 DISABLE_VPP(map);
1022 break; 1020 break;
1023 default: 1021 default:
1024 printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate); 1022 printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
@@ -1324,7 +1322,7 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1324 int chipnum; 1322 int chipnum;
1325 int ret = 0; 1323 int ret = 0;
1326 1324
1327 if (!map->virt || (from + len > mtd->size)) 1325 if (!map->virt)
1328 return -EINVAL; 1326 return -EINVAL;
1329 1327
1330 /* Now lock the chip(s) to POINT state */ 1328 /* Now lock the chip(s) to POINT state */
@@ -1334,7 +1332,6 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1334 ofs = from - (chipnum << cfi->chipshift); 1332 ofs = from - (chipnum << cfi->chipshift);
1335 1333
1336 *virt = map->virt + cfi->chips[chipnum].start + ofs; 1334 *virt = map->virt + cfi->chips[chipnum].start + ofs;
1337 *retlen = 0;
1338 if (phys) 1335 if (phys)
1339 *phys = map->phys + cfi->chips[chipnum].start + ofs; 1336 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1340 1337
@@ -1369,12 +1366,12 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1369 return 0; 1366 return 0;
1370} 1367}
1371 1368
1372static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 1369static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1373{ 1370{
1374 struct map_info *map = mtd->priv; 1371 struct map_info *map = mtd->priv;
1375 struct cfi_private *cfi = map->fldrv_priv; 1372 struct cfi_private *cfi = map->fldrv_priv;
1376 unsigned long ofs; 1373 unsigned long ofs;
1377 int chipnum; 1374 int chipnum, err = 0;
1378 1375
1379 /* Now unlock the chip(s) POINT state */ 1376 /* Now unlock the chip(s) POINT state */
1380 1377
@@ -1382,7 +1379,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1382 chipnum = (from >> cfi->chipshift); 1379 chipnum = (from >> cfi->chipshift);
1383 ofs = from - (chipnum << cfi->chipshift); 1380 ofs = from - (chipnum << cfi->chipshift);
1384 1381
1385 while (len) { 1382 while (len && !err) {
1386 unsigned long thislen; 1383 unsigned long thislen;
1387 struct flchip *chip; 1384 struct flchip *chip;
1388 1385
@@ -1400,8 +1397,10 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1400 chip->ref_point_counter--; 1397 chip->ref_point_counter--;
1401 if(chip->ref_point_counter == 0) 1398 if(chip->ref_point_counter == 0)
1402 chip->state = FL_READY; 1399 chip->state = FL_READY;
1403 } else 1400 } else {
1404 printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */ 1401 printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
1402 err = -EINVAL;
1403 }
1405 1404
1406 put_chip(map, chip, chip->start); 1405 put_chip(map, chip, chip->start);
1407 mutex_unlock(&chip->mutex); 1406 mutex_unlock(&chip->mutex);
@@ -1410,6 +1409,8 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1410 ofs = 0; 1409 ofs = 0;
1411 chipnum++; 1410 chipnum++;
1412 } 1411 }
1412
1413 return err;
1413} 1414}
1414 1415
1415static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) 1416static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
@@ -1456,8 +1457,6 @@ static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, siz
1456 chipnum = (from >> cfi->chipshift); 1457 chipnum = (from >> cfi->chipshift);
1457 ofs = from - (chipnum << cfi->chipshift); 1458 ofs = from - (chipnum << cfi->chipshift);
1458 1459
1459 *retlen = 0;
1460
1461 while (len) { 1460 while (len) {
1462 unsigned long thislen; 1461 unsigned long thislen;
1463 1462
@@ -1551,7 +1550,8 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1551 } 1550 }
1552 1551
1553 xip_enable(map, chip, adr); 1552 xip_enable(map, chip, adr);
1554 out: put_chip(map, chip, adr); 1553 out: DISABLE_VPP(map);
1554 put_chip(map, chip, adr);
1555 mutex_unlock(&chip->mutex); 1555 mutex_unlock(&chip->mutex);
1556 return ret; 1556 return ret;
1557} 1557}
@@ -1565,10 +1565,6 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
1565 int chipnum; 1565 int chipnum;
1566 unsigned long ofs; 1566 unsigned long ofs;
1567 1567
1568 *retlen = 0;
1569 if (!len)
1570 return 0;
1571
1572 chipnum = to >> cfi->chipshift; 1568 chipnum = to >> cfi->chipshift;
1573 ofs = to - (chipnum << cfi->chipshift); 1569 ofs = to - (chipnum << cfi->chipshift);
1574 1570
@@ -1794,7 +1790,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1794 } 1790 }
1795 1791
1796 xip_enable(map, chip, cmd_adr); 1792 xip_enable(map, chip, cmd_adr);
1797 out: put_chip(map, chip, cmd_adr); 1793 out: DISABLE_VPP(map);
1794 put_chip(map, chip, cmd_adr);
1798 mutex_unlock(&chip->mutex); 1795 mutex_unlock(&chip->mutex);
1799 return ret; 1796 return ret;
1800} 1797}
@@ -1813,7 +1810,6 @@ static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1813 for (i = 0; i < count; i++) 1810 for (i = 0; i < count; i++)
1814 len += vecs[i].iov_len; 1811 len += vecs[i].iov_len;
1815 1812
1816 *retlen = 0;
1817 if (!len) 1813 if (!len)
1818 return 0; 1814 return 0;
1819 1815
@@ -1932,6 +1928,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1932 ret = -EIO; 1928 ret = -EIO;
1933 } else if (chipstatus & 0x20 && retries--) { 1929 } else if (chipstatus & 0x20 && retries--) {
1934 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus); 1930 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1931 DISABLE_VPP(map);
1935 put_chip(map, chip, adr); 1932 put_chip(map, chip, adr);
1936 mutex_unlock(&chip->mutex); 1933 mutex_unlock(&chip->mutex);
1937 goto retry; 1934 goto retry;
@@ -1944,7 +1941,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1944 } 1941 }
1945 1942
1946 xip_enable(map, chip, adr); 1943 xip_enable(map, chip, adr);
1947 out: put_chip(map, chip, adr); 1944 out: DISABLE_VPP(map);
1945 put_chip(map, chip, adr);
1948 mutex_unlock(&chip->mutex); 1946 mutex_unlock(&chip->mutex);
1949 return ret; 1947 return ret;
1950} 1948}
@@ -2086,7 +2084,8 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
2086 } 2084 }
2087 2085
2088 xip_enable(map, chip, adr); 2086 xip_enable(map, chip, adr);
2089out: put_chip(map, chip, adr); 2087 out: DISABLE_VPP(map);
2088 put_chip(map, chip, adr);
2090 mutex_unlock(&chip->mutex); 2089 mutex_unlock(&chip->mutex);
2091 return ret; 2090 return ret;
2092} 2091}
@@ -2483,7 +2482,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
2483 allowed to. Or should we return -EAGAIN, because the upper layers 2482 allowed to. Or should we return -EAGAIN, because the upper layers
2484 ought to have already shut down anything which was using the device 2483 ought to have already shut down anything which was using the device
2485 anyway? The latter for now. */ 2484 anyway? The latter for now. */
2486 printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate); 2485 printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2487 ret = -EAGAIN; 2486 ret = -EAGAIN;
2488 case FL_PM_SUSPENDED: 2487 case FL_PM_SUSPENDED:
2489 break; 2488 break;
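
The hunks above set the pattern repeated throughout this series: the driver now fills in the underscore-prefixed mtd->_erase/_read/_write/_point slots instead of the old public members, and the per-driver "*retlen = 0" initialisations and "from + len > mtd->size" boundary checks are deleted. The assumption behind those deletions is that every caller now goes through mtd_read()/mtd_write()/mtd_point()-style wrappers in the MTD core, which perform the checks once for all drivers. A minimal sketch of what such a wrapper does (illustrative only; the wrapper itself is not part of this diff and its exact body may differ):

static inline int mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
                            size_t *retlen, const u_char *buf)
{
        *retlen = 0;                            /* zeroed once, for every driver */
        if (to < 0 || to > mtd->size || len > mtd->size - to)
                return -EINVAL;                 /* boundary check removed from drivers */
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        if (!len)
                return 0;                       /* zero-length writes never reach the driver */
        return mtd->_write(mtd, to, len, retlen, buf);
}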
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 8d70895a58d6..d02592e6a0f0 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -59,6 +59,9 @@ static void cfi_amdstd_resume (struct mtd_info *);
59static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *); 59static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
60static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 60static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
61 61
62static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
63 size_t *retlen, const u_char *buf);
64
62static void cfi_amdstd_destroy(struct mtd_info *); 65static void cfi_amdstd_destroy(struct mtd_info *);
63 66
64struct mtd_info *cfi_cmdset_0002(struct map_info *, int); 67struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
@@ -189,7 +192,7 @@ static void fixup_use_write_buffers(struct mtd_info *mtd)
189 struct cfi_private *cfi = map->fldrv_priv; 192 struct cfi_private *cfi = map->fldrv_priv;
190 if (cfi->cfiq->BufWriteTimeoutTyp) { 193 if (cfi->cfiq->BufWriteTimeoutTyp) {
191 pr_debug("Using buffer write method\n" ); 194 pr_debug("Using buffer write method\n" );
192 mtd->write = cfi_amdstd_write_buffers; 195 mtd->_write = cfi_amdstd_write_buffers;
193 } 196 }
194} 197}
195 198
@@ -228,8 +231,8 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd)
228static void fixup_use_secsi(struct mtd_info *mtd) 231static void fixup_use_secsi(struct mtd_info *mtd)
229{ 232{
230 /* Setup for chips with a secsi area */ 233 /* Setup for chips with a secsi area */
231 mtd->read_user_prot_reg = cfi_amdstd_secsi_read; 234 mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
232 mtd->read_fact_prot_reg = cfi_amdstd_secsi_read; 235 mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
233} 236}
234 237
235static void fixup_use_erase_chip(struct mtd_info *mtd) 238static void fixup_use_erase_chip(struct mtd_info *mtd)
@@ -238,7 +241,7 @@ static void fixup_use_erase_chip(struct mtd_info *mtd)
238 struct cfi_private *cfi = map->fldrv_priv; 241 struct cfi_private *cfi = map->fldrv_priv;
239 if ((cfi->cfiq->NumEraseRegions == 1) && 242 if ((cfi->cfiq->NumEraseRegions == 1) &&
240 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { 243 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
241 mtd->erase = cfi_amdstd_erase_chip; 244 mtd->_erase = cfi_amdstd_erase_chip;
242 } 245 }
243 246
244} 247}
@@ -249,8 +252,8 @@ static void fixup_use_erase_chip(struct mtd_info *mtd)
249 */ 252 */
250static void fixup_use_atmel_lock(struct mtd_info *mtd) 253static void fixup_use_atmel_lock(struct mtd_info *mtd)
251{ 254{
252 mtd->lock = cfi_atmel_lock; 255 mtd->_lock = cfi_atmel_lock;
253 mtd->unlock = cfi_atmel_unlock; 256 mtd->_unlock = cfi_atmel_unlock;
254 mtd->flags |= MTD_POWERUP_LOCK; 257 mtd->flags |= MTD_POWERUP_LOCK;
255} 258}
256 259
@@ -429,12 +432,12 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
429 mtd->type = MTD_NORFLASH; 432 mtd->type = MTD_NORFLASH;
430 433
431 /* Fill in the default mtd operations */ 434 /* Fill in the default mtd operations */
432 mtd->erase = cfi_amdstd_erase_varsize; 435 mtd->_erase = cfi_amdstd_erase_varsize;
433 mtd->write = cfi_amdstd_write_words; 436 mtd->_write = cfi_amdstd_write_words;
434 mtd->read = cfi_amdstd_read; 437 mtd->_read = cfi_amdstd_read;
435 mtd->sync = cfi_amdstd_sync; 438 mtd->_sync = cfi_amdstd_sync;
436 mtd->suspend = cfi_amdstd_suspend; 439 mtd->_suspend = cfi_amdstd_suspend;
437 mtd->resume = cfi_amdstd_resume; 440 mtd->_resume = cfi_amdstd_resume;
438 mtd->flags = MTD_CAP_NORFLASH; 441 mtd->flags = MTD_CAP_NORFLASH;
439 mtd->name = map->name; 442 mtd->name = map->name;
440 mtd->writesize = 1; 443 mtd->writesize = 1;
@@ -443,6 +446,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
443 pr_debug("MTD %s(): write buffer size %d\n", __func__, 446 pr_debug("MTD %s(): write buffer size %d\n", __func__,
444 mtd->writebufsize); 447 mtd->writebufsize);
445 448
449 mtd->_panic_write = cfi_amdstd_panic_write;
446 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; 450 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
447 451
448 if (cfi->cfi_mode==CFI_MODE_CFI){ 452 if (cfi->cfi_mode==CFI_MODE_CFI){
@@ -770,8 +774,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
770 774
771 case FL_READY: 775 case FL_READY:
772 case FL_STATUS: 776 case FL_STATUS:
773 /* We should really make set_vpp() count, rather than doing this */
774 DISABLE_VPP(map);
775 break; 777 break;
776 default: 778 default:
777 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate); 779 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
@@ -1013,13 +1015,9 @@ static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_
1013 int ret = 0; 1015 int ret = 0;
1014 1016
1015 /* ofs: offset within the first chip that the first read should start */ 1017 /* ofs: offset within the first chip that the first read should start */
1016
1017 chipnum = (from >> cfi->chipshift); 1018 chipnum = (from >> cfi->chipshift);
1018 ofs = from - (chipnum << cfi->chipshift); 1019 ofs = from - (chipnum << cfi->chipshift);
1019 1020
1020
1021 *retlen = 0;
1022
1023 while (len) { 1021 while (len) {
1024 unsigned long thislen; 1022 unsigned long thislen;
1025 1023
@@ -1097,16 +1095,11 @@ static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len,
1097 int chipnum; 1095 int chipnum;
1098 int ret = 0; 1096 int ret = 0;
1099 1097
1100
1101 /* ofs: offset within the first chip that the first read should start */ 1098 /* ofs: offset within the first chip that the first read should start */
1102
1103 /* 8 secsi bytes per chip */ 1099 /* 8 secsi bytes per chip */
1104 chipnum=from>>3; 1100 chipnum=from>>3;
1105 ofs=from & 7; 1101 ofs=from & 7;
1106 1102
1107
1108 *retlen = 0;
1109
1110 while (len) { 1103 while (len) {
1111 unsigned long thislen; 1104 unsigned long thislen;
1112 1105
@@ -1234,6 +1227,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1234 xip_enable(map, chip, adr); 1227 xip_enable(map, chip, adr);
1235 op_done: 1228 op_done:
1236 chip->state = FL_READY; 1229 chip->state = FL_READY;
1230 DISABLE_VPP(map);
1237 put_chip(map, chip, adr); 1231 put_chip(map, chip, adr);
1238 mutex_unlock(&chip->mutex); 1232 mutex_unlock(&chip->mutex);
1239 1233
@@ -1251,10 +1245,6 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1251 unsigned long ofs, chipstart; 1245 unsigned long ofs, chipstart;
1252 DECLARE_WAITQUEUE(wait, current); 1246 DECLARE_WAITQUEUE(wait, current);
1253 1247
1254 *retlen = 0;
1255 if (!len)
1256 return 0;
1257
1258 chipnum = to >> cfi->chipshift; 1248 chipnum = to >> cfi->chipshift;
1259 ofs = to - (chipnum << cfi->chipshift); 1249 ofs = to - (chipnum << cfi->chipshift);
1260 chipstart = cfi->chips[chipnum].start; 1250 chipstart = cfi->chips[chipnum].start;
@@ -1476,6 +1466,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1476 ret = -EIO; 1466 ret = -EIO;
1477 op_done: 1467 op_done:
1478 chip->state = FL_READY; 1468 chip->state = FL_READY;
1469 DISABLE_VPP(map);
1479 put_chip(map, chip, adr); 1470 put_chip(map, chip, adr);
1480 mutex_unlock(&chip->mutex); 1471 mutex_unlock(&chip->mutex);
1481 1472
@@ -1493,10 +1484,6 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1493 int chipnum; 1484 int chipnum;
1494 unsigned long ofs; 1485 unsigned long ofs;
1495 1486
1496 *retlen = 0;
1497 if (!len)
1498 return 0;
1499
1500 chipnum = to >> cfi->chipshift; 1487 chipnum = to >> cfi->chipshift;
1501 ofs = to - (chipnum << cfi->chipshift); 1488 ofs = to - (chipnum << cfi->chipshift);
1502 1489
@@ -1562,6 +1549,238 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1562 return 0; 1549 return 0;
1563} 1550}
1564 1551
1552/*
1553 * Wait for the flash chip to become ready to write data
1554 *
1555 * This is only called during the panic_write() path. When panic_write()
1556 * is called, the kernel is in the process of a panic, and will soon be
1557 * dead. Therefore we don't take any locks, and attempt to get access
1558 * to the chip as soon as possible.
1559 */
1560static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
1561 unsigned long adr)
1562{
1563 struct cfi_private *cfi = map->fldrv_priv;
1564 int retries = 10;
1565 int i;
1566
1567 /*
1568 * If the driver thinks the chip is idle, and no toggle bits
1569 * are changing, then the chip is actually idle for sure.
1570 */
1571 if (chip->state == FL_READY && chip_ready(map, adr))
1572 return 0;
1573
1574 /*
1575 * Try several times to reset the chip and then wait for it
1576 * to become idle. The upper limit of a few milliseconds of
1577 * delay isn't a big problem: the kernel is dying anyway. It
1578 * is more important to save the messages.
1579 */
1580 while (retries > 0) {
1581 const unsigned long timeo = (HZ / 1000) + 1;
1582
1583 /* send the reset command */
1584 map_write(map, CMD(0xF0), chip->start);
1585
1586 /* wait for the chip to become ready */
1587 for (i = 0; i < jiffies_to_usecs(timeo); i++) {
1588 if (chip_ready(map, adr))
1589 return 0;
1590
1591 udelay(1);
1592 }
1593 }
1594
1595 /* the chip never became ready */
1596 return -EBUSY;
1597}
1598
1599/*
1600 * Write out one word of data to a single flash chip during a kernel panic
1601 *
1602 * This is only called during the panic_write() path. When panic_write()
1603 * is called, the kernel is in the process of a panic, and will soon be
1604 * dead. Therefore we don't take any locks, and attempt to get access
1605 * to the chip as soon as possible.
1606 *
1607 * The implementation of this routine is intentionally similar to
1608 * do_write_oneword(), in order to ease code maintenance.
1609 */
1610static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
1611 unsigned long adr, map_word datum)
1612{
1613 const unsigned long uWriteTimeout = (HZ / 1000) + 1;
1614 struct cfi_private *cfi = map->fldrv_priv;
1615 int retry_cnt = 0;
1616 map_word oldd;
1617 int ret = 0;
1618 int i;
1619
1620 adr += chip->start;
1621
1622 ret = cfi_amdstd_panic_wait(map, chip, adr);
1623 if (ret)
1624 return ret;
1625
1626 pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
1627 __func__, adr, datum.x[0]);
1628
1629 /*
1630 * Check for a NOP for the case when the datum to write is already
1631 * present - it saves time and works around buggy chips that corrupt
1632 * data at other locations when 0xff is written to a location that
1633 * already contains 0xff.
1634 */
1635 oldd = map_read(map, adr);
1636 if (map_word_equal(map, oldd, datum)) {
1637 pr_debug("MTD %s(): NOP\n", __func__);
1638 goto op_done;
1639 }
1640
1641 ENABLE_VPP(map);
1642
1643retry:
1644 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1645 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1646 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1647 map_write(map, datum, adr);
1648
1649 for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
1650 if (chip_ready(map, adr))
1651 break;
1652
1653 udelay(1);
1654 }
1655
1656 if (!chip_good(map, adr, datum)) {
1657 /* reset on all failures. */
1658 map_write(map, CMD(0xF0), chip->start);
1659 /* FIXME - should have reset delay before continuing */
1660
1661 if (++retry_cnt <= MAX_WORD_RETRIES)
1662 goto retry;
1663
1664 ret = -EIO;
1665 }
1666
1667op_done:
1668 DISABLE_VPP(map);
1669 return ret;
1670}
1671
1672/*
1673 * Write out some data during a kernel panic
1674 *
1675 * This is used by the mtdoops driver to save the dying messages from a
1676 * kernel which has panic'd.
1677 *
1678 * This routine ignores all of the locking used throughout the rest of the
1679 * driver, in order to ensure that the data gets written out no matter what
1680 * state this driver (and the flash chip itself) was in when the kernel crashed.
1681 *
1682 * The implementation of this routine is intentionally similar to
1683 * cfi_amdstd_write_words(), in order to ease code maintenance.
1684 */
1685static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1686 size_t *retlen, const u_char *buf)
1687{
1688 struct map_info *map = mtd->priv;
1689 struct cfi_private *cfi = map->fldrv_priv;
1690 unsigned long ofs, chipstart;
1691 int ret = 0;
1692 int chipnum;
1693
1694 chipnum = to >> cfi->chipshift;
1695 ofs = to - (chipnum << cfi->chipshift);
1696 chipstart = cfi->chips[chipnum].start;
1697
1698 /* If it's not bus aligned, do the first byte write */
1699 if (ofs & (map_bankwidth(map) - 1)) {
1700 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
1701 int i = ofs - bus_ofs;
1702 int n = 0;
1703 map_word tmp_buf;
1704
1705 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
1706 if (ret)
1707 return ret;
1708
1709 /* Load 'tmp_buf' with old contents of flash */
1710 tmp_buf = map_read(map, bus_ofs + chipstart);
1711
1712 /* Number of bytes to copy from buffer */
1713 n = min_t(int, len, map_bankwidth(map) - i);
1714
1715 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1716
1717 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1718 bus_ofs, tmp_buf);
1719 if (ret)
1720 return ret;
1721
1722 ofs += n;
1723 buf += n;
1724 (*retlen) += n;
1725 len -= n;
1726
1727 if (ofs >> cfi->chipshift) {
1728 chipnum++;
1729 ofs = 0;
1730 if (chipnum == cfi->numchips)
1731 return 0;
1732 }
1733 }
1734
1735 /* We are now aligned, write as much as possible */
1736 while (len >= map_bankwidth(map)) {
1737 map_word datum;
1738
1739 datum = map_word_load(map, buf);
1740
1741 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1742 ofs, datum);
1743 if (ret)
1744 return ret;
1745
1746 ofs += map_bankwidth(map);
1747 buf += map_bankwidth(map);
1748 (*retlen) += map_bankwidth(map);
1749 len -= map_bankwidth(map);
1750
1751 if (ofs >> cfi->chipshift) {
1752 chipnum++;
1753 ofs = 0;
1754 if (chipnum == cfi->numchips)
1755 return 0;
1756
1757 chipstart = cfi->chips[chipnum].start;
1758 }
1759 }
1760
1761 /* Write the trailing bytes if any */
1762 if (len & (map_bankwidth(map) - 1)) {
1763 map_word tmp_buf;
1764
1765 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
1766 if (ret)
1767 return ret;
1768
1769 tmp_buf = map_read(map, ofs + chipstart);
1770
1771 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1772
1773 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1774 ofs, tmp_buf);
1775 if (ret)
1776 return ret;
1777
1778 (*retlen) += len;
1779 }
1780
1781 return 0;
1782}
1783
1565 1784
1566/* 1785/*
1567 * Handle devices with one erase region, that only implement 1786 * Handle devices with one erase region, that only implement
@@ -1649,6 +1868,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1649 1868
1650 chip->state = FL_READY; 1869 chip->state = FL_READY;
1651 xip_enable(map, chip, adr); 1870 xip_enable(map, chip, adr);
1871 DISABLE_VPP(map);
1652 put_chip(map, chip, adr); 1872 put_chip(map, chip, adr);
1653 mutex_unlock(&chip->mutex); 1873 mutex_unlock(&chip->mutex);
1654 1874
@@ -1739,6 +1959,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1739 } 1959 }
1740 1960
1741 chip->state = FL_READY; 1961 chip->state = FL_READY;
1962 DISABLE_VPP(map);
1742 put_chip(map, chip, adr); 1963 put_chip(map, chip, adr);
1743 mutex_unlock(&chip->mutex); 1964 mutex_unlock(&chip->mutex);
1744 return ret; 1965 return ret;
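
Besides the handler renames, cfi_cmdset_0002.c gains a lock-free panic path: cfi_amdstd_panic_wait(), do_panic_write_oneword() and cfi_amdstd_panic_write(), hooked up through mtd->_panic_write. A rough sketch of how a panic-context consumer such as mtdoops could reach it via the mtd_panic_write() wrapper (the helper name and its caller are made up for illustration):

#include <linux/mtd/mtd.h>

/* Illustrative only: flush a dying kernel's log buffer through the new hook.
 * mtd_panic_write() dispatches to mtd->_panic_write, i.e. the
 * cfi_amdstd_panic_write() added above, which deliberately takes no locks. */
static void flush_panic_log(struct mtd_info *mtd, loff_t ofs,
                            const u_char *buf, size_t len)
{
        size_t retlen = 0;
        int ret;

        ret = mtd_panic_write(mtd, ofs, len, &retlen, buf);
        if (ret || retlen != len)
                pr_err("panic log flush failed at 0x%llx (%d)\n",
                       (unsigned long long)ofs, ret);
}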
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 85e80180b65b..096993f9711e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -228,15 +228,15 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
228 } 228 }
229 229
230 /* Also select the correct geometry setup too */ 230 /* Also select the correct geometry setup too */
231 mtd->erase = cfi_staa_erase_varsize; 231 mtd->_erase = cfi_staa_erase_varsize;
232 mtd->read = cfi_staa_read; 232 mtd->_read = cfi_staa_read;
233 mtd->write = cfi_staa_write_buffers; 233 mtd->_write = cfi_staa_write_buffers;
234 mtd->writev = cfi_staa_writev; 234 mtd->_writev = cfi_staa_writev;
235 mtd->sync = cfi_staa_sync; 235 mtd->_sync = cfi_staa_sync;
236 mtd->lock = cfi_staa_lock; 236 mtd->_lock = cfi_staa_lock;
237 mtd->unlock = cfi_staa_unlock; 237 mtd->_unlock = cfi_staa_unlock;
238 mtd->suspend = cfi_staa_suspend; 238 mtd->_suspend = cfi_staa_suspend;
239 mtd->resume = cfi_staa_resume; 239 mtd->_resume = cfi_staa_resume;
240 mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE; 240 mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
241 mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */ 241 mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
242 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 242 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
@@ -394,8 +394,6 @@ static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t
394 chipnum = (from >> cfi->chipshift); 394 chipnum = (from >> cfi->chipshift);
395 ofs = from - (chipnum << cfi->chipshift); 395 ofs = from - (chipnum << cfi->chipshift);
396 396
397 *retlen = 0;
398
399 while (len) { 397 while (len) {
400 unsigned long thislen; 398 unsigned long thislen;
401 399
@@ -617,10 +615,6 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
617 int chipnum; 615 int chipnum;
618 unsigned long ofs; 616 unsigned long ofs;
619 617
620 *retlen = 0;
621 if (!len)
622 return 0;
623
624 chipnum = to >> cfi->chipshift; 618 chipnum = to >> cfi->chipshift;
625 ofs = to - (chipnum << cfi->chipshift); 619 ofs = to - (chipnum << cfi->chipshift);
626 620
@@ -904,12 +898,6 @@ static int cfi_staa_erase_varsize(struct mtd_info *mtd,
904 int i, first; 898 int i, first;
905 struct mtd_erase_region_info *regions = mtd->eraseregions; 899 struct mtd_erase_region_info *regions = mtd->eraseregions;
906 900
907 if (instr->addr > mtd->size)
908 return -EINVAL;
909
910 if ((instr->len + instr->addr) > mtd->size)
911 return -EINVAL;
912
913 /* Check that both start and end of the requested erase are 901 /* Check that both start and end of the requested erase are
914 * aligned with the erasesize at the appropriate addresses. 902 * aligned with the erasesize at the appropriate addresses.
915 */ 903 */
@@ -1155,9 +1143,6 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1155 if (len & (mtd->erasesize -1)) 1143 if (len & (mtd->erasesize -1))
1156 return -EINVAL; 1144 return -EINVAL;
1157 1145
1158 if ((len + ofs) > mtd->size)
1159 return -EINVAL;
1160
1161 chipnum = ofs >> cfi->chipshift; 1146 chipnum = ofs >> cfi->chipshift;
1162 adr = ofs - (chipnum << cfi->chipshift); 1147 adr = ofs - (chipnum << cfi->chipshift);
1163 1148
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 8e464054a631..f992418f40a8 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -173,12 +173,6 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
173 int i, first; 173 int i, first;
174 struct mtd_erase_region_info *regions = mtd->eraseregions; 174 struct mtd_erase_region_info *regions = mtd->eraseregions;
175 175
176 if (ofs > mtd->size)
177 return -EINVAL;
178
179 if ((len + ofs) > mtd->size)
180 return -EINVAL;
181
182 /* Check that both start and end of the requested erase are 176 /* Check that both start and end of the requested erase are
183 * aligned with the erasesize at the appropriate addresses. 177 * aligned with the erasesize at the appropriate addresses.
184 */ 178 */
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index 89c6595454a5..800b0e853e86 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -101,7 +101,7 @@ static void fixup_use_fwh_lock(struct mtd_info *mtd)
101{ 101{
102 printk(KERN_NOTICE "using fwh lock/unlock method\n"); 102 printk(KERN_NOTICE "using fwh lock/unlock method\n");
103 /* Setup for the chips with the fwh lock method */ 103 /* Setup for the chips with the fwh lock method */
104 mtd->lock = fwh_lock_varsize; 104 mtd->_lock = fwh_lock_varsize;
105 mtd->unlock = fwh_unlock_varsize; 105 mtd->_unlock = fwh_unlock_varsize;
106} 106}
107#endif /* FWH_LOCK_H */ 107#endif /* FWH_LOCK_H */
diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c
index f2b872946871..f7a5bca92aef 100644
--- a/drivers/mtd/chips/map_absent.c
+++ b/drivers/mtd/chips/map_absent.c
@@ -55,10 +55,10 @@ static struct mtd_info *map_absent_probe(struct map_info *map)
55 mtd->name = map->name; 55 mtd->name = map->name;
56 mtd->type = MTD_ABSENT; 56 mtd->type = MTD_ABSENT;
57 mtd->size = map->size; 57 mtd->size = map->size;
58 mtd->erase = map_absent_erase; 58 mtd->_erase = map_absent_erase;
59 mtd->read = map_absent_read; 59 mtd->_read = map_absent_read;
60 mtd->write = map_absent_write; 60 mtd->_write = map_absent_write;
61 mtd->sync = map_absent_sync; 61 mtd->_sync = map_absent_sync;
62 mtd->flags = 0; 62 mtd->flags = 0;
63 mtd->erasesize = PAGE_SIZE; 63 mtd->erasesize = PAGE_SIZE;
64 mtd->writesize = 1; 64 mtd->writesize = 1;
@@ -70,13 +70,11 @@ static struct mtd_info *map_absent_probe(struct map_info *map)
70 70
71static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 71static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
72{ 72{
73 *retlen = 0;
74 return -ENODEV; 73 return -ENODEV;
75} 74}
76 75
77static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) 76static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
78{ 77{
79 *retlen = 0;
80 return -ENODEV; 78 return -ENODEV;
81} 79}
82 80
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index 67640ccb2d41..991c2a1c05d3 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -64,11 +64,11 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
64 mtd->name = map->name; 64 mtd->name = map->name;
65 mtd->type = MTD_RAM; 65 mtd->type = MTD_RAM;
66 mtd->size = map->size; 66 mtd->size = map->size;
67 mtd->erase = mapram_erase; 67 mtd->_erase = mapram_erase;
68 mtd->get_unmapped_area = mapram_unmapped_area; 68 mtd->_get_unmapped_area = mapram_unmapped_area;
69 mtd->read = mapram_read; 69 mtd->_read = mapram_read;
70 mtd->write = mapram_write; 70 mtd->_write = mapram_write;
71 mtd->sync = mapram_nop; 71 mtd->_sync = mapram_nop;
72 mtd->flags = MTD_CAP_RAM; 72 mtd->flags = MTD_CAP_RAM;
73 mtd->writesize = 1; 73 mtd->writesize = 1;
74 74
@@ -122,14 +122,10 @@ static int mapram_erase (struct mtd_info *mtd, struct erase_info *instr)
122 unsigned long i; 122 unsigned long i;
123 123
124 allff = map_word_ff(map); 124 allff = map_word_ff(map);
125
126 for (i=0; i<instr->len; i += map_bankwidth(map)) 125 for (i=0; i<instr->len; i += map_bankwidth(map))
127 map_write(map, allff, instr->addr + i); 126 map_write(map, allff, instr->addr + i);
128
129 instr->state = MTD_ERASE_DONE; 127 instr->state = MTD_ERASE_DONE;
130
131 mtd_erase_callback(instr); 128 mtd_erase_callback(instr);
132
133 return 0; 129 return 0;
134} 130}
135 131
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index 593f73d480d2..47a43cf7e5c6 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -41,11 +41,11 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
41 mtd->name = map->name; 41 mtd->name = map->name;
42 mtd->type = MTD_ROM; 42 mtd->type = MTD_ROM;
43 mtd->size = map->size; 43 mtd->size = map->size;
44 mtd->get_unmapped_area = maprom_unmapped_area; 44 mtd->_get_unmapped_area = maprom_unmapped_area;
45 mtd->read = maprom_read; 45 mtd->_read = maprom_read;
46 mtd->write = maprom_write; 46 mtd->_write = maprom_write;
47 mtd->sync = maprom_nop; 47 mtd->_sync = maprom_nop;
48 mtd->erase = maprom_erase; 48 mtd->_erase = maprom_erase;
49 mtd->flags = MTD_CAP_ROM; 49 mtd->flags = MTD_CAP_ROM;
50 mtd->erasesize = map->size; 50 mtd->erasesize = map->size;
51 mtd->writesize = 1; 51 mtd->writesize = 1;
@@ -85,8 +85,7 @@ static void maprom_nop(struct mtd_info *mtd)
85 85
86static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) 86static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
87{ 87{
88 printk(KERN_NOTICE "maprom_write called\n"); 88 return -EROFS;
89 return -EIO;
90} 89}
91 90
92static int maprom_erase (struct mtd_info *mtd, struct erase_info *info) 91static int maprom_erase (struct mtd_info *mtd, struct erase_info *info)
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 8d3dac40d7e6..4cdb2af7bf44 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -103,6 +103,13 @@ config M25PXX_USE_FAST_READ
103 help 103 help
104 This option enables FAST_READ access supported by ST M25Pxx. 104 This option enables FAST_READ access supported by ST M25Pxx.
105 105
106config MTD_SPEAR_SMI
107 tristate "SPEAR MTD NOR Support through SMI controller"
108 depends on PLAT_SPEAR
109 default y
110 help
111 This enable SNOR support on SPEAR platforms using SMI controller
112
106config MTD_SST25L 113config MTD_SST25L
107 tristate "Support SST25L (non JEDEC) SPI Flash chips" 114 tristate "Support SST25L (non JEDEC) SPI Flash chips"
108 depends on SPI_MASTER 115 depends on SPI_MASTER
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 56c7cd462f11..a4dd1d822b6c 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_MTD_LART) += lart.o
17obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o 17obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
18obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o 18obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
19obj-$(CONFIG_MTD_M25P80) += m25p80.o 19obj-$(CONFIG_MTD_M25P80) += m25p80.o
20obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o
20obj-$(CONFIG_MTD_SST25L) += sst25l.o 21obj-$(CONFIG_MTD_SST25L) += sst25l.o
21 22
22CFLAGS_docg3.o += -I$(src) \ No newline at end of file 23CFLAGS_docg3.o += -I$(src) \ No newline at end of file
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index e7e46d1e7463..a4a80b742e65 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -104,14 +104,6 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
104 int offset = from & (PAGE_SIZE-1); 104 int offset = from & (PAGE_SIZE-1);
105 int cpylen; 105 int cpylen;
106 106
107 if (from > mtd->size)
108 return -EINVAL;
109 if (from + len > mtd->size)
110 len = mtd->size - from;
111
112 if (retlen)
113 *retlen = 0;
114
115 while (len) { 107 while (len) {
116 if ((offset + len) > PAGE_SIZE) 108 if ((offset + len) > PAGE_SIZE)
117 cpylen = PAGE_SIZE - offset; // multiple pages 109 cpylen = PAGE_SIZE - offset; // multiple pages
@@ -148,8 +140,6 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
148 int offset = to & ~PAGE_MASK; // page offset 140 int offset = to & ~PAGE_MASK; // page offset
149 int cpylen; 141 int cpylen;
150 142
151 if (retlen)
152 *retlen = 0;
153 while (len) { 143 while (len) {
154 if ((offset+len) > PAGE_SIZE) 144 if ((offset+len) > PAGE_SIZE)
155 cpylen = PAGE_SIZE - offset; // multiple pages 145 cpylen = PAGE_SIZE - offset; // multiple pages
@@ -188,13 +178,6 @@ static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
188 struct block2mtd_dev *dev = mtd->priv; 178 struct block2mtd_dev *dev = mtd->priv;
189 int err; 179 int err;
190 180
191 if (!len)
192 return 0;
193 if (to >= mtd->size)
194 return -ENOSPC;
195 if (to + len > mtd->size)
196 len = mtd->size - to;
197
198 mutex_lock(&dev->write_mutex); 181 mutex_lock(&dev->write_mutex);
199 err = _block2mtd_write(dev, buf, to, len, retlen); 182 err = _block2mtd_write(dev, buf, to, len, retlen);
200 mutex_unlock(&dev->write_mutex); 183 mutex_unlock(&dev->write_mutex);
@@ -283,13 +266,14 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
283 dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK; 266 dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
284 dev->mtd.erasesize = erase_size; 267 dev->mtd.erasesize = erase_size;
285 dev->mtd.writesize = 1; 268 dev->mtd.writesize = 1;
269 dev->mtd.writebufsize = PAGE_SIZE;
286 dev->mtd.type = MTD_RAM; 270 dev->mtd.type = MTD_RAM;
287 dev->mtd.flags = MTD_CAP_RAM; 271 dev->mtd.flags = MTD_CAP_RAM;
288 dev->mtd.erase = block2mtd_erase; 272 dev->mtd._erase = block2mtd_erase;
289 dev->mtd.write = block2mtd_write; 273 dev->mtd._write = block2mtd_write;
290 dev->mtd.writev = mtd_writev; 274 dev->mtd._writev = mtd_writev;
291 dev->mtd.sync = block2mtd_sync; 275 dev->mtd._sync = block2mtd_sync;
292 dev->mtd.read = block2mtd_read; 276 dev->mtd._read = block2mtd_read;
293 dev->mtd.priv = dev; 277 dev->mtd.priv = dev;
294 dev->mtd.owner = THIS_MODULE; 278 dev->mtd.owner = THIS_MODULE;
295 279
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index b1cdf6479019..a4eb8b5b85ec 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -562,14 +562,15 @@ void DoC2k_init(struct mtd_info *mtd)
562 562
563 mtd->type = MTD_NANDFLASH; 563 mtd->type = MTD_NANDFLASH;
564 mtd->flags = MTD_CAP_NANDFLASH; 564 mtd->flags = MTD_CAP_NANDFLASH;
565 mtd->writesize = 512; 565 mtd->writebufsize = mtd->writesize = 512;
566 mtd->oobsize = 16; 566 mtd->oobsize = 16;
567 mtd->ecc_strength = 2;
567 mtd->owner = THIS_MODULE; 568 mtd->owner = THIS_MODULE;
568 mtd->erase = doc_erase; 569 mtd->_erase = doc_erase;
569 mtd->read = doc_read; 570 mtd->_read = doc_read;
570 mtd->write = doc_write; 571 mtd->_write = doc_write;
571 mtd->read_oob = doc_read_oob; 572 mtd->_read_oob = doc_read_oob;
572 mtd->write_oob = doc_write_oob; 573 mtd->_write_oob = doc_write_oob;
573 this->curfloor = -1; 574 this->curfloor = -1;
574 this->curchip = -1; 575 this->curchip = -1;
575 mutex_init(&this->lock); 576 mutex_init(&this->lock);
@@ -602,13 +603,7 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
602 int i, len256 = 0, ret=0; 603 int i, len256 = 0, ret=0;
603 size_t left = len; 604 size_t left = len;
604 605
605 /* Don't allow read past end of device */
606 if (from >= this->totlen)
607 return -EINVAL;
608
609 mutex_lock(&this->lock); 606 mutex_lock(&this->lock);
610
611 *retlen = 0;
612 while (left) { 607 while (left) {
613 len = left; 608 len = left;
614 609
@@ -748,13 +743,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
748 size_t left = len; 743 size_t left = len;
749 int status; 744 int status;
750 745
751 /* Don't allow write past end of device */
752 if (to >= this->totlen)
753 return -EINVAL;
754
755 mutex_lock(&this->lock); 746 mutex_lock(&this->lock);
756
757 *retlen = 0;
758 while (left) { 747 while (left) {
759 len = left; 748 len = left;
760 749
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index 7543b98f46c4..f6927955dab0 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -346,14 +346,15 @@ void DoCMil_init(struct mtd_info *mtd)
346 346
347 /* FIXME: erase size is not always 8KiB */ 347 /* FIXME: erase size is not always 8KiB */
348 mtd->erasesize = 0x2000; 348 mtd->erasesize = 0x2000;
349 mtd->writesize = 512; 349 mtd->writebufsize = mtd->writesize = 512;
350 mtd->oobsize = 16; 350 mtd->oobsize = 16;
351 mtd->ecc_strength = 2;
351 mtd->owner = THIS_MODULE; 352 mtd->owner = THIS_MODULE;
352 mtd->erase = doc_erase; 353 mtd->_erase = doc_erase;
353 mtd->read = doc_read; 354 mtd->_read = doc_read;
354 mtd->write = doc_write; 355 mtd->_write = doc_write;
355 mtd->read_oob = doc_read_oob; 356 mtd->_read_oob = doc_read_oob;
356 mtd->write_oob = doc_write_oob; 357 mtd->_write_oob = doc_write_oob;
357 this->curfloor = -1; 358 this->curfloor = -1;
358 this->curchip = -1; 359 this->curchip = -1;
359 360
@@ -383,10 +384,6 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
383 void __iomem *docptr = this->virtadr; 384 void __iomem *docptr = this->virtadr;
384 struct Nand *mychip = &this->chips[from >> (this->chipshift)]; 385 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
385 386
386 /* Don't allow read past end of device */
387 if (from >= this->totlen)
388 return -EINVAL;
389
390 /* Don't allow a single read to cross a 512-byte block boundary */ 387 /* Don't allow a single read to cross a 512-byte block boundary */
391 if (from + len > ((from | 0x1ff) + 1)) 388 if (from + len > ((from | 0x1ff) + 1))
392 len = ((from | 0x1ff) + 1) - from; 389 len = ((from | 0x1ff) + 1) - from;
@@ -494,10 +491,6 @@ static int doc_write (struct mtd_info *mtd, loff_t to, size_t len,
494 void __iomem *docptr = this->virtadr; 491 void __iomem *docptr = this->virtadr;
495 struct Nand *mychip = &this->chips[to >> (this->chipshift)]; 492 struct Nand *mychip = &this->chips[to >> (this->chipshift)];
496 493
497 /* Don't allow write past end of device */
498 if (to >= this->totlen)
499 return -EINVAL;
500
501#if 0 494#if 0
502 /* Don't allow a single write to cross a 512-byte block boundary */ 495 /* Don't allow a single write to cross a 512-byte block boundary */
503 if (to + len > ( (to | 0x1ff) + 1)) 496 if (to + len > ( (to | 0x1ff) + 1))
@@ -599,7 +592,6 @@ static int doc_write (struct mtd_info *mtd, loff_t to, size_t len,
599 printk("Error programming flash\n"); 592 printk("Error programming flash\n");
600 /* Error in programming 593 /* Error in programming
601 FIXME: implement Bad Block Replacement (in nftl.c ??) */ 594 FIXME: implement Bad Block Replacement (in nftl.c ??) */
602 *retlen = 0;
603 ret = -EIO; 595 ret = -EIO;
604 } 596 }
605 dummy = ReadDOC(docptr, LastDataRead); 597 dummy = ReadDOC(docptr, LastDataRead);
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 177510d0e7ee..04eb2e4aa50f 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -467,14 +467,15 @@ void DoCMilPlus_init(struct mtd_info *mtd)
467 467
468 mtd->type = MTD_NANDFLASH; 468 mtd->type = MTD_NANDFLASH;
469 mtd->flags = MTD_CAP_NANDFLASH; 469 mtd->flags = MTD_CAP_NANDFLASH;
470 mtd->writesize = 512; 470 mtd->writebufsize = mtd->writesize = 512;
471 mtd->oobsize = 16; 471 mtd->oobsize = 16;
472 mtd->ecc_strength = 2;
472 mtd->owner = THIS_MODULE; 473 mtd->owner = THIS_MODULE;
473 mtd->erase = doc_erase; 474 mtd->_erase = doc_erase;
474 mtd->read = doc_read; 475 mtd->_read = doc_read;
475 mtd->write = doc_write; 476 mtd->_write = doc_write;
476 mtd->read_oob = doc_read_oob; 477 mtd->_read_oob = doc_read_oob;
477 mtd->write_oob = doc_write_oob; 478 mtd->_write_oob = doc_write_oob;
478 this->curfloor = -1; 479 this->curfloor = -1;
479 this->curchip = -1; 480 this->curchip = -1;
480 481
@@ -581,10 +582,6 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
581 void __iomem * docptr = this->virtadr; 582 void __iomem * docptr = this->virtadr;
582 struct Nand *mychip = &this->chips[from >> (this->chipshift)]; 583 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
583 584
584 /* Don't allow read past end of device */
585 if (from >= this->totlen)
586 return -EINVAL;
587
588 /* Don't allow a single read to cross a 512-byte block boundary */ 585 /* Don't allow a single read to cross a 512-byte block boundary */
589 if (from + len > ((from | 0x1ff) + 1)) 586 if (from + len > ((from | 0x1ff) + 1))
590 len = ((from | 0x1ff) + 1) - from; 587 len = ((from | 0x1ff) + 1) - from;
@@ -700,10 +697,6 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
700 void __iomem * docptr = this->virtadr; 697 void __iomem * docptr = this->virtadr;
701 struct Nand *mychip = &this->chips[to >> (this->chipshift)]; 698 struct Nand *mychip = &this->chips[to >> (this->chipshift)];
702 699
703 /* Don't allow write past end of device */
704 if (to >= this->totlen)
705 return -EINVAL;
706
707 /* Don't allow writes which aren't exactly one block (512 bytes) */ 700 /* Don't allow writes which aren't exactly one block (512 bytes) */
708 if ((to & 0x1ff) || (len != 0x200)) 701 if ((to & 0x1ff) || (len != 0x200))
709 return -EINVAL; 702 return -EINVAL;
@@ -800,7 +793,6 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
800 printk("MTD: Error 0x%x programming at 0x%x\n", dummy, (int)to); 793 printk("MTD: Error 0x%x programming at 0x%x\n", dummy, (int)to);
801 /* Error in programming 794 /* Error in programming
802 FIXME: implement Bad Block Replacement (in nftl.c ??) */ 795 FIXME: implement Bad Block Replacement (in nftl.c ??) */
803 *retlen = 0;
804 ret = -EIO; 796 ret = -EIO;
805 } 797 }
806 dummy = ReadDOC(docptr, Mplus_LastDataRead); 798 dummy = ReadDOC(docptr, Mplus_LastDataRead);
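
Alongside the handler renames, the DiskOnChip drivers above start advertising two newer mtd_info fields: writebufsize (set equal to the 512-byte writesize) and ecc_strength (2, i.e. the on-chip ECC corrects at most two bit errors per 512-byte page). A hedged sketch of how an upper layer might consume these fields; the scrub-threshold policy shown is illustrative and not taken from this series:

#include <linux/mtd/mtd.h>

/* Illustrative consumer of the new mtd_info fields. */
static bool flash_page_needs_scrub(struct mtd_info *mtd,
                                   unsigned int corrected_bitflips)
{
        /* writebufsize tells upper layers the largest chunk the device
         * programs in one go; ecc_strength is the maximum number of bit
         * errors the ECC can fix per read unit. */
        pr_debug("write unit %u bytes, ECC corrects up to %u bit errors\n",
                 mtd->writebufsize, mtd->ecc_strength);

        /* If correctable errors approach the ECC limit, move the data. */
        return corrected_bitflips >= mtd->ecc_strength;
}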
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index ad11ef0a81f4..8272c02668d6 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -80,14 +80,9 @@ static struct nand_ecclayout docg3_oobinfo = {
80 .oobavail = 8, 80 .oobavail = 8,
81}; 81};
82 82
83/**
84 * struct docg3_bch - BCH engine
85 */
86static struct bch_control *docg3_bch;
87
88static inline u8 doc_readb(struct docg3 *docg3, u16 reg) 83static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
89{ 84{
90 u8 val = readb(docg3->base + reg); 85 u8 val = readb(docg3->cascade->base + reg);
91 86
92 trace_docg3_io(0, 8, reg, (int)val); 87 trace_docg3_io(0, 8, reg, (int)val);
93 return val; 88 return val;
@@ -95,7 +90,7 @@ static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
95 90
96static inline u16 doc_readw(struct docg3 *docg3, u16 reg) 91static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
97{ 92{
98 u16 val = readw(docg3->base + reg); 93 u16 val = readw(docg3->cascade->base + reg);
99 94
100 trace_docg3_io(0, 16, reg, (int)val); 95 trace_docg3_io(0, 16, reg, (int)val);
101 return val; 96 return val;
@@ -103,13 +98,13 @@ static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
103 98
104static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg) 99static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg)
105{ 100{
106 writeb(val, docg3->base + reg); 101 writeb(val, docg3->cascade->base + reg);
107 trace_docg3_io(1, 8, reg, val); 102 trace_docg3_io(1, 8, reg, val);
108} 103}
109 104
110static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg) 105static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg)
111{ 106{
112 writew(val, docg3->base + reg); 107 writew(val, docg3->cascade->base + reg);
113 trace_docg3_io(1, 16, reg, val); 108 trace_docg3_io(1, 16, reg, val);
114} 109}
115 110
@@ -643,7 +638,8 @@ static int doc_ecc_bch_fix_data(struct docg3 *docg3, void *buf, u8 *hwecc)
643 638
644 for (i = 0; i < DOC_ECC_BCH_SIZE; i++) 639 for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
645 ecc[i] = bitrev8(hwecc[i]); 640 ecc[i] = bitrev8(hwecc[i]);
646 numerrs = decode_bch(docg3_bch, NULL, DOC_ECC_BCH_COVERED_BYTES, 641 numerrs = decode_bch(docg3->cascade->bch, NULL,
642 DOC_ECC_BCH_COVERED_BYTES,
647 NULL, ecc, NULL, errorpos); 643 NULL, ecc, NULL, errorpos);
648 BUG_ON(numerrs == -EINVAL); 644 BUG_ON(numerrs == -EINVAL);
649 if (numerrs < 0) 645 if (numerrs < 0)
@@ -734,7 +730,7 @@ err:
734 * doc_read_page_getbytes - Reads bytes from a prepared page 730 * doc_read_page_getbytes - Reads bytes from a prepared page
735 * @docg3: the device 731 * @docg3: the device
736 * @len: the number of bytes to be read (must be a multiple of 4) 732 * @len: the number of bytes to be read (must be a multiple of 4)
737 * @buf: the buffer to be filled in 733 * @buf: the buffer to be filled in (or NULL to forget bytes)
738 * @first: 1 if first time read, DOC_READADDRESS should be set 734 * @first: 1 if first time read, DOC_READADDRESS should be set
739 * 735 *
740 */ 736 */
@@ -849,7 +845,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
849 struct mtd_oob_ops *ops) 845 struct mtd_oob_ops *ops)
850{ 846{
851 struct docg3 *docg3 = mtd->priv; 847 struct docg3 *docg3 = mtd->priv;
852 int block0, block1, page, ret, ofs = 0; 848 int block0, block1, page, ret, skip, ofs = 0;
853 u8 *oobbuf = ops->oobbuf; 849 u8 *oobbuf = ops->oobbuf;
854 u8 *buf = ops->datbuf; 850 u8 *buf = ops->datbuf;
855 size_t len, ooblen, nbdata, nboob; 851 size_t len, ooblen, nbdata, nboob;
@@ -869,34 +865,36 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
869 865
870 doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n", 866 doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
871 from, ops->mode, buf, len, oobbuf, ooblen); 867 from, ops->mode, buf, len, oobbuf, ooblen);
872 if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % DOC_LAYOUT_OOB_SIZE) || 868 if (ooblen % DOC_LAYOUT_OOB_SIZE)
873 (from % DOC_LAYOUT_PAGE_SIZE))
874 return -EINVAL; 869 return -EINVAL;
875 870
876 ret = -EINVAL; 871 if (from + len > mtd->size)
877 calc_block_sector(from + len, &block0, &block1, &page, &ofs, 872 return -EINVAL;
878 docg3->reliable);
879 if (block1 > docg3->max_block)
880 goto err;
881 873
882 ops->oobretlen = 0; 874 ops->oobretlen = 0;
883 ops->retlen = 0; 875 ops->retlen = 0;
884 ret = 0; 876 ret = 0;
877 skip = from % DOC_LAYOUT_PAGE_SIZE;
878 mutex_lock(&docg3->cascade->lock);
885 while (!ret && (len > 0 || ooblen > 0)) { 879 while (!ret && (len > 0 || ooblen > 0)) {
886 calc_block_sector(from, &block0, &block1, &page, &ofs, 880 calc_block_sector(from - skip, &block0, &block1, &page, &ofs,
887 docg3->reliable); 881 docg3->reliable);
888 nbdata = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE); 882 nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip);
889 nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE); 883 nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE);
890 ret = doc_read_page_prepare(docg3, block0, block1, page, ofs); 884 ret = doc_read_page_prepare(docg3, block0, block1, page, ofs);
891 if (ret < 0) 885 if (ret < 0)
892 goto err; 886 goto out;
893 ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES); 887 ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
894 if (ret < 0) 888 if (ret < 0)
895 goto err_in_read; 889 goto err_in_read;
896 ret = doc_read_page_getbytes(docg3, nbdata, buf, 1); 890 ret = doc_read_page_getbytes(docg3, skip, NULL, 1);
891 if (ret < skip)
892 goto err_in_read;
893 ret = doc_read_page_getbytes(docg3, nbdata, buf, 0);
897 if (ret < nbdata) 894 if (ret < nbdata)
898 goto err_in_read; 895 goto err_in_read;
899 doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE - nbdata, 896 doc_read_page_getbytes(docg3,
897 DOC_LAYOUT_PAGE_SIZE - nbdata - skip,
900 NULL, 0); 898 NULL, 0);
901 ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0); 899 ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0);
902 if (ret < nboob) 900 if (ret < nboob)
@@ -950,13 +948,15 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
950 len -= nbdata; 948 len -= nbdata;
951 ooblen -= nboob; 949 ooblen -= nboob;
952 from += DOC_LAYOUT_PAGE_SIZE; 950 from += DOC_LAYOUT_PAGE_SIZE;
951 skip = 0;
953 } 952 }
954 953
954out:
955 mutex_unlock(&docg3->cascade->lock);
955 return ret; 956 return ret;
956err_in_read: 957err_in_read:
957 doc_read_page_finish(docg3); 958 doc_read_page_finish(docg3);
958err: 959 goto out;
959 return ret;
960} 960}
961 961
962/** 962/**
@@ -1114,10 +1114,10 @@ static int doc_get_op_status(struct docg3 *docg3)
1114 */ 1114 */
1115static int doc_write_erase_wait_status(struct docg3 *docg3) 1115static int doc_write_erase_wait_status(struct docg3 *docg3)
1116{ 1116{
1117 int status, ret = 0; 1117 int i, status, ret = 0;
1118 1118
1119 if (!doc_is_ready(docg3)) 1119 for (i = 0; !doc_is_ready(docg3) && i < 5; i++)
1120 usleep_range(3000, 3000); 1120 msleep(20);
1121 if (!doc_is_ready(docg3)) { 1121 if (!doc_is_ready(docg3)) {
1122 doc_dbg("Timeout reached and the chip is still not ready\n"); 1122 doc_dbg("Timeout reached and the chip is still not ready\n");
1123 ret = -EAGAIN; 1123 ret = -EAGAIN;
@@ -1196,18 +1196,19 @@ static int doc_erase(struct mtd_info *mtd, struct erase_info *info)
1196 int block0, block1, page, ret, ofs = 0; 1196 int block0, block1, page, ret, ofs = 0;
1197 1197
1198 doc_dbg("doc_erase(from=%lld, len=%lld\n", info->addr, info->len); 1198 doc_dbg("doc_erase(from=%lld, len=%lld\n", info->addr, info->len);
1199 doc_set_device_id(docg3, docg3->device_id);
1200 1199
1201 info->state = MTD_ERASE_PENDING; 1200 info->state = MTD_ERASE_PENDING;
1202 calc_block_sector(info->addr + info->len, &block0, &block1, &page, 1201 calc_block_sector(info->addr + info->len, &block0, &block1, &page,
1203 &ofs, docg3->reliable); 1202 &ofs, docg3->reliable);
1204 ret = -EINVAL; 1203 ret = -EINVAL;
1205 if (block1 > docg3->max_block || page || ofs) 1204 if (info->addr + info->len > mtd->size || page || ofs)
1206 goto reset_err; 1205 goto reset_err;
1207 1206
1208 ret = 0; 1207 ret = 0;
1209 calc_block_sector(info->addr, &block0, &block1, &page, &ofs, 1208 calc_block_sector(info->addr, &block0, &block1, &page, &ofs,
1210 docg3->reliable); 1209 docg3->reliable);
1210 mutex_lock(&docg3->cascade->lock);
1211 doc_set_device_id(docg3, docg3->device_id);
1211 doc_set_reliable_mode(docg3); 1212 doc_set_reliable_mode(docg3);
1212 for (len = info->len; !ret && len > 0; len -= mtd->erasesize) { 1213 for (len = info->len; !ret && len > 0; len -= mtd->erasesize) {
1213 info->state = MTD_ERASING; 1214 info->state = MTD_ERASING;
@@ -1215,6 +1216,7 @@ static int doc_erase(struct mtd_info *mtd, struct erase_info *info)
1215 block0 += 2; 1216 block0 += 2;
1216 block1 += 2; 1217 block1 += 2;
1217 } 1218 }
1219 mutex_unlock(&docg3->cascade->lock);
1218 1220
1219 if (ret) 1221 if (ret)
1220 goto reset_err; 1222 goto reset_err;
@@ -1401,7 +1403,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
1401 struct mtd_oob_ops *ops) 1403 struct mtd_oob_ops *ops)
1402{ 1404{
1403 struct docg3 *docg3 = mtd->priv; 1405 struct docg3 *docg3 = mtd->priv;
1404 int block0, block1, page, ret, pofs = 0, autoecc, oobdelta; 1406 int ret, autoecc, oobdelta;
1405 u8 *oobbuf = ops->oobbuf; 1407 u8 *oobbuf = ops->oobbuf;
1406 u8 *buf = ops->datbuf; 1408 u8 *buf = ops->datbuf;
1407 size_t len, ooblen; 1409 size_t len, ooblen;
@@ -1438,12 +1440,8 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
1438 if (len && ooblen && 1440 if (len && ooblen &&
1439 (len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta)) 1441 (len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta))
1440 return -EINVAL; 1442 return -EINVAL;
1441 1443 if (ofs + len > mtd->size)
1442 ret = -EINVAL; 1444 return -EINVAL;
1443 calc_block_sector(ofs + len, &block0, &block1, &page, &pofs,
1444 docg3->reliable);
1445 if (block1 > docg3->max_block)
1446 goto err;
1447 1445
1448 ops->oobretlen = 0; 1446 ops->oobretlen = 0;
1449 ops->retlen = 0; 1447 ops->retlen = 0;
@@ -1457,6 +1455,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
1457 if (autoecc < 0) 1455 if (autoecc < 0)
1458 return autoecc; 1456 return autoecc;
1459 1457
1458 mutex_lock(&docg3->cascade->lock);
1460 while (!ret && len > 0) { 1459 while (!ret && len > 0) {
1461 memset(oob, 0, sizeof(oob)); 1460 memset(oob, 0, sizeof(oob));
1462 if (ofs == docg3->oob_write_ofs) 1461 if (ofs == docg3->oob_write_ofs)
@@ -1477,8 +1476,9 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
1477 } 1476 }
1478 ops->retlen += DOC_LAYOUT_PAGE_SIZE; 1477 ops->retlen += DOC_LAYOUT_PAGE_SIZE;
1479 } 1478 }
1480err: 1479
1481 doc_set_device_id(docg3, 0); 1480 doc_set_device_id(docg3, 0);
1481 mutex_unlock(&docg3->cascade->lock);
1482 return ret; 1482 return ret;
1483} 1483}
1484 1484
@@ -1535,9 +1535,11 @@ static ssize_t dps0_is_key_locked(struct device *dev,
1535 struct docg3 *docg3 = sysfs_dev2docg3(dev, attr); 1535 struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
1536 int dps0; 1536 int dps0;
1537 1537
1538 mutex_lock(&docg3->cascade->lock);
1538 doc_set_device_id(docg3, docg3->device_id); 1539 doc_set_device_id(docg3, docg3->device_id);
1539 dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS); 1540 dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
1540 doc_set_device_id(docg3, 0); 1541 doc_set_device_id(docg3, 0);
1542 mutex_unlock(&docg3->cascade->lock);
1541 1543
1542 return sprintf(buf, "%d\n", !(dps0 & DOC_DPS_KEY_OK)); 1544 return sprintf(buf, "%d\n", !(dps0 & DOC_DPS_KEY_OK));
1543} 1545}
@@ -1548,9 +1550,11 @@ static ssize_t dps1_is_key_locked(struct device *dev,
1548 struct docg3 *docg3 = sysfs_dev2docg3(dev, attr); 1550 struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
1549 int dps1; 1551 int dps1;
1550 1552
1553 mutex_lock(&docg3->cascade->lock);
1551 doc_set_device_id(docg3, docg3->device_id); 1554 doc_set_device_id(docg3, docg3->device_id);
1552 dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS); 1555 dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
1553 doc_set_device_id(docg3, 0); 1556 doc_set_device_id(docg3, 0);
1557 mutex_unlock(&docg3->cascade->lock);
1554 1558
1555 return sprintf(buf, "%d\n", !(dps1 & DOC_DPS_KEY_OK)); 1559 return sprintf(buf, "%d\n", !(dps1 & DOC_DPS_KEY_OK));
1556} 1560}
@@ -1565,10 +1569,12 @@ static ssize_t dps0_insert_key(struct device *dev,
1565 if (count != DOC_LAYOUT_DPS_KEY_LENGTH) 1569 if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
1566 return -EINVAL; 1570 return -EINVAL;
1567 1571
1572 mutex_lock(&docg3->cascade->lock);
1568 doc_set_device_id(docg3, docg3->device_id); 1573 doc_set_device_id(docg3, docg3->device_id);
1569 for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++) 1574 for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
1570 doc_writeb(docg3, buf[i], DOC_DPS0_KEY); 1575 doc_writeb(docg3, buf[i], DOC_DPS0_KEY);
1571 doc_set_device_id(docg3, 0); 1576 doc_set_device_id(docg3, 0);
1577 mutex_unlock(&docg3->cascade->lock);
1572 return count; 1578 return count;
1573} 1579}
1574 1580
@@ -1582,10 +1588,12 @@ static ssize_t dps1_insert_key(struct device *dev,
1582 if (count != DOC_LAYOUT_DPS_KEY_LENGTH) 1588 if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
1583 return -EINVAL; 1589 return -EINVAL;
1584 1590
1591 mutex_lock(&docg3->cascade->lock);
1585 doc_set_device_id(docg3, docg3->device_id); 1592 doc_set_device_id(docg3, docg3->device_id);
1586 for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++) 1593 for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
1587 doc_writeb(docg3, buf[i], DOC_DPS1_KEY); 1594 doc_writeb(docg3, buf[i], DOC_DPS1_KEY);
1588 doc_set_device_id(docg3, 0); 1595 doc_set_device_id(docg3, 0);
1596 mutex_unlock(&docg3->cascade->lock);
1589 return count; 1597 return count;
1590} 1598}
1591 1599
@@ -1601,13 +1609,13 @@ static struct device_attribute doc_sys_attrs[DOC_MAX_NBFLOORS][4] = {
1601}; 1609};
1602 1610
1603static int doc_register_sysfs(struct platform_device *pdev, 1611static int doc_register_sysfs(struct platform_device *pdev,
1604 struct mtd_info **floors) 1612 struct docg3_cascade *cascade)
1605{ 1613{
1606 int ret = 0, floor, i = 0; 1614 int ret = 0, floor, i = 0;
1607 struct device *dev = &pdev->dev; 1615 struct device *dev = &pdev->dev;
1608 1616
1609 for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS && floors[floor]; 1617 for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS &&
1610 floor++) 1618 cascade->floors[floor]; floor++)
1611 for (i = 0; !ret && i < 4; i++) 1619 for (i = 0; !ret && i < 4; i++)
1612 ret = device_create_file(dev, &doc_sys_attrs[floor][i]); 1620 ret = device_create_file(dev, &doc_sys_attrs[floor][i]);
1613 if (!ret) 1621 if (!ret)
@@ -1621,12 +1629,12 @@ static int doc_register_sysfs(struct platform_device *pdev,
1621} 1629}
1622 1630
1623static void doc_unregister_sysfs(struct platform_device *pdev, 1631static void doc_unregister_sysfs(struct platform_device *pdev,
1624 struct mtd_info **floors) 1632 struct docg3_cascade *cascade)
1625{ 1633{
1626 struct device *dev = &pdev->dev; 1634 struct device *dev = &pdev->dev;
1627 int floor, i; 1635 int floor, i;
1628 1636
1629 for (floor = 0; floor < DOC_MAX_NBFLOORS && floors[floor]; 1637 for (floor = 0; floor < DOC_MAX_NBFLOORS && cascade->floors[floor];
1630 floor++) 1638 floor++)
1631 for (i = 0; i < 4; i++) 1639 for (i = 0; i < 4; i++)
1632 device_remove_file(dev, &doc_sys_attrs[floor][i]); 1640 device_remove_file(dev, &doc_sys_attrs[floor][i]);
@@ -1640,7 +1648,11 @@ static int dbg_flashctrl_show(struct seq_file *s, void *p)
1640 struct docg3 *docg3 = (struct docg3 *)s->private; 1648 struct docg3 *docg3 = (struct docg3 *)s->private;
1641 1649
1642 int pos = 0; 1650 int pos = 0;
1643 u8 fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL); 1651 u8 fctrl;
1652
1653 mutex_lock(&docg3->cascade->lock);
1654 fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
1655 mutex_unlock(&docg3->cascade->lock);
1644 1656
1645 pos += seq_printf(s, 1657 pos += seq_printf(s,
1646 "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n", 1658 "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
@@ -1658,9 +1670,12 @@ static int dbg_asicmode_show(struct seq_file *s, void *p)
1658{ 1670{
1659 struct docg3 *docg3 = (struct docg3 *)s->private; 1671 struct docg3 *docg3 = (struct docg3 *)s->private;
1660 1672
1661 int pos = 0; 1673 int pos = 0, pctrl, mode;
1662 int pctrl = doc_register_readb(docg3, DOC_ASICMODE); 1674
1663 int mode = pctrl & 0x03; 1675 mutex_lock(&docg3->cascade->lock);
1676 pctrl = doc_register_readb(docg3, DOC_ASICMODE);
1677 mode = pctrl & 0x03;
1678 mutex_unlock(&docg3->cascade->lock);
1664 1679
1665 pos += seq_printf(s, 1680 pos += seq_printf(s,
1666 "%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (", 1681 "%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (",
@@ -1692,7 +1707,11 @@ static int dbg_device_id_show(struct seq_file *s, void *p)
1692{ 1707{
1693 struct docg3 *docg3 = (struct docg3 *)s->private; 1708 struct docg3 *docg3 = (struct docg3 *)s->private;
1694 int pos = 0; 1709 int pos = 0;
1695 int id = doc_register_readb(docg3, DOC_DEVICESELECT); 1710 int id;
1711
1712 mutex_lock(&docg3->cascade->lock);
1713 id = doc_register_readb(docg3, DOC_DEVICESELECT);
1714 mutex_unlock(&docg3->cascade->lock);
1696 1715
1697 pos += seq_printf(s, "DeviceId = %d\n", id); 1716 pos += seq_printf(s, "DeviceId = %d\n", id);
1698 return pos; 1717 return pos;
@@ -1705,6 +1724,7 @@ static int dbg_protection_show(struct seq_file *s, void *p)
1705 int pos = 0; 1724 int pos = 0;
1706 int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high; 1725 int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
1707 1726
1727 mutex_lock(&docg3->cascade->lock);
1708 protect = doc_register_readb(docg3, DOC_PROTECTION); 1728 protect = doc_register_readb(docg3, DOC_PROTECTION);
1709 dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS); 1729 dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
1710 dps0_low = doc_register_readw(docg3, DOC_DPS0_ADDRLOW); 1730 dps0_low = doc_register_readw(docg3, DOC_DPS0_ADDRLOW);
@@ -1712,6 +1732,7 @@ static int dbg_protection_show(struct seq_file *s, void *p)
1712 dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS); 1732 dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
1713 dps1_low = doc_register_readw(docg3, DOC_DPS1_ADDRLOW); 1733 dps1_low = doc_register_readw(docg3, DOC_DPS1_ADDRLOW);
1714 dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH); 1734 dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH);
1735 mutex_unlock(&docg3->cascade->lock);
1715 1736
1716 pos += seq_printf(s, "Protection = 0x%02x (", 1737 pos += seq_printf(s, "Protection = 0x%02x (",
1717 protect); 1738 protect);
@@ -1804,7 +1825,7 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
1804 1825
1805 switch (chip_id) { 1826 switch (chip_id) {
1806 case DOC_CHIPID_G3: 1827 case DOC_CHIPID_G3:
1807 mtd->name = kasprintf(GFP_KERNEL, "DiskOnChip G3 floor %d", 1828 mtd->name = kasprintf(GFP_KERNEL, "docg3.%d",
1808 docg3->device_id); 1829 docg3->device_id);
1809 docg3->max_block = 2047; 1830 docg3->max_block = 2047;
1810 break; 1831 break;
@@ -1817,16 +1838,17 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
1817 mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES; 1838 mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES;
1818 if (docg3->reliable == 2) 1839 if (docg3->reliable == 2)
1819 mtd->erasesize /= 2; 1840 mtd->erasesize /= 2;
1820 mtd->writesize = DOC_LAYOUT_PAGE_SIZE; 1841 mtd->writebufsize = mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
1821 mtd->oobsize = DOC_LAYOUT_OOB_SIZE; 1842 mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
1822 mtd->owner = THIS_MODULE; 1843 mtd->owner = THIS_MODULE;
1823 mtd->erase = doc_erase; 1844 mtd->_erase = doc_erase;
1824 mtd->read = doc_read; 1845 mtd->_read = doc_read;
1825 mtd->write = doc_write; 1846 mtd->_write = doc_write;
1826 mtd->read_oob = doc_read_oob; 1847 mtd->_read_oob = doc_read_oob;
1827 mtd->write_oob = doc_write_oob; 1848 mtd->_write_oob = doc_write_oob;
1828 mtd->block_isbad = doc_block_isbad; 1849 mtd->_block_isbad = doc_block_isbad;
1829 mtd->ecclayout = &docg3_oobinfo; 1850 mtd->ecclayout = &docg3_oobinfo;
1851 mtd->ecc_strength = DOC_ECC_BCH_T;
1830} 1852}
1831 1853
1832/** 1854/**
@@ -1834,6 +1856,7 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
1834 * @base: the io space where the device is probed 1856 * @base: the io space where the device is probed
1835 * @floor: the floor of the probed device 1857 * @floor: the floor of the probed device
1836 * @dev: the device 1858 * @dev: the device
1859 * @cascade: the cascade of chips this device will belong to
1837 * 1860 *
1838 * Checks whether a device at the specified IO range, and floor is available. 1861 * Checks whether a device at the specified IO range, and floor is available.
1839 * 1862 *
@@ -1841,8 +1864,8 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
1841 * if a memory allocation failed. If floor 0 is checked, a reset of the ASIC is 1864 * if a memory allocation failed. If floor 0 is checked, a reset of the ASIC is
1842 * launched. 1865 * launched.
1843 */ 1866 */
1844static struct mtd_info *doc_probe_device(void __iomem *base, int floor, 1867static struct mtd_info * __init
1845 struct device *dev) 1868doc_probe_device(struct docg3_cascade *cascade, int floor, struct device *dev)
1846{ 1869{
1847 int ret, bbt_nbpages; 1870 int ret, bbt_nbpages;
1848 u16 chip_id, chip_id_inv; 1871 u16 chip_id, chip_id_inv;
@@ -1865,7 +1888,7 @@ static struct mtd_info *doc_probe_device(void __iomem *base, int floor,
1865 1888
1866 docg3->dev = dev; 1889 docg3->dev = dev;
1867 docg3->device_id = floor; 1890 docg3->device_id = floor;
1868 docg3->base = base; 1891 docg3->cascade = cascade;
1869 doc_set_device_id(docg3, docg3->device_id); 1892 doc_set_device_id(docg3, docg3->device_id);
1870 if (!floor) 1893 if (!floor)
1871 doc_set_asic_mode(docg3, DOC_ASICMODE_RESET); 1894 doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
@@ -1882,7 +1905,7 @@ static struct mtd_info *doc_probe_device(void __iomem *base, int floor,
1882 switch (chip_id) { 1905 switch (chip_id) {
1883 case DOC_CHIPID_G3: 1906 case DOC_CHIPID_G3:
1884 doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n", 1907 doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n",
1885 base, floor); 1908 docg3->cascade->base, floor);
1886 break; 1909 break;
1887 default: 1910 default:
1888 doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id); 1911 doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id);
@@ -1927,10 +1950,12 @@ static void doc_release_device(struct mtd_info *mtd)
1927static int docg3_resume(struct platform_device *pdev) 1950static int docg3_resume(struct platform_device *pdev)
1928{ 1951{
1929 int i; 1952 int i;
1953 struct docg3_cascade *cascade;
1930 struct mtd_info **docg3_floors, *mtd; 1954 struct mtd_info **docg3_floors, *mtd;
1931 struct docg3 *docg3; 1955 struct docg3 *docg3;
1932 1956
1933 docg3_floors = platform_get_drvdata(pdev); 1957 cascade = platform_get_drvdata(pdev);
1958 docg3_floors = cascade->floors;
1934 mtd = docg3_floors[0]; 1959 mtd = docg3_floors[0];
1935 docg3 = mtd->priv; 1960 docg3 = mtd->priv;
1936 1961
@@ -1952,11 +1977,13 @@ static int docg3_resume(struct platform_device *pdev)
1952static int docg3_suspend(struct platform_device *pdev, pm_message_t state) 1977static int docg3_suspend(struct platform_device *pdev, pm_message_t state)
1953{ 1978{
1954 int floor, i; 1979 int floor, i;
1980 struct docg3_cascade *cascade;
1955 struct mtd_info **docg3_floors, *mtd; 1981 struct mtd_info **docg3_floors, *mtd;
1956 struct docg3 *docg3; 1982 struct docg3 *docg3;
1957 u8 ctrl, pwr_down; 1983 u8 ctrl, pwr_down;
1958 1984
1959 docg3_floors = platform_get_drvdata(pdev); 1985 cascade = platform_get_drvdata(pdev);
1986 docg3_floors = cascade->floors;
1960 for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) { 1987 for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
1961 mtd = docg3_floors[floor]; 1988 mtd = docg3_floors[floor];
1962 if (!mtd) 1989 if (!mtd)
@@ -2006,7 +2033,7 @@ static int __init docg3_probe(struct platform_device *pdev)
2006 struct resource *ress; 2033 struct resource *ress;
2007 void __iomem *base; 2034 void __iomem *base;
2008 int ret, floor, found = 0; 2035 int ret, floor, found = 0;
2009 struct mtd_info **docg3_floors; 2036 struct docg3_cascade *cascade;
2010 2037
2011 ret = -ENXIO; 2038 ret = -ENXIO;
2012 ress = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2039 ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2017,17 +2044,19 @@ static int __init docg3_probe(struct platform_device *pdev)
2017 base = ioremap(ress->start, DOC_IOSPACE_SIZE); 2044 base = ioremap(ress->start, DOC_IOSPACE_SIZE);
2018 2045
2019 ret = -ENOMEM; 2046 ret = -ENOMEM;
2020 docg3_floors = kzalloc(sizeof(*docg3_floors) * DOC_MAX_NBFLOORS, 2047 cascade = kzalloc(sizeof(*cascade) * DOC_MAX_NBFLOORS,
2021 GFP_KERNEL); 2048 GFP_KERNEL);
2022 if (!docg3_floors) 2049 if (!cascade)
2023 goto nomem1; 2050 goto nomem1;
2024 docg3_bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T, 2051 cascade->base = base;
2052 mutex_init(&cascade->lock);
2053 cascade->bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
2025 DOC_ECC_BCH_PRIMPOLY); 2054 DOC_ECC_BCH_PRIMPOLY);
2026 if (!docg3_bch) 2055 if (!cascade->bch)
2027 goto nomem2; 2056 goto nomem2;
2028 2057
2029 for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) { 2058 for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
2030 mtd = doc_probe_device(base, floor, dev); 2059 mtd = doc_probe_device(cascade, floor, dev);
2031 if (IS_ERR(mtd)) { 2060 if (IS_ERR(mtd)) {
2032 ret = PTR_ERR(mtd); 2061 ret = PTR_ERR(mtd);
2033 goto err_probe; 2062 goto err_probe;
@@ -2038,7 +2067,7 @@ static int __init docg3_probe(struct platform_device *pdev)
2038 else 2067 else
2039 continue; 2068 continue;
2040 } 2069 }
2041 docg3_floors[floor] = mtd; 2070 cascade->floors[floor] = mtd;
2042 ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 2071 ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL,
2043 0); 2072 0);
2044 if (ret) 2073 if (ret)
@@ -2046,26 +2075,26 @@ static int __init docg3_probe(struct platform_device *pdev)
2046 found++; 2075 found++;
2047 } 2076 }
2048 2077
2049 ret = doc_register_sysfs(pdev, docg3_floors); 2078 ret = doc_register_sysfs(pdev, cascade);
2050 if (ret) 2079 if (ret)
2051 goto err_probe; 2080 goto err_probe;
2052 if (!found) 2081 if (!found)
2053 goto notfound; 2082 goto notfound;
2054 2083
2055 platform_set_drvdata(pdev, docg3_floors); 2084 platform_set_drvdata(pdev, cascade);
2056 doc_dbg_register(docg3_floors[0]->priv); 2085 doc_dbg_register(cascade->floors[0]->priv);
2057 return 0; 2086 return 0;
2058 2087
2059notfound: 2088notfound:
2060 ret = -ENODEV; 2089 ret = -ENODEV;
2061 dev_info(dev, "No supported DiskOnChip found\n"); 2090 dev_info(dev, "No supported DiskOnChip found\n");
2062err_probe: 2091err_probe:
2063 free_bch(docg3_bch); 2092 kfree(cascade->bch);
2064 for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) 2093 for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
2065 if (docg3_floors[floor]) 2094 if (cascade->floors[floor])
2066 doc_release_device(docg3_floors[floor]); 2095 doc_release_device(cascade->floors[floor]);
2067nomem2: 2096nomem2:
2068 kfree(docg3_floors); 2097 kfree(cascade);
2069nomem1: 2098nomem1:
2070 iounmap(base); 2099 iounmap(base);
2071noress: 2100noress:
@@ -2080,19 +2109,19 @@ noress:
2080 */ 2109 */
2081static int __exit docg3_release(struct platform_device *pdev) 2110static int __exit docg3_release(struct platform_device *pdev)
2082{ 2111{
2083 struct mtd_info **docg3_floors = platform_get_drvdata(pdev); 2112 struct docg3_cascade *cascade = platform_get_drvdata(pdev);
2084 struct docg3 *docg3 = docg3_floors[0]->priv; 2113 struct docg3 *docg3 = cascade->floors[0]->priv;
2085 void __iomem *base = docg3->base; 2114 void __iomem *base = cascade->base;
2086 int floor; 2115 int floor;
2087 2116
2088 doc_unregister_sysfs(pdev, docg3_floors); 2117 doc_unregister_sysfs(pdev, cascade);
2089 doc_dbg_unregister(docg3); 2118 doc_dbg_unregister(docg3);
2090 for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) 2119 for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
2091 if (docg3_floors[floor]) 2120 if (cascade->floors[floor])
2092 doc_release_device(docg3_floors[floor]); 2121 doc_release_device(cascade->floors[floor]);
2093 2122
2094 kfree(docg3_floors); 2123 free_bch(docg3->cascade->bch);
2095 free_bch(docg3_bch); 2124 kfree(cascade);
2096 iounmap(base); 2125 iounmap(base);
2097 return 0; 2126 return 0;
2098} 2127}
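
All register traffic in docg3.c now goes through docg3->cascade (shared IO window, shared BCH state) and is serialized by cascade->lock, as the read/erase/write and sysfs/debugfs hunks above show. Condensed into one hypothetical helper purely for illustration (doc_locked_readb is not a function added by this patch):

static u8 doc_locked_readb(struct docg3 *docg3, u16 reg)
{
	u8 val;

	/* one IO window is shared by up to four floors, so serialize on the cascade */
	mutex_lock(&docg3->cascade->lock);
	doc_set_device_id(docg3, docg3->device_id);	/* select this floor */
	val = doc_register_readb(docg3, reg);
	doc_set_device_id(docg3, 0);			/* deselect before dropping the lock */
	mutex_unlock(&docg3->cascade->lock);

	return val;
}
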
diff --git a/drivers/mtd/devices/docg3.h b/drivers/mtd/devices/docg3.h
index db0da436b493..19fb93f96a3a 100644
--- a/drivers/mtd/devices/docg3.h
+++ b/drivers/mtd/devices/docg3.h
@@ -22,6 +22,8 @@
22#ifndef _MTD_DOCG3_H 22#ifndef _MTD_DOCG3_H
23#define _MTD_DOCG3_H 23#define _MTD_DOCG3_H
24 24
25#include <linux/mtd/mtd.h>
26
25/* 27/*
26 * Flash memory areas : 28 * Flash memory areas :
27 * - 0x0000 .. 0x07ff : IPL 29 * - 0x0000 .. 0x07ff : IPL
@@ -267,9 +269,23 @@
267#define DOC_LAYOUT_DPS_KEY_LENGTH 8 269#define DOC_LAYOUT_DPS_KEY_LENGTH 8
268 270
269/** 271/**
272 * struct docg3_cascade - Cascade of 1 to 4 docg3 chips
273 * @floors: floors (ie. one physical docg3 chip is one floor)
274 * @base: IO space to access all chips in the cascade
275 * @bch: the BCH correcting control structure
276 * @lock: lock to protect docg3 IO space from concurrent accesses
277 */
278struct docg3_cascade {
279 struct mtd_info *floors[DOC_MAX_NBFLOORS];
280 void __iomem *base;
281 struct bch_control *bch;
282 struct mutex lock;
283};
284
285/**
270 * struct docg3 - DiskOnChip driver private data 286 * struct docg3 - DiskOnChip driver private data
271 * @dev: the device currently under control 287 * @dev: the device currently under control
272 * @base: mapped IO space 288 * @cascade: the cascade this device belongs to
273 * @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3) 289 * @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3)
274 * @if_cfg: if true, reads are on 16bits, else reads are on 8bits 290 * @if_cfg: if true, reads are on 16bits, else reads are on 8bits
275 291
@@ -287,7 +303,7 @@
287 */ 303 */
288struct docg3 { 304struct docg3 {
289 struct device *dev; 305 struct device *dev;
290 void __iomem *base; 306 struct docg3_cascade *cascade;
291 unsigned int device_id:4; 307 unsigned int device_id:4;
292 unsigned int if_cfg:1; 308 unsigned int if_cfg:1;
293 unsigned int reliable:2; 309 unsigned int reliable:2;
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 3a11ea628e58..82bd00af5cc3 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -367,9 +367,6 @@ static int flash_erase (struct mtd_info *mtd,struct erase_info *instr)
367 printk (KERN_DEBUG "%s(addr = 0x%.8x, len = %d)\n", __func__, instr->addr, instr->len); 367 printk (KERN_DEBUG "%s(addr = 0x%.8x, len = %d)\n", __func__, instr->addr, instr->len);
368#endif 368#endif
369 369
370 /* sanity checks */
371 if (instr->addr + instr->len > mtd->size) return (-EINVAL);
372
373 /* 370 /*
374 * check that both start and end of the requested erase are 371 * check that both start and end of the requested erase are
375 * aligned with the erasesize at the appropriate addresses. 372 * aligned with the erasesize at the appropriate addresses.
@@ -440,10 +437,6 @@ static int flash_read (struct mtd_info *mtd,loff_t from,size_t len,size_t *retle
440 printk (KERN_DEBUG "%s(from = 0x%.8x, len = %d)\n", __func__, (__u32)from, len); 437 printk (KERN_DEBUG "%s(from = 0x%.8x, len = %d)\n", __func__, (__u32)from, len);
441#endif 438#endif
442 439
443 /* sanity checks */
444 if (!len) return (0);
445 if (from + len > mtd->size) return (-EINVAL);
446
447 /* we always read len bytes */ 440 /* we always read len bytes */
448 *retlen = len; 441 *retlen = len;
449 442
@@ -522,11 +515,8 @@ static int flash_write (struct mtd_info *mtd,loff_t to,size_t len,size_t *retlen
522 printk (KERN_DEBUG "%s(to = 0x%.8x, len = %d)\n", __func__, (__u32)to, len); 515 printk (KERN_DEBUG "%s(to = 0x%.8x, len = %d)\n", __func__, (__u32)to, len);
523#endif 516#endif
524 517
525 *retlen = 0;
526
527 /* sanity checks */ 518 /* sanity checks */
528 if (!len) return (0); 519 if (!len) return (0);
529 if (to + len > mtd->size) return (-EINVAL);
530 520
531 /* first, we write a 0xFF.... padded byte until we reach a dword boundary */ 521 /* first, we write a 0xFF.... padded byte until we reach a dword boundary */
532 if (to & (BUSWIDTH - 1)) 522 if (to & (BUSWIDTH - 1))
@@ -630,14 +620,15 @@ static int __init lart_flash_init (void)
630 mtd.name = module_name; 620 mtd.name = module_name;
631 mtd.type = MTD_NORFLASH; 621 mtd.type = MTD_NORFLASH;
632 mtd.writesize = 1; 622 mtd.writesize = 1;
623 mtd.writebufsize = 4;
633 mtd.flags = MTD_CAP_NORFLASH; 624 mtd.flags = MTD_CAP_NORFLASH;
634 mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN; 625 mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN;
635 mtd.erasesize = FLASH_BLOCKSIZE_MAIN; 626 mtd.erasesize = FLASH_BLOCKSIZE_MAIN;
636 mtd.numeraseregions = ARRAY_SIZE(erase_regions); 627 mtd.numeraseregions = ARRAY_SIZE(erase_regions);
637 mtd.eraseregions = erase_regions; 628 mtd.eraseregions = erase_regions;
638 mtd.erase = flash_erase; 629 mtd._erase = flash_erase;
639 mtd.read = flash_read; 630 mtd._read = flash_read;
640 mtd.write = flash_write; 631 mtd._write = flash_write;
641 mtd.owner = THIS_MODULE; 632 mtd.owner = THIS_MODULE;
642 633
643#ifdef LART_DEBUG 634#ifdef LART_DEBUG
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 7c60dddbefc0..1924d247c1cb 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -288,9 +288,6 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
288 __func__, (long long)instr->addr, 288 __func__, (long long)instr->addr,
289 (long long)instr->len); 289 (long long)instr->len);
290 290
291 /* sanity checks */
292 if (instr->addr + instr->len > flash->mtd.size)
293 return -EINVAL;
294 div_u64_rem(instr->len, mtd->erasesize, &rem); 291 div_u64_rem(instr->len, mtd->erasesize, &rem);
295 if (rem) 292 if (rem)
296 return -EINVAL; 293 return -EINVAL;
@@ -349,13 +346,6 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
349 pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev), 346 pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
350 __func__, (u32)from, len); 347 __func__, (u32)from, len);
351 348
352 /* sanity checks */
353 if (!len)
354 return 0;
355
356 if (from + len > flash->mtd.size)
357 return -EINVAL;
358
359 spi_message_init(&m); 349 spi_message_init(&m);
360 memset(t, 0, (sizeof t)); 350 memset(t, 0, (sizeof t));
361 351
@@ -371,9 +361,6 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
371 t[1].len = len; 361 t[1].len = len;
372 spi_message_add_tail(&t[1], &m); 362 spi_message_add_tail(&t[1], &m);
373 363
374 /* Byte count starts at zero. */
375 *retlen = 0;
376
377 mutex_lock(&flash->lock); 364 mutex_lock(&flash->lock);
378 365
379 /* Wait till previous write/erase is done. */ 366 /* Wait till previous write/erase is done. */
@@ -417,15 +404,6 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
417 pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev), 404 pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
418 __func__, (u32)to, len); 405 __func__, (u32)to, len);
419 406
420 *retlen = 0;
421
422 /* sanity checks */
423 if (!len)
424 return(0);
425
426 if (to + len > flash->mtd.size)
427 return -EINVAL;
428
429 spi_message_init(&m); 407 spi_message_init(&m);
430 memset(t, 0, (sizeof t)); 408 memset(t, 0, (sizeof t));
431 409
@@ -509,15 +487,6 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
509 pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev), 487 pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
510 __func__, (u32)to, len); 488 __func__, (u32)to, len);
511 489
512 *retlen = 0;
513
514 /* sanity checks */
515 if (!len)
516 return 0;
517
518 if (to + len > flash->mtd.size)
519 return -EINVAL;
520
521 spi_message_init(&m); 490 spi_message_init(&m);
522 memset(t, 0, (sizeof t)); 491 memset(t, 0, (sizeof t));
523 492
@@ -908,14 +877,14 @@ static int __devinit m25p_probe(struct spi_device *spi)
908 flash->mtd.writesize = 1; 877 flash->mtd.writesize = 1;
909 flash->mtd.flags = MTD_CAP_NORFLASH; 878 flash->mtd.flags = MTD_CAP_NORFLASH;
910 flash->mtd.size = info->sector_size * info->n_sectors; 879 flash->mtd.size = info->sector_size * info->n_sectors;
911 flash->mtd.erase = m25p80_erase; 880 flash->mtd._erase = m25p80_erase;
912 flash->mtd.read = m25p80_read; 881 flash->mtd._read = m25p80_read;
913 882
914 /* sst flash chips use AAI word program */ 883 /* sst flash chips use AAI word program */
915 if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) 884 if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
916 flash->mtd.write = sst_write; 885 flash->mtd._write = sst_write;
917 else 886 else
918 flash->mtd.write = m25p80_write; 887 flash->mtd._write = m25p80_write;
919 888
920 /* prefer "small sector" erase if possible */ 889 /* prefer "small sector" erase if possible */
921 if (info->flags & SECT_4K) { 890 if (info->flags & SECT_4K) {
@@ -932,6 +901,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
932 ppdata.of_node = spi->dev.of_node; 901 ppdata.of_node = spi->dev.of_node;
933 flash->mtd.dev.parent = &spi->dev; 902 flash->mtd.dev.parent = &spi->dev;
934 flash->page_size = info->page_size; 903 flash->page_size = info->page_size;
904 flash->mtd.writebufsize = flash->page_size;
935 905
936 if (info->addr_width) 906 if (info->addr_width)
937 flash->addr_width = info->addr_width; 907 flash->addr_width = info->addr_width;
@@ -1004,21 +974,7 @@ static struct spi_driver m25p80_driver = {
1004 */ 974 */
1005}; 975};
1006 976
1007 977module_spi_driver(m25p80_driver);
1008static int __init m25p80_init(void)
1009{
1010 return spi_register_driver(&m25p80_driver);
1011}
1012
1013
1014static void __exit m25p80_exit(void)
1015{
1016 spi_unregister_driver(&m25p80_driver);
1017}
1018
1019
1020module_init(m25p80_init);
1021module_exit(m25p80_exit);
1022 978
1023MODULE_LICENSE("GPL"); 979MODULE_LICENSE("GPL");
1024MODULE_AUTHOR("Mike Lavender"); 980MODULE_AUTHOR("Mike Lavender");
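
module_spi_driver(m25p80_driver) generates the init/exit boilerplate deleted above; it is roughly equivalent to:

static int __init m25p80_driver_init(void)
{
	return spi_register_driver(&m25p80_driver);
}
module_init(m25p80_driver_init);

static void __exit m25p80_driver_exit(void)
{
	spi_unregister_driver(&m25p80_driver);
}
module_exit(m25p80_driver_exit);
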
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 8423fb6d4f26..182849d39c61 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -59,12 +59,8 @@ static int ms02nv_read(struct mtd_info *mtd, loff_t from,
59{ 59{
60 struct ms02nv_private *mp = mtd->priv; 60 struct ms02nv_private *mp = mtd->priv;
61 61
62 if (from + len > mtd->size)
63 return -EINVAL;
64
65 memcpy(buf, mp->uaddr + from, len); 62 memcpy(buf, mp->uaddr + from, len);
66 *retlen = len; 63 *retlen = len;
67
68 return 0; 64 return 0;
69} 65}
70 66
@@ -73,12 +69,8 @@ static int ms02nv_write(struct mtd_info *mtd, loff_t to,
73{ 69{
74 struct ms02nv_private *mp = mtd->priv; 70 struct ms02nv_private *mp = mtd->priv;
75 71
76 if (to + len > mtd->size)
77 return -EINVAL;
78
79 memcpy(mp->uaddr + to, buf, len); 72 memcpy(mp->uaddr + to, buf, len);
80 *retlen = len; 73 *retlen = len;
81
82 return 0; 74 return 0;
83} 75}
84 76
@@ -215,8 +207,8 @@ static int __init ms02nv_init_one(ulong addr)
215 mtd->size = fixsize; 207 mtd->size = fixsize;
216 mtd->name = (char *)ms02nv_name; 208 mtd->name = (char *)ms02nv_name;
217 mtd->owner = THIS_MODULE; 209 mtd->owner = THIS_MODULE;
218 mtd->read = ms02nv_read; 210 mtd->_read = ms02nv_read;
219 mtd->write = ms02nv_write; 211 mtd->_write = ms02nv_write;
220 mtd->writesize = 1; 212 mtd->writesize = 1;
221 213
222 ret = -EIO; 214 ret = -EIO;
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 236057ead0d2..928fb0e6d73a 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -164,9 +164,6 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
164 dev_name(&spi->dev), (long long)instr->addr, 164 dev_name(&spi->dev), (long long)instr->addr,
165 (long long)instr->len); 165 (long long)instr->len);
166 166
167 /* Sanity checks */
168 if (instr->addr + instr->len > mtd->size)
169 return -EINVAL;
170 div_u64_rem(instr->len, priv->page_size, &rem); 167 div_u64_rem(instr->len, priv->page_size, &rem);
171 if (rem) 168 if (rem)
172 return -EINVAL; 169 return -EINVAL;
@@ -252,14 +249,6 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
252 pr_debug("%s: read 0x%x..0x%x\n", dev_name(&priv->spi->dev), 249 pr_debug("%s: read 0x%x..0x%x\n", dev_name(&priv->spi->dev),
253 (unsigned)from, (unsigned)(from + len)); 250 (unsigned)from, (unsigned)(from + len));
254 251
255 *retlen = 0;
256
257 /* Sanity checks */
258 if (!len)
259 return 0;
260 if (from + len > mtd->size)
261 return -EINVAL;
262
263 /* Calculate flash page/byte address */ 252 /* Calculate flash page/byte address */
264 addr = (((unsigned)from / priv->page_size) << priv->page_offset) 253 addr = (((unsigned)from / priv->page_size) << priv->page_offset)
265 + ((unsigned)from % priv->page_size); 254 + ((unsigned)from % priv->page_size);
@@ -328,14 +317,6 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
328 pr_debug("%s: write 0x%x..0x%x\n", 317 pr_debug("%s: write 0x%x..0x%x\n",
329 dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len)); 318 dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len));
330 319
331 *retlen = 0;
332
333 /* Sanity checks */
334 if (!len)
335 return 0;
336 if ((to + len) > mtd->size)
337 return -EINVAL;
338
339 spi_message_init(&msg); 320 spi_message_init(&msg);
340 321
341 x[0].tx_buf = command = priv->command; 322 x[0].tx_buf = command = priv->command;
@@ -490,8 +471,6 @@ static ssize_t otp_read(struct spi_device *spi, unsigned base,
490 471
491 if ((off + len) > 64) 472 if ((off + len) > 64)
492 len = 64 - off; 473 len = 64 - off;
493 if (len == 0)
494 return len;
495 474
496 spi_message_init(&m); 475 spi_message_init(&m);
497 476
@@ -611,16 +590,16 @@ static int dataflash_write_user_otp(struct mtd_info *mtd,
611 590
612static char *otp_setup(struct mtd_info *device, char revision) 591static char *otp_setup(struct mtd_info *device, char revision)
613{ 592{
614 device->get_fact_prot_info = dataflash_get_otp_info; 593 device->_get_fact_prot_info = dataflash_get_otp_info;
615 device->read_fact_prot_reg = dataflash_read_fact_otp; 594 device->_read_fact_prot_reg = dataflash_read_fact_otp;
616 device->get_user_prot_info = dataflash_get_otp_info; 595 device->_get_user_prot_info = dataflash_get_otp_info;
617 device->read_user_prot_reg = dataflash_read_user_otp; 596 device->_read_user_prot_reg = dataflash_read_user_otp;
618 597
619 /* rev c parts (at45db321c and at45db1281 only!) use a 598 /* rev c parts (at45db321c and at45db1281 only!) use a
620 * different write procedure; not (yet?) implemented. 599 * different write procedure; not (yet?) implemented.
621 */ 600 */
622 if (revision > 'c') 601 if (revision > 'c')
623 device->write_user_prot_reg = dataflash_write_user_otp; 602 device->_write_user_prot_reg = dataflash_write_user_otp;
624 603
625 return ", OTP"; 604 return ", OTP";
626} 605}
@@ -672,9 +651,9 @@ add_dataflash_otp(struct spi_device *spi, char *name,
672 device->owner = THIS_MODULE; 651 device->owner = THIS_MODULE;
673 device->type = MTD_DATAFLASH; 652 device->type = MTD_DATAFLASH;
674 device->flags = MTD_WRITEABLE; 653 device->flags = MTD_WRITEABLE;
675 device->erase = dataflash_erase; 654 device->_erase = dataflash_erase;
676 device->read = dataflash_read; 655 device->_read = dataflash_read;
677 device->write = dataflash_write; 656 device->_write = dataflash_write;
678 device->priv = priv; 657 device->priv = priv;
679 658
680 device->dev.parent = &spi->dev; 659 device->dev.parent = &spi->dev;
@@ -946,18 +925,7 @@ static struct spi_driver dataflash_driver = {
946 /* FIXME: investigate suspend and resume... */ 925 /* FIXME: investigate suspend and resume... */
947}; 926};
948 927
949static int __init dataflash_init(void) 928module_spi_driver(dataflash_driver);
950{
951 return spi_register_driver(&dataflash_driver);
952}
953module_init(dataflash_init);
954
955static void __exit dataflash_exit(void)
956{
957 spi_unregister_driver(&dataflash_driver);
958}
959module_exit(dataflash_exit);
960
961 929
962MODULE_LICENSE("GPL"); 930MODULE_LICENSE("GPL");
963MODULE_AUTHOR("Andrew Victor, David Brownell"); 931MODULE_AUTHOR("Andrew Victor, David Brownell");
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index 2562689ba6b4..ec59d65897fb 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -34,34 +34,23 @@ static struct mtd_info *mtd_info;
34 34
35static int ram_erase(struct mtd_info *mtd, struct erase_info *instr) 35static int ram_erase(struct mtd_info *mtd, struct erase_info *instr)
36{ 36{
37 if (instr->addr + instr->len > mtd->size)
38 return -EINVAL;
39
40 memset((char *)mtd->priv + instr->addr, 0xff, instr->len); 37 memset((char *)mtd->priv + instr->addr, 0xff, instr->len);
41
42 instr->state = MTD_ERASE_DONE; 38 instr->state = MTD_ERASE_DONE;
43 mtd_erase_callback(instr); 39 mtd_erase_callback(instr);
44
45 return 0; 40 return 0;
46} 41}
47 42
48static int ram_point(struct mtd_info *mtd, loff_t from, size_t len, 43static int ram_point(struct mtd_info *mtd, loff_t from, size_t len,
49 size_t *retlen, void **virt, resource_size_t *phys) 44 size_t *retlen, void **virt, resource_size_t *phys)
50{ 45{
51 if (from + len > mtd->size)
52 return -EINVAL;
53
54 /* can we return a physical address with this driver? */
55 if (phys)
56 return -EINVAL;
57
58 *virt = mtd->priv + from; 46 *virt = mtd->priv + from;
59 *retlen = len; 47 *retlen = len;
60 return 0; 48 return 0;
61} 49}
62 50
63static void ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 51static int ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
64{ 52{
53 return 0;
65} 54}
66 55
67/* 56/*
@@ -80,11 +69,7 @@ static unsigned long ram_get_unmapped_area(struct mtd_info *mtd,
80static int ram_read(struct mtd_info *mtd, loff_t from, size_t len, 69static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
81 size_t *retlen, u_char *buf) 70 size_t *retlen, u_char *buf)
82{ 71{
83 if (from + len > mtd->size)
84 return -EINVAL;
85
86 memcpy(buf, mtd->priv + from, len); 72 memcpy(buf, mtd->priv + from, len);
87
88 *retlen = len; 73 *retlen = len;
89 return 0; 74 return 0;
90} 75}
@@ -92,11 +77,7 @@ static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
92static int ram_write(struct mtd_info *mtd, loff_t to, size_t len, 77static int ram_write(struct mtd_info *mtd, loff_t to, size_t len,
93 size_t *retlen, const u_char *buf) 78 size_t *retlen, const u_char *buf)
94{ 79{
95 if (to + len > mtd->size)
96 return -EINVAL;
97
98 memcpy((char *)mtd->priv + to, buf, len); 80 memcpy((char *)mtd->priv + to, buf, len);
99
100 *retlen = len; 81 *retlen = len;
101 return 0; 82 return 0;
102} 83}
@@ -126,12 +107,12 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
126 mtd->priv = mapped_address; 107 mtd->priv = mapped_address;
127 108
128 mtd->owner = THIS_MODULE; 109 mtd->owner = THIS_MODULE;
129 mtd->erase = ram_erase; 110 mtd->_erase = ram_erase;
130 mtd->point = ram_point; 111 mtd->_point = ram_point;
131 mtd->unpoint = ram_unpoint; 112 mtd->_unpoint = ram_unpoint;
132 mtd->get_unmapped_area = ram_get_unmapped_area; 113 mtd->_get_unmapped_area = ram_get_unmapped_area;
133 mtd->read = ram_read; 114 mtd->_read = ram_read;
134 mtd->write = ram_write; 115 mtd->_write = ram_write;
135 116
136 if (mtd_device_register(mtd, NULL, 0)) 117 if (mtd_device_register(mtd, NULL, 0))
137 return -EIO; 118 return -EIO;
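
ram_point() and ram_unpoint() now use the int-returning ->_point/->_unpoint prototypes, and the range and zero-length checks removed from the mtdram handlers are again assumed to be made centrally before the driver method is called. A sketch of that calling convention (an approximation, not the exact mtdcore code):

static int example_mtd_point(struct mtd_info *mtd, loff_t from, size_t len,
			     size_t *retlen, void **virt, resource_size_t *phys)
{
	*retlen = 0;
	*virt = NULL;
	if (!mtd->_point)
		return -EOPNOTSUPP;		/* driver cannot map its storage directly */
	if (from < 0 || from > mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_point(mtd, from, len, retlen, virt, phys);
}
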
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 23423bd00b06..67823de68db6 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -33,45 +33,33 @@ struct phram_mtd_list {
33 33
34static LIST_HEAD(phram_list); 34static LIST_HEAD(phram_list);
35 35
36
37static int phram_erase(struct mtd_info *mtd, struct erase_info *instr) 36static int phram_erase(struct mtd_info *mtd, struct erase_info *instr)
38{ 37{
39 u_char *start = mtd->priv; 38 u_char *start = mtd->priv;
40 39
41 if (instr->addr + instr->len > mtd->size)
42 return -EINVAL;
43
44 memset(start + instr->addr, 0xff, instr->len); 40 memset(start + instr->addr, 0xff, instr->len);
45 41
46 /* This'll catch a few races. Free the thing before returning :) 42 /*
43 * This'll catch a few races. Free the thing before returning :)
47 * I don't feel at all ashamed. This kind of thing is possible anyway 44 * I don't feel at all ashamed. This kind of thing is possible anyway
48 * with flash, but unlikely. 45 * with flash, but unlikely.
49 */ 46 */
50
51 instr->state = MTD_ERASE_DONE; 47 instr->state = MTD_ERASE_DONE;
52
53 mtd_erase_callback(instr); 48 mtd_erase_callback(instr);
54
55 return 0; 49 return 0;
56} 50}
57 51
58static int phram_point(struct mtd_info *mtd, loff_t from, size_t len, 52static int phram_point(struct mtd_info *mtd, loff_t from, size_t len,
59 size_t *retlen, void **virt, resource_size_t *phys) 53 size_t *retlen, void **virt, resource_size_t *phys)
60{ 54{
61 if (from + len > mtd->size)
62 return -EINVAL;
63
64 /* can we return a physical address with this driver? */
65 if (phys)
66 return -EINVAL;
67
68 *virt = mtd->priv + from; 55 *virt = mtd->priv + from;
69 *retlen = len; 56 *retlen = len;
70 return 0; 57 return 0;
71} 58}
72 59
73static void phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 60static int phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
74{ 61{
62 return 0;
75} 63}
76 64
77static int phram_read(struct mtd_info *mtd, loff_t from, size_t len, 65static int phram_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -79,14 +67,7 @@ static int phram_read(struct mtd_info *mtd, loff_t from, size_t len,
79{ 67{
80 u_char *start = mtd->priv; 68 u_char *start = mtd->priv;
81 69
82 if (from >= mtd->size)
83 return -EINVAL;
84
85 if (len > mtd->size - from)
86 len = mtd->size - from;
87
88 memcpy(buf, start + from, len); 70 memcpy(buf, start + from, len);
89
90 *retlen = len; 71 *retlen = len;
91 return 0; 72 return 0;
92} 73}
@@ -96,20 +77,11 @@ static int phram_write(struct mtd_info *mtd, loff_t to, size_t len,
96{ 77{
97 u_char *start = mtd->priv; 78 u_char *start = mtd->priv;
98 79
99 if (to >= mtd->size)
100 return -EINVAL;
101
102 if (len > mtd->size - to)
103 len = mtd->size - to;
104
105 memcpy(start + to, buf, len); 80 memcpy(start + to, buf, len);
106
107 *retlen = len; 81 *retlen = len;
108 return 0; 82 return 0;
109} 83}
110 84
111
112
113static void unregister_devices(void) 85static void unregister_devices(void)
114{ 86{
115 struct phram_mtd_list *this, *safe; 87 struct phram_mtd_list *this, *safe;
@@ -142,11 +114,11 @@ static int register_device(char *name, unsigned long start, unsigned long len)
142 new->mtd.name = name; 114 new->mtd.name = name;
143 new->mtd.size = len; 115 new->mtd.size = len;
144 new->mtd.flags = MTD_CAP_RAM; 116 new->mtd.flags = MTD_CAP_RAM;
145 new->mtd.erase = phram_erase; 117 new->mtd._erase = phram_erase;
146 new->mtd.point = phram_point; 118 new->mtd._point = phram_point;
147 new->mtd.unpoint = phram_unpoint; 119 new->mtd._unpoint = phram_unpoint;
148 new->mtd.read = phram_read; 120 new->mtd._read = phram_read;
149 new->mtd.write = phram_write; 121 new->mtd._write = phram_write;
150 new->mtd.owner = THIS_MODULE; 122 new->mtd.owner = THIS_MODULE;
151 new->mtd.type = MTD_RAM; 123 new->mtd.type = MTD_RAM;
152 new->mtd.erasesize = PAGE_SIZE; 124 new->mtd.erasesize = PAGE_SIZE;
@@ -233,7 +205,17 @@ static inline void kill_final_newline(char *str)
233 return 1; \ 205 return 1; \
234} while (0) 206} while (0)
235 207
236static int phram_setup(const char *val, struct kernel_param *kp) 208/*
209 * This shall contain the module parameter if any. It is of the form:
210 * - phram=<device>,<address>,<size> for module case
211 * - phram.phram=<device>,<address>,<size> for built-in case
212 * We leave 64 bytes for the device name, 12 for the address and 12 for the
213 * size.
214 * Example: phram.phram=rootfs,0xa0000000,512Mi
215 */
216static __initdata char phram_paramline[64+12+12];
217
218static int __init phram_setup(const char *val)
237{ 219{
238 char buf[64+12+12], *str = buf; 220 char buf[64+12+12], *str = buf;
239 char *token[3]; 221 char *token[3];
@@ -282,12 +264,28 @@ static int phram_setup(const char *val, struct kernel_param *kp)
282 return ret; 264 return ret;
283} 265}
284 266
285module_param_call(phram, phram_setup, NULL, NULL, 000); 267static int __init phram_param_call(const char *val, struct kernel_param *kp)
268{
269 /*
270 * This function is always called before 'init_phram()', whether
271 * built-in or module.
272 */
273 if (strlen(val) >= sizeof(phram_paramline))
274 return -ENOSPC;
275 strcpy(phram_paramline, val);
276
277 return 0;
278}
279
280module_param_call(phram, phram_param_call, NULL, NULL, 000);
286MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>\""); 281MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>\"");
287 282
288 283
289static int __init init_phram(void) 284static int __init init_phram(void)
290{ 285{
286 if (phram_paramline[0])
287 return phram_setup(phram_paramline);
288
291 return 0; 289 return 0;
292} 290}
293 291
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index 5d53c5760a6c..0c51b988e1f8 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -94,12 +94,48 @@
94#include <linux/ioctl.h> 94#include <linux/ioctl.h>
95#include <asm/io.h> 95#include <asm/io.h>
96#include <linux/pci.h> 96#include <linux/pci.h>
97
98#include <linux/mtd/mtd.h> 97#include <linux/mtd/mtd.h>
99#include <linux/mtd/pmc551.h> 98
99#define PMC551_VERSION \
100 "Ramix PMC551 PCI Mezzanine Ram Driver. (C) 1999,2000 Nortel Networks.\n"
101
102#define PCI_VENDOR_ID_V3_SEMI 0x11b0
103#define PCI_DEVICE_ID_V3_SEMI_V370PDC 0x0200
104
105#define PMC551_PCI_MEM_MAP0 0x50
106#define PMC551_PCI_MEM_MAP1 0x54
107#define PMC551_PCI_MEM_MAP_MAP_ADDR_MASK 0x3ff00000
108#define PMC551_PCI_MEM_MAP_APERTURE_MASK 0x000000f0
109#define PMC551_PCI_MEM_MAP_REG_EN 0x00000002
110#define PMC551_PCI_MEM_MAP_ENABLE 0x00000001
111
112#define PMC551_SDRAM_MA 0x60
113#define PMC551_SDRAM_CMD 0x62
114#define PMC551_DRAM_CFG 0x64
115#define PMC551_SYS_CTRL_REG 0x78
116
117#define PMC551_DRAM_BLK0 0x68
118#define PMC551_DRAM_BLK1 0x6c
119#define PMC551_DRAM_BLK2 0x70
120#define PMC551_DRAM_BLK3 0x74
121#define PMC551_DRAM_BLK_GET_SIZE(x) (524288 << ((x >> 4) & 0x0f))
122#define PMC551_DRAM_BLK_SET_COL_MUX(x, v) (((x) & ~0x00007000) | (((v) & 0x7) << 12))
123#define PMC551_DRAM_BLK_SET_ROW_MUX(x, v) (((x) & ~0x00000f00) | (((v) & 0xf) << 8))
124
125struct mypriv {
126 struct pci_dev *dev;
127 u_char *start;
128 u32 base_map0;
129 u32 curr_map0;
130 u32 asize;
131 struct mtd_info *nextpmc551;
132};
100 133
101static struct mtd_info *pmc551list; 134static struct mtd_info *pmc551list;
102 135
136static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
137 size_t *retlen, void **virt, resource_size_t *phys);
138
103static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr) 139static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr)
104{ 140{
105 struct mypriv *priv = mtd->priv; 141 struct mypriv *priv = mtd->priv;
@@ -115,16 +151,6 @@ static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr)
115#endif 151#endif
116 152
117 end = instr->addr + instr->len - 1; 153 end = instr->addr + instr->len - 1;
118
119 /* Is it past the end? */
120 if (end > mtd->size) {
121#ifdef CONFIG_MTD_PMC551_DEBUG
122 printk(KERN_DEBUG "pmc551_erase() out of bounds (%ld > %ld)\n",
123 (long)end, (long)mtd->size);
124#endif
125 return -EINVAL;
126 }
127
128 eoff_hi = end & ~(priv->asize - 1); 154 eoff_hi = end & ~(priv->asize - 1);
129 soff_hi = instr->addr & ~(priv->asize - 1); 155 soff_hi = instr->addr & ~(priv->asize - 1);
130 eoff_lo = end & (priv->asize - 1); 156 eoff_lo = end & (priv->asize - 1);
@@ -178,18 +204,6 @@ static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
178 printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len); 204 printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len);
179#endif 205#endif
180 206
181 if (from + len > mtd->size) {
182#ifdef CONFIG_MTD_PMC551_DEBUG
183 printk(KERN_DEBUG "pmc551_point() out of bounds (%ld > %ld)\n",
184 (long)from + len, (long)mtd->size);
185#endif
186 return -EINVAL;
187 }
188
189 /* can we return a physical address with this driver? */
190 if (phys)
191 return -EINVAL;
192
193 soff_hi = from & ~(priv->asize - 1); 207 soff_hi = from & ~(priv->asize - 1);
194 soff_lo = from & (priv->asize - 1); 208 soff_lo = from & (priv->asize - 1);
195 209
@@ -205,11 +219,12 @@ static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
205 return 0; 219 return 0;
206} 220}
207 221
208static void pmc551_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 222static int pmc551_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
209{ 223{
210#ifdef CONFIG_MTD_PMC551_DEBUG 224#ifdef CONFIG_MTD_PMC551_DEBUG
211 printk(KERN_DEBUG "pmc551_unpoint()\n"); 225 printk(KERN_DEBUG "pmc551_unpoint()\n");
212#endif 226#endif
227 return 0;
213} 228}
214 229
215static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len, 230static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -228,16 +243,6 @@ static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len,
228#endif 243#endif
229 244
230 end = from + len - 1; 245 end = from + len - 1;
231
232 /* Is it past the end? */
233 if (end > mtd->size) {
234#ifdef CONFIG_MTD_PMC551_DEBUG
235 printk(KERN_DEBUG "pmc551_read() out of bounds (%ld > %ld)\n",
236 (long)end, (long)mtd->size);
237#endif
238 return -EINVAL;
239 }
240
241 soff_hi = from & ~(priv->asize - 1); 246 soff_hi = from & ~(priv->asize - 1);
242 eoff_hi = end & ~(priv->asize - 1); 247 eoff_hi = end & ~(priv->asize - 1);
243 soff_lo = from & (priv->asize - 1); 248 soff_lo = from & (priv->asize - 1);
@@ -295,16 +300,6 @@ static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len,
295#endif 300#endif
296 301
297 end = to + len - 1; 302 end = to + len - 1;
298 /* Is it past the end? or did the u32 wrap? */
299 if (end > mtd->size) {
300#ifdef CONFIG_MTD_PMC551_DEBUG
301 printk(KERN_DEBUG "pmc551_write() out of bounds (end: %ld, "
302 "size: %ld, to: %ld)\n", (long)end, (long)mtd->size,
303 (long)to);
304#endif
305 return -EINVAL;
306 }
307
308 soff_hi = to & ~(priv->asize - 1); 303 soff_hi = to & ~(priv->asize - 1);
309 eoff_hi = end & ~(priv->asize - 1); 304 eoff_hi = end & ~(priv->asize - 1);
310 soff_lo = to & (priv->asize - 1); 305 soff_lo = to & (priv->asize - 1);
@@ -358,7 +353,7 @@ static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len,
358 * mechanism 353 * mechanism
359 * returns the size of the memory region found. 354 * returns the size of the memory region found.
360 */ 355 */
361static u32 fixup_pmc551(struct pci_dev *dev) 356static int fixup_pmc551(struct pci_dev *dev)
362{ 357{
363#ifdef CONFIG_MTD_PMC551_BUGFIX 358#ifdef CONFIG_MTD_PMC551_BUGFIX
364 u32 dram_data; 359 u32 dram_data;
@@ -668,7 +663,7 @@ static int __init init_pmc551(void)
668 struct mypriv *priv; 663 struct mypriv *priv;
669 int found = 0; 664 int found = 0;
670 struct mtd_info *mtd; 665 struct mtd_info *mtd;
671 u32 length = 0; 666 int length = 0;
672 667
673 if (msize) { 668 if (msize) {
674 msize = (1 << (ffs(msize) - 1)) << 20; 669 msize = (1 << (ffs(msize) - 1)) << 20;
@@ -786,11 +781,11 @@ static int __init init_pmc551(void)
786 781
787 mtd->size = msize; 782 mtd->size = msize;
788 mtd->flags = MTD_CAP_RAM; 783 mtd->flags = MTD_CAP_RAM;
789 mtd->erase = pmc551_erase; 784 mtd->_erase = pmc551_erase;
790 mtd->read = pmc551_read; 785 mtd->_read = pmc551_read;
791 mtd->write = pmc551_write; 786 mtd->_write = pmc551_write;
792 mtd->point = pmc551_point; 787 mtd->_point = pmc551_point;
793 mtd->unpoint = pmc551_unpoint; 788 mtd->_unpoint = pmc551_unpoint;
794 mtd->type = MTD_RAM; 789 mtd->type = MTD_RAM;
795 mtd->name = "PMC551 RAM board"; 790 mtd->name = "PMC551 RAM board";
796 mtd->erasesize = 0x10000; 791 mtd->erasesize = 0x10000;
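Both this hunk and the slram, sst25l and lpddr hunks below follow the same conversion: the per-driver bounds checks are dropped, _unpoint now returns int, and the handlers are hooked up through the new underscore-prefixed fields (mtd->_read, mtd->_erase, ...), on the assumption that range validation is presumably done once in the MTD core wrappers before the driver callback is invoked. A minimal user-space model of that split — names and checks are illustrative, not the kernel's exact code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

struct mtd_info {
	uint64_t size;
	unsigned char *ram;                     /* backing store for this toy driver */
	int (*_read)(struct mtd_info *, uint64_t from, size_t len,
		     size_t *retlen, unsigned char *buf);
};

/* Driver callback: no bounds checking, the core has already validated from/len. */
static int toyram_read(struct mtd_info *mtd, uint64_t from, size_t len,
		       size_t *retlen, unsigned char *buf)
{
	memcpy(buf, mtd->ram + from, len);
	*retlen = len;
	return 0;
}

/* Core-side wrapper: one central range check instead of one copy per driver. */
static int mtd_read(struct mtd_info *mtd, uint64_t from, size_t len,
		    size_t *retlen, unsigned char *buf)
{
	*retlen = 0;
	if (from > mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_read(mtd, from, len, retlen, buf);
}

int main(void)
{
	unsigned char ram[64] = "hello";
	unsigned char buf[8];
	size_t retlen;
	struct mtd_info mtd = { .size = sizeof(ram), .ram = ram, ._read = toyram_read };

	printf("in range: %d\n", mtd_read(&mtd, 0, 5, &retlen, buf));   /* 0 */
	printf("past end: %d\n", mtd_read(&mtd, 60, 16, &retlen, buf)); /* -EINVAL */
	return 0;
}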
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 288594163c22..8f52fc858e48 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -75,7 +75,7 @@ static slram_mtd_list_t *slram_mtdlist = NULL;
75static int slram_erase(struct mtd_info *, struct erase_info *); 75static int slram_erase(struct mtd_info *, struct erase_info *);
76static int slram_point(struct mtd_info *, loff_t, size_t, size_t *, void **, 76static int slram_point(struct mtd_info *, loff_t, size_t, size_t *, void **,
77 resource_size_t *); 77 resource_size_t *);
78static void slram_unpoint(struct mtd_info *, loff_t, size_t); 78static int slram_unpoint(struct mtd_info *, loff_t, size_t);
79static int slram_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *); 79static int slram_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
80static int slram_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 80static int slram_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
81 81
@@ -83,21 +83,13 @@ static int slram_erase(struct mtd_info *mtd, struct erase_info *instr)
83{ 83{
84 slram_priv_t *priv = mtd->priv; 84 slram_priv_t *priv = mtd->priv;
85 85
86 if (instr->addr + instr->len > mtd->size) {
87 return(-EINVAL);
88 }
89
90 memset(priv->start + instr->addr, 0xff, instr->len); 86 memset(priv->start + instr->addr, 0xff, instr->len);
91
92 /* This'll catch a few races. Free the thing before returning :) 87 /* This'll catch a few races. Free the thing before returning :)
93 * I don't feel at all ashamed. This kind of thing is possible anyway 88 * I don't feel at all ashamed. This kind of thing is possible anyway
94 * with flash, but unlikely. 89 * with flash, but unlikely.
95 */ 90 */
96
97 instr->state = MTD_ERASE_DONE; 91 instr->state = MTD_ERASE_DONE;
98
99 mtd_erase_callback(instr); 92 mtd_erase_callback(instr);
100
101 return(0); 93 return(0);
102} 94}
103 95
@@ -106,20 +98,14 @@ static int slram_point(struct mtd_info *mtd, loff_t from, size_t len,
106{ 98{
107 slram_priv_t *priv = mtd->priv; 99 slram_priv_t *priv = mtd->priv;
108 100
109 /* can we return a physical address with this driver? */
110 if (phys)
111 return -EINVAL;
112
113 if (from + len > mtd->size)
114 return -EINVAL;
115
116 *virt = priv->start + from; 101 *virt = priv->start + from;
117 *retlen = len; 102 *retlen = len;
118 return(0); 103 return(0);
119} 104}
120 105
121static void slram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 106static int slram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
122{ 107{
108 return 0;
123} 109}
124 110
125static int slram_read(struct mtd_info *mtd, loff_t from, size_t len, 111static int slram_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -127,14 +113,7 @@ static int slram_read(struct mtd_info *mtd, loff_t from, size_t len,
127{ 113{
128 slram_priv_t *priv = mtd->priv; 114 slram_priv_t *priv = mtd->priv;
129 115
130 if (from > mtd->size)
131 return -EINVAL;
132
133 if (from + len > mtd->size)
134 len = mtd->size - from;
135
136 memcpy(buf, priv->start + from, len); 116 memcpy(buf, priv->start + from, len);
137
138 *retlen = len; 117 *retlen = len;
139 return(0); 118 return(0);
140} 119}
@@ -144,11 +123,7 @@ static int slram_write(struct mtd_info *mtd, loff_t to, size_t len,
144{ 123{
145 slram_priv_t *priv = mtd->priv; 124 slram_priv_t *priv = mtd->priv;
146 125
147 if (to + len > mtd->size)
148 return -EINVAL;
149
150 memcpy(priv->start + to, buf, len); 126 memcpy(priv->start + to, buf, len);
151
152 *retlen = len; 127 *retlen = len;
153 return(0); 128 return(0);
154} 129}
@@ -199,11 +174,11 @@ static int register_device(char *name, unsigned long start, unsigned long length
199 (*curmtd)->mtdinfo->name = name; 174 (*curmtd)->mtdinfo->name = name;
200 (*curmtd)->mtdinfo->size = length; 175 (*curmtd)->mtdinfo->size = length;
201 (*curmtd)->mtdinfo->flags = MTD_CAP_RAM; 176 (*curmtd)->mtdinfo->flags = MTD_CAP_RAM;
202 (*curmtd)->mtdinfo->erase = slram_erase; 177 (*curmtd)->mtdinfo->_erase = slram_erase;
203 (*curmtd)->mtdinfo->point = slram_point; 178 (*curmtd)->mtdinfo->_point = slram_point;
204 (*curmtd)->mtdinfo->unpoint = slram_unpoint; 179 (*curmtd)->mtdinfo->_unpoint = slram_unpoint;
205 (*curmtd)->mtdinfo->read = slram_read; 180 (*curmtd)->mtdinfo->_read = slram_read;
206 (*curmtd)->mtdinfo->write = slram_write; 181 (*curmtd)->mtdinfo->_write = slram_write;
207 (*curmtd)->mtdinfo->owner = THIS_MODULE; 182 (*curmtd)->mtdinfo->owner = THIS_MODULE;
208 (*curmtd)->mtdinfo->type = MTD_RAM; 183 (*curmtd)->mtdinfo->type = MTD_RAM;
209 (*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ; 184 (*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ;
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
new file mode 100644
index 000000000000..797d43cd3550
--- /dev/null
+++ b/drivers/mtd/devices/spear_smi.c
@@ -0,0 +1,1147 @@
1/*
2 * SMI (Serial Memory Controller) device driver for Serial NOR Flash on
3 * SPEAr platform
4 * The serial nor interface is largely based on drivers/mtd/m25p80.c,
5 * however the SPI interface has been replaced by SMI.
6 *
7 * Copyright © 2010 STMicroelectronics.
8 * Ashish Priyadarshi
9 * Shiraz Hashim <shiraz.hashim@st.com>
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 */
15
16#include <linux/clk.h>
17#include <linux/delay.h>
18#include <linux/device.h>
19#include <linux/err.h>
20#include <linux/errno.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23#include <linux/ioport.h>
24#include <linux/jiffies.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/param.h>
28#include <linux/platform_device.h>
29#include <linux/mtd/mtd.h>
30#include <linux/mtd/partitions.h>
31#include <linux/mtd/spear_smi.h>
32#include <linux/mutex.h>
33#include <linux/sched.h>
34#include <linux/slab.h>
35#include <linux/wait.h>
36#include <linux/of.h>
37#include <linux/of_address.h>
38
39/* SMI clock rate */
40#define SMI_MAX_CLOCK_FREQ 50000000 /* 50 MHz */
41
 42/* MAX time out to safely come out of an erase or write busy condition */
43#define SMI_PROBE_TIMEOUT (HZ / 10)
44#define SMI_MAX_TIME_OUT (3 * HZ)
45
46/* timeout for command completion */
47#define SMI_CMD_TIMEOUT (HZ / 10)
48
49/* registers of smi */
50#define SMI_CR1 0x0 /* SMI control register 1 */
51#define SMI_CR2 0x4 /* SMI control register 2 */
52#define SMI_SR 0x8 /* SMI status register */
53#define SMI_TR 0xC /* SMI transmit register */
54#define SMI_RR 0x10 /* SMI receive register */
55
56/* defines for control_reg 1 */
57#define BANK_EN (0xF << 0) /* enables all banks */
58#define DSEL_TIME (0x6 << 4) /* Deselect time 6 + 1 SMI_CK periods */
59#define SW_MODE (0x1 << 28) /* enables SW Mode */
60#define WB_MODE (0x1 << 29) /* Write Burst Mode */
61#define FAST_MODE (0x1 << 15) /* Fast Mode */
62#define HOLD1 (0x1 << 16) /* Clock Hold period selection */
63
64/* defines for control_reg 2 */
65#define SEND (0x1 << 7) /* Send data */
66#define TFIE (0x1 << 8) /* Transmission Flag Interrupt Enable */
67#define WCIE (0x1 << 9) /* Write Complete Interrupt Enable */
68#define RD_STATUS_REG (0x1 << 10) /* reads status reg */
69#define WE (0x1 << 11) /* Write Enable */
70
71#define TX_LEN_SHIFT 0
72#define RX_LEN_SHIFT 4
73#define BANK_SHIFT 12
74
75/* defines for status register */
76#define SR_WIP 0x1 /* Write in progress */
77#define SR_WEL 0x2 /* Write enable latch */
78#define SR_BP0 0x4 /* Block protect 0 */
79#define SR_BP1 0x8 /* Block protect 1 */
80#define SR_BP2 0x10 /* Block protect 2 */
81#define SR_SRWD 0x80 /* SR write protect */
82#define TFF 0x100 /* Transfer Finished Flag */
 83#define WCF 0x200 /* Write Complete Flag */
84#define ERF1 0x400 /* Forbidden Write Request */
85#define ERF2 0x800 /* Forbidden Access */
86
87#define WM_SHIFT 12
88
89/* flash opcodes */
90#define OPCODE_RDID 0x9f /* Read JEDEC ID */
91
92/* Flash Device Ids maintenance section */
93
94/* data structure to maintain flash ids from different vendors */
95struct flash_device {
96 char *name;
97 u8 erase_cmd;
98 u32 device_id;
99 u32 pagesize;
100 unsigned long sectorsize;
101 unsigned long size_in_bytes;
102};
103
104#define FLASH_ID(n, es, id, psize, ssize, size) \
105{ \
106 .name = n, \
107 .erase_cmd = es, \
108 .device_id = id, \
109 .pagesize = psize, \
110 .sectorsize = ssize, \
111 .size_in_bytes = size \
112}
113
114static struct flash_device flash_devices[] = {
115 FLASH_ID("st m25p16" , 0xd8, 0x00152020, 0x100, 0x10000, 0x200000),
116 FLASH_ID("st m25p32" , 0xd8, 0x00162020, 0x100, 0x10000, 0x400000),
117 FLASH_ID("st m25p64" , 0xd8, 0x00172020, 0x100, 0x10000, 0x800000),
118 FLASH_ID("st m25p128" , 0xd8, 0x00182020, 0x100, 0x40000, 0x1000000),
119 FLASH_ID("st m25p05" , 0xd8, 0x00102020, 0x80 , 0x8000 , 0x10000),
120 FLASH_ID("st m25p10" , 0xd8, 0x00112020, 0x80 , 0x8000 , 0x20000),
121 FLASH_ID("st m25p20" , 0xd8, 0x00122020, 0x100, 0x10000, 0x40000),
122 FLASH_ID("st m25p40" , 0xd8, 0x00132020, 0x100, 0x10000, 0x80000),
123 FLASH_ID("st m25p80" , 0xd8, 0x00142020, 0x100, 0x10000, 0x100000),
124 FLASH_ID("st m45pe10" , 0xd8, 0x00114020, 0x100, 0x10000, 0x20000),
125 FLASH_ID("st m45pe20" , 0xd8, 0x00124020, 0x100, 0x10000, 0x40000),
126 FLASH_ID("st m45pe40" , 0xd8, 0x00134020, 0x100, 0x10000, 0x80000),
127 FLASH_ID("st m45pe80" , 0xd8, 0x00144020, 0x100, 0x10000, 0x100000),
128 FLASH_ID("sp s25fl004" , 0xd8, 0x00120201, 0x100, 0x10000, 0x80000),
129 FLASH_ID("sp s25fl008" , 0xd8, 0x00130201, 0x100, 0x10000, 0x100000),
130 FLASH_ID("sp s25fl016" , 0xd8, 0x00140201, 0x100, 0x10000, 0x200000),
131 FLASH_ID("sp s25fl032" , 0xd8, 0x00150201, 0x100, 0x10000, 0x400000),
132 FLASH_ID("sp s25fl064" , 0xd8, 0x00160201, 0x100, 0x10000, 0x800000),
133 FLASH_ID("atmel 25f512" , 0x52, 0x0065001F, 0x80 , 0x8000 , 0x10000),
134 FLASH_ID("atmel 25f1024" , 0x52, 0x0060001F, 0x100, 0x8000 , 0x20000),
135 FLASH_ID("atmel 25f2048" , 0x52, 0x0063001F, 0x100, 0x10000, 0x40000),
136 FLASH_ID("atmel 25f4096" , 0x52, 0x0064001F, 0x100, 0x10000, 0x80000),
137 FLASH_ID("atmel 25fs040" , 0xd7, 0x0004661F, 0x100, 0x10000, 0x80000),
138 FLASH_ID("mac 25l512" , 0xd8, 0x001020C2, 0x010, 0x10000, 0x10000),
139 FLASH_ID("mac 25l1005" , 0xd8, 0x001120C2, 0x010, 0x10000, 0x20000),
140 FLASH_ID("mac 25l2005" , 0xd8, 0x001220C2, 0x010, 0x10000, 0x40000),
141 FLASH_ID("mac 25l4005" , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000),
142 FLASH_ID("mac 25l4005a" , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000),
143 FLASH_ID("mac 25l8005" , 0xd8, 0x001420C2, 0x010, 0x10000, 0x100000),
144 FLASH_ID("mac 25l1605" , 0xd8, 0x001520C2, 0x100, 0x10000, 0x200000),
145 FLASH_ID("mac 25l1605a" , 0xd8, 0x001520C2, 0x010, 0x10000, 0x200000),
146 FLASH_ID("mac 25l3205" , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000),
147 FLASH_ID("mac 25l3205a" , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000),
148 FLASH_ID("mac 25l6405" , 0xd8, 0x001720C2, 0x100, 0x10000, 0x800000),
149};
150
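Reading one FLASH_ID() row: for "st m25p16" the sector-erase opcode is 0xd8, the JEDEC id is 0x00152020, pages are 0x100 (256) bytes, sectors are 0x10000 (64 KiB) and the part is 0x200000 (2 MiB), i.e. 32 erase sectors of 256 pages each. A quick stand-alone sanity check of those derived counts:

#include <stdio.h>

int main(void)
{
	/* values copied from the "st m25p16" row of flash_devices[] */
	unsigned long pagesize = 0x100, sectorsize = 0x10000, size = 0x200000;

	printf("sectors = %lu, pages per sector = %lu\n",
	       size / sectorsize, sectorsize / pagesize);
	return 0;
}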
151/* Define spear specific structures */
152
153struct spear_snor_flash;
154
155/**
156 * struct spear_smi - Structure for SMI Device
157 *
158 * @clk: functional clock
159 * @status: current status register of SMI.
160 * @clk_rate: functional clock rate of SMI (default: SMI_MAX_CLOCK_FREQ)
161 * @lock: lock to prevent parallel access of SMI.
162 * @io_base: base address for registers of SMI.
163 * @pdev: platform device
164 * @cmd_complete: queue to wait for command completion of NOR-flash.
165 * @num_flashes: number of flashes actually present on board.
166 * @flash: separate structure for each Serial NOR-flash attached to SMI.
167 */
168struct spear_smi {
169 struct clk *clk;
170 u32 status;
171 unsigned long clk_rate;
172 struct mutex lock;
173 void __iomem *io_base;
174 struct platform_device *pdev;
175 wait_queue_head_t cmd_complete;
176 u32 num_flashes;
177 struct spear_snor_flash *flash[MAX_NUM_FLASH_CHIP];
178};
179
180/**
181 * struct spear_snor_flash - Structure for Serial NOR Flash
182 *
183 * @bank: Bank number(0, 1, 2, 3) for each NOR-flash.
184 * @dev_id: Device ID of NOR-flash.
185 * @lock: lock to manage flash read, write and erase operations
186 * @mtd: MTD info for each NOR-flash.
187 * @num_parts: Total number of partition in each bank of NOR-flash.
188 * @parts: Partition info for each bank of NOR-flash.
189 * @page_size: Page size of NOR-flash.
190 * @base_addr: Base address of NOR-flash.
191 * @erase_cmd: erase command may vary on different flash types
192 * @fast_mode: flash supports read in fast mode
193 */
194struct spear_snor_flash {
195 u32 bank;
196 u32 dev_id;
197 struct mutex lock;
198 struct mtd_info mtd;
199 u32 num_parts;
200 struct mtd_partition *parts;
201 u32 page_size;
202 void __iomem *base_addr;
203 u8 erase_cmd;
204 u8 fast_mode;
205};
206
207static inline struct spear_snor_flash *get_flash_data(struct mtd_info *mtd)
208{
209 return container_of(mtd, struct spear_snor_flash, mtd);
210}
211
212/**
213 * spear_smi_read_sr - Read status register of flash through SMI
214 * @dev: structure of SMI information.
215 * @bank: bank to which flash is connected
216 *
217 * This routine will return the status register of the flash chip present at the
218 * given bank.
219 */
220static int spear_smi_read_sr(struct spear_smi *dev, u32 bank)
221{
222 int ret;
223 u32 ctrlreg1;
224
225 mutex_lock(&dev->lock);
226 dev->status = 0; /* Will be set in interrupt handler */
227
228 ctrlreg1 = readl(dev->io_base + SMI_CR1);
229 /* program smi in hw mode */
230 writel(ctrlreg1 & ~(SW_MODE | WB_MODE), dev->io_base + SMI_CR1);
231
232 /* performing a rsr instruction in hw mode */
233 writel((bank << BANK_SHIFT) | RD_STATUS_REG | TFIE,
234 dev->io_base + SMI_CR2);
235
236 /* wait for tff */
237 ret = wait_event_interruptible_timeout(dev->cmd_complete,
238 dev->status & TFF, SMI_CMD_TIMEOUT);
239
240 /* copy dev->status (lower 16 bits) in order to release lock */
241 if (ret > 0)
242 ret = dev->status & 0xffff;
243 else
244 ret = -EIO;
245
246 /* restore the ctrl regs state */
247 writel(ctrlreg1, dev->io_base + SMI_CR1);
248 writel(0, dev->io_base + SMI_CR2);
249 mutex_unlock(&dev->lock);
250
251 return ret;
252}
253
254/**
255 * spear_smi_wait_till_ready - wait till flash is ready
256 * @dev: structure of SMI information.
257 * @bank: flash corresponding to this bank
258 * @timeout: timeout for busy wait condition
259 *
 260 * This routine checks for the WIP (write in progress) bit in the status register.
 261 * If successful the routine returns 0, else -EBUSY.
262 */
263static int spear_smi_wait_till_ready(struct spear_smi *dev, u32 bank,
264 unsigned long timeout)
265{
266 unsigned long finish;
267 int status;
268
269 finish = jiffies + timeout;
270 do {
271 status = spear_smi_read_sr(dev, bank);
272 if (status < 0)
273 continue; /* try till timeout */
274 else if (!(status & SR_WIP))
275 return 0;
276
277 cond_resched();
278 } while (!time_after_eq(jiffies, finish));
279
280 dev_err(&dev->pdev->dev, "smi controller is busy, timeout\n");
281 return status;
282}
283
284/**
285 * spear_smi_int_handler - SMI Interrupt Handler.
286 * @irq: irq number
287 * @dev_id: structure of SMI device, embedded in dev_id.
288 *
289 * The handler clears all interrupt conditions and records the status in
290 * dev->status which is used by the driver later.
291 */
292static irqreturn_t spear_smi_int_handler(int irq, void *dev_id)
293{
294 u32 status = 0;
295 struct spear_smi *dev = dev_id;
296
297 status = readl(dev->io_base + SMI_SR);
298
299 if (unlikely(!status))
300 return IRQ_NONE;
301
302 /* clear all interrupt conditions */
303 writel(0, dev->io_base + SMI_SR);
304
305 /* copy the status register in dev->status */
306 dev->status |= status;
307
308 /* send the completion */
309 wake_up_interruptible(&dev->cmd_complete);
310
311 return IRQ_HANDLED;
312}
313
314/**
315 * spear_smi_hw_init - initializes the smi controller.
316 * @dev: structure of smi device
317 *
 318 * This routine initializes the smi controller with the default values.
319 */
320static void spear_smi_hw_init(struct spear_smi *dev)
321{
322 unsigned long rate = 0;
323 u32 prescale = 0;
324 u32 val;
325
326 rate = clk_get_rate(dev->clk);
327
328 /* functional clock of smi */
329 prescale = DIV_ROUND_UP(rate, dev->clk_rate);
330
331 /*
332 * setting the standard values, fast mode, prescaler for
333 * SMI_MAX_CLOCK_FREQ (50MHz) operation and bank enable
334 */
335 val = HOLD1 | BANK_EN | DSEL_TIME | (prescale << 8);
336
337 mutex_lock(&dev->lock);
338 writel(val, dev->io_base + SMI_CR1);
339 mutex_unlock(&dev->lock);
340}
341
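To make the prescaler computation concrete: with a hypothetical 166 MHz input clock and the default 50 MHz target, DIV_ROUND_UP gives 4, for an effective SMI clock of about 41.5 MHz; the value is then shifted into the prescaler field (bit 8 and up) of SMI_CR1 as in the code above. A stand-alone check of the arithmetic:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long rate = 166000000;  /* hypothetical AHB/input clock */
	unsigned long target = 50000000; /* SMI_MAX_CLOCK_FREQ */
	unsigned long prescale = DIV_ROUND_UP(rate, target);

	printf("prescale = %lu, SMI clock = %lu Hz\n", prescale, rate / prescale);
	printf("CR1 prescaler field = 0x%lx\n", prescale << 8);
	return 0;
}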
342/**
343 * get_flash_index - match chip id from a flash list.
344 * @flash_id: a valid nor flash chip id obtained from board.
345 *
 346 * Try to validate the chip id by matching it against the list of known devices.
 347 * On success returns the index into the flash_devices array; if the id is not
 348 * found, returns a negative value.
349 */
350static int get_flash_index(u32 flash_id)
351{
352 int index;
353
354 /* Matches chip-id to entire list of 'serial-nor flash' ids */
355 for (index = 0; index < ARRAY_SIZE(flash_devices); index++) {
356 if (flash_devices[index].device_id == flash_id)
357 return index;
358 }
359
360 /* Memory chip is not listed and not supported */
361 return -ENODEV;
362}
363
364/**
365 * spear_smi_write_enable - Enable the flash to do write operation
366 * @dev: structure of SMI device
367 * @bank: enable write for flash connected to this bank
368 *
369 * Set write enable latch with Write Enable command.
370 * Returns 0 on success.
371 */
372static int spear_smi_write_enable(struct spear_smi *dev, u32 bank)
373{
374 int ret;
375 u32 ctrlreg1;
376
377 mutex_lock(&dev->lock);
378 dev->status = 0; /* Will be set in interrupt handler */
379
380 ctrlreg1 = readl(dev->io_base + SMI_CR1);
381 /* program smi in h/w mode */
382 writel(ctrlreg1 & ~SW_MODE, dev->io_base + SMI_CR1);
383
384 /* give the flash, write enable command */
385 writel((bank << BANK_SHIFT) | WE | TFIE, dev->io_base + SMI_CR2);
386
387 ret = wait_event_interruptible_timeout(dev->cmd_complete,
388 dev->status & TFF, SMI_CMD_TIMEOUT);
389
390 /* restore the ctrl regs state */
391 writel(ctrlreg1, dev->io_base + SMI_CR1);
392 writel(0, dev->io_base + SMI_CR2);
393
394 if (ret <= 0) {
395 ret = -EIO;
396 dev_err(&dev->pdev->dev,
397 "smi controller failed on write enable\n");
398 } else {
399 /* check whether write mode status is set for required bank */
400 if (dev->status & (1 << (bank + WM_SHIFT)))
401 ret = 0;
402 else {
403 dev_err(&dev->pdev->dev, "couldn't enable write\n");
404 ret = -EIO;
405 }
406 }
407
408 mutex_unlock(&dev->lock);
409 return ret;
410}
411
412static inline u32
413get_sector_erase_cmd(struct spear_snor_flash *flash, u32 offset)
414{
415 u32 cmd;
416 u8 *x = (u8 *)&cmd;
417
418 x[0] = flash->erase_cmd;
419 x[1] = offset >> 16;
420 x[2] = offset >> 8;
421 x[3] = offset;
422
423 return cmd;
424}
425
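get_sector_erase_cmd() overlays a byte pointer on the 32-bit command word, so x[0]..x[3] are simply the bytes of cmd in memory order: the erase opcode followed by the 24-bit sector address, most significant address byte first. A quick stand-alone check (the opcode and offset below are just example values):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cmd = 0;
	uint8_t *x = (uint8_t *)&cmd;
	uint8_t erase_cmd = 0xd8;  /* sector-erase opcode used by most parts in the table */
	uint32_t offset = 0x30000; /* example: any address inside the sector to erase */

	x[0] = erase_cmd;
	x[1] = offset >> 16;
	x[2] = offset >> 8;
	x[3] = offset;

	/* prints: command bytes: d8 03 00 00 */
	printf("command bytes: %02x %02x %02x %02x\n", x[0], x[1], x[2], x[3]);
	return 0;
}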
426/**
427 * spear_smi_erase_sector - erase one sector of flash
428 * @dev: structure of SMI information
 429 * @command: erase command to be sent
 430 * @bank: bank to which this command needs to be sent
431 * @bytes: size of command
432 *
 433 * Erase one sector of flash memory; the command already encodes an address
 434 * lying within the sector that should be erased.
 435 * Returns 0 if successful, non-zero otherwise.
436 */
437static int spear_smi_erase_sector(struct spear_smi *dev,
438 u32 bank, u32 command, u32 bytes)
439{
440 u32 ctrlreg1 = 0;
441 int ret;
442
443 ret = spear_smi_wait_till_ready(dev, bank, SMI_MAX_TIME_OUT);
444 if (ret)
445 return ret;
446
447 ret = spear_smi_write_enable(dev, bank);
448 if (ret)
449 return ret;
450
451 mutex_lock(&dev->lock);
452
453 ctrlreg1 = readl(dev->io_base + SMI_CR1);
454 writel((ctrlreg1 | SW_MODE) & ~WB_MODE, dev->io_base + SMI_CR1);
455
456 /* send command in sw mode */
457 writel(command, dev->io_base + SMI_TR);
458
459 writel((bank << BANK_SHIFT) | SEND | TFIE | (bytes << TX_LEN_SHIFT),
460 dev->io_base + SMI_CR2);
461
462 ret = wait_event_interruptible_timeout(dev->cmd_complete,
463 dev->status & TFF, SMI_CMD_TIMEOUT);
464
465 if (ret <= 0) {
466 ret = -EIO;
467 dev_err(&dev->pdev->dev, "sector erase failed\n");
468 } else
469 ret = 0; /* success */
470
471 /* restore ctrl regs */
472 writel(ctrlreg1, dev->io_base + SMI_CR1);
473 writel(0, dev->io_base + SMI_CR2);
474
475 mutex_unlock(&dev->lock);
476 return ret;
477}
478
479/**
480 * spear_mtd_erase - perform flash erase operation as requested by user
481 * @mtd: Provides the memory characteristics
482 * @e_info: Provides the erase information
483 *
 484 * Erase an address range on the flash chip. The address range may span
 485 * one or more erase sectors. Returns an error if there is a problem erasing.
486 */
487static int spear_mtd_erase(struct mtd_info *mtd, struct erase_info *e_info)
488{
489 struct spear_snor_flash *flash = get_flash_data(mtd);
490 struct spear_smi *dev = mtd->priv;
491 u32 addr, command, bank;
492 int len, ret;
493
494 if (!flash || !dev)
495 return -ENODEV;
496
497 bank = flash->bank;
498 if (bank > dev->num_flashes - 1) {
499 dev_err(&dev->pdev->dev, "Invalid Bank Num");
500 return -EINVAL;
501 }
502
503 addr = e_info->addr;
504 len = e_info->len;
505
506 mutex_lock(&flash->lock);
507
508 /* now erase sectors in loop */
509 while (len) {
510 command = get_sector_erase_cmd(flash, addr);
511 /* preparing the command for flash */
512 ret = spear_smi_erase_sector(dev, bank, command, 4);
513 if (ret) {
514 e_info->state = MTD_ERASE_FAILED;
515 mutex_unlock(&flash->lock);
516 return ret;
517 }
518 addr += mtd->erasesize;
519 len -= mtd->erasesize;
520 }
521
522 mutex_unlock(&flash->lock);
523 e_info->state = MTD_ERASE_DONE;
524 mtd_erase_callback(e_info);
525
526 return 0;
527}
528
529/**
530 * spear_mtd_read - performs flash read operation as requested by the user
531 * @mtd: MTD information of the memory bank
 532 * @from: Address from which to start reading
 533 * @len: Number of bytes to be read
 534 * @retlen: Filled with the number of bytes actually read
 535 * @buf: Buffer filled with the data read
536 *
537 * Read an address range from the flash chip. The address range
538 * may be any size provided it is within the physical boundaries.
539 * Returns 0 on success, non zero otherwise
540 */
541static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
542 size_t *retlen, u8 *buf)
543{
544 struct spear_snor_flash *flash = get_flash_data(mtd);
545 struct spear_smi *dev = mtd->priv;
546 void *src;
547 u32 ctrlreg1, val;
548 int ret;
549
550 if (!flash || !dev)
551 return -ENODEV;
552
553 if (flash->bank > dev->num_flashes - 1) {
554 dev_err(&dev->pdev->dev, "Invalid Bank Num");
555 return -EINVAL;
556 }
557
558 /* select address as per bank number */
559 src = flash->base_addr + from;
560
561 mutex_lock(&flash->lock);
562
563 /* wait till previous write/erase is done. */
564 ret = spear_smi_wait_till_ready(dev, flash->bank, SMI_MAX_TIME_OUT);
565 if (ret) {
566 mutex_unlock(&flash->lock);
567 return ret;
568 }
569
570 mutex_lock(&dev->lock);
571 /* put smi in hw mode not wbt mode */
572 ctrlreg1 = val = readl(dev->io_base + SMI_CR1);
573 val &= ~(SW_MODE | WB_MODE);
574 if (flash->fast_mode)
575 val |= FAST_MODE;
576
577 writel(val, dev->io_base + SMI_CR1);
578
579 memcpy_fromio(buf, (u8 *)src, len);
580
581 /* restore ctrl reg1 */
582 writel(ctrlreg1, dev->io_base + SMI_CR1);
583 mutex_unlock(&dev->lock);
584
585 *retlen = len;
586 mutex_unlock(&flash->lock);
587
588 return 0;
589}
590
591static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank,
592 void *dest, const void *src, size_t len)
593{
594 int ret;
595 u32 ctrlreg1;
596
597 /* wait until finished previous write command. */
598 ret = spear_smi_wait_till_ready(dev, bank, SMI_MAX_TIME_OUT);
599 if (ret)
600 return ret;
601
602 /* put smi in write enable */
603 ret = spear_smi_write_enable(dev, bank);
604 if (ret)
605 return ret;
606
607 /* put smi in hw, write burst mode */
608 mutex_lock(&dev->lock);
609
610 ctrlreg1 = readl(dev->io_base + SMI_CR1);
611 writel((ctrlreg1 | WB_MODE) & ~SW_MODE, dev->io_base + SMI_CR1);
612
613 memcpy_toio(dest, src, len);
614
615 writel(ctrlreg1, dev->io_base + SMI_CR1);
616
617 mutex_unlock(&dev->lock);
618 return 0;
619}
620
621/**
622 * spear_mtd_write - performs write operation as requested by the user.
623 * @mtd: MTD information of the memory bank.
624 * @to: Address to write.
625 * @len: Number of bytes to be written.
 626 * @retlen: Number of bytes actually written.
 627 * @buf: Buffer from which the data is taken.
628 *
629 * Write an address range to the flash chip. Data must be written in
630 * flash_page_size chunks. The address range may be any size provided
631 * it is within the physical boundaries.
632 * Returns 0 on success, non zero otherwise
633 */
634static int spear_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
635 size_t *retlen, const u8 *buf)
636{
637 struct spear_snor_flash *flash = get_flash_data(mtd);
638 struct spear_smi *dev = mtd->priv;
639 void *dest;
640 u32 page_offset, page_size;
641 int ret;
642
643 if (!flash || !dev)
644 return -ENODEV;
645
646 if (flash->bank > dev->num_flashes - 1) {
647 dev_err(&dev->pdev->dev, "Invalid Bank Num");
648 return -EINVAL;
649 }
650
651 /* select address as per bank number */
652 dest = flash->base_addr + to;
653 mutex_lock(&flash->lock);
654
655 page_offset = (u32)to % flash->page_size;
656
657 /* do if all the bytes fit onto one page */
658 if (page_offset + len <= flash->page_size) {
659 ret = spear_smi_cpy_toio(dev, flash->bank, dest, buf, len);
660 if (!ret)
661 *retlen += len;
662 } else {
663 u32 i;
664
665 /* the size of data remaining on the first page */
666 page_size = flash->page_size - page_offset;
667
668 ret = spear_smi_cpy_toio(dev, flash->bank, dest, buf,
669 page_size);
670 if (ret)
671 goto err_write;
672 else
673 *retlen += page_size;
674
675 /* write everything in pagesize chunks */
676 for (i = page_size; i < len; i += page_size) {
677 page_size = len - i;
678 if (page_size > flash->page_size)
679 page_size = flash->page_size;
680
681 ret = spear_smi_cpy_toio(dev, flash->bank, dest + i,
682 buf + i, page_size);
683 if (ret)
684 break;
685 else
686 *retlen += page_size;
687 }
688 }
689
690err_write:
691 mutex_unlock(&flash->lock);
692
693 return ret;
694}
695
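The write path above splits a request on page boundaries: when the data does not fit in the page containing 'to', it first writes up to the end of that page and then continues in page-size chunks. For a hypothetical 600-byte write at offset 0x1f0 with 256-byte pages this gives chunks of 16, 256, 256 and 72 bytes; the split can be reproduced with the same arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned page = 256;            /* flash->page_size for most parts in the table */
	unsigned to = 0x1f0, len = 600; /* hypothetical write request */
	unsigned page_offset = to % page;
	unsigned i, chunk;

	if (page_offset + len <= page) {
		/* everything fits in the page containing 'to' */
		printf("single chunk of %u bytes\n", len);
		return 0;
	}

	chunk = page - page_offset;     /* bytes left in the first page */
	printf("chunk: %u\n", chunk);
	for (i = chunk; i < len; i += chunk) {
		chunk = len - i;
		if (chunk > page)
			chunk = page;
		printf("chunk: %u\n", chunk);
	}
	return 0;
}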
696/**
697 * spear_smi_probe_flash - Detects the NOR Flash chip.
698 * @dev: structure of SMI information.
699 * @bank: bank on which flash must be probed
700 *
 701 * This routine checks whether a flash chip is present on the given memory
 702 * bank.
 703 * Returns the index of the probed flash in the flash_devices array (negative on failure).
704 */
705static int spear_smi_probe_flash(struct spear_smi *dev, u32 bank)
706{
707 int ret;
708 u32 val = 0;
709
710 ret = spear_smi_wait_till_ready(dev, bank, SMI_PROBE_TIMEOUT);
711 if (ret)
712 return ret;
713
714 mutex_lock(&dev->lock);
715
716 dev->status = 0; /* Will be set in interrupt handler */
717 /* put smi in sw mode */
718 val = readl(dev->io_base + SMI_CR1);
719 writel(val | SW_MODE, dev->io_base + SMI_CR1);
720
721 /* send readid command in sw mode */
722 writel(OPCODE_RDID, dev->io_base + SMI_TR);
723
724 val = (bank << BANK_SHIFT) | SEND | (1 << TX_LEN_SHIFT) |
725 (3 << RX_LEN_SHIFT) | TFIE;
726 writel(val, dev->io_base + SMI_CR2);
727
728 /* wait for TFF */
729 ret = wait_event_interruptible_timeout(dev->cmd_complete,
730 dev->status & TFF, SMI_CMD_TIMEOUT);
731 if (ret <= 0) {
732 ret = -ENODEV;
733 goto err_probe;
734 }
735
736 /* get memory chip id */
737 val = readl(dev->io_base + SMI_RR);
738 val &= 0x00ffffff;
739 ret = get_flash_index(val);
740
741err_probe:
742 /* clear sw mode */
743 val = readl(dev->io_base + SMI_CR1);
744 writel(val & ~SW_MODE, dev->io_base + SMI_CR1);
745
746 mutex_unlock(&dev->lock);
747 return ret;
748}
749
750
751#ifdef CONFIG_OF
752static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev,
753 struct device_node *np)
754{
755 struct spear_smi_plat_data *pdata = dev_get_platdata(&pdev->dev);
756 struct device_node *pp = NULL;
757 const __be32 *addr;
758 u32 val;
759 int len;
760 int i = 0;
761
762 if (!np)
763 return -ENODEV;
764
765 of_property_read_u32(np, "clock-rate", &val);
766 pdata->clk_rate = val;
767
768 pdata->board_flash_info = devm_kzalloc(&pdev->dev,
769 sizeof(*pdata->board_flash_info),
770 GFP_KERNEL);
771
772 /* Fill structs for each subnode (flash device) */
773 while ((pp = of_get_next_child(np, pp))) {
774 struct spear_smi_flash_info *flash_info;
775
776 flash_info = &pdata->board_flash_info[i];
777 pdata->np[i] = pp;
778
779 /* Read base-addr and size from DT */
780 addr = of_get_property(pp, "reg", &len);
781 pdata->board_flash_info->mem_base = be32_to_cpup(&addr[0]);
782 pdata->board_flash_info->size = be32_to_cpup(&addr[1]);
783
784 if (of_get_property(pp, "st,smi-fast-mode", NULL))
785 pdata->board_flash_info->fast_mode = 1;
786
787 i++;
788 }
789
790 pdata->num_flashes = i;
791
792 return 0;
793}
794#else
795static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev,
796 struct device_node *np)
797{
798 return -ENOSYS;
799}
800#endif
801
802static int spear_smi_setup_banks(struct platform_device *pdev,
803 u32 bank, struct device_node *np)
804{
805 struct spear_smi *dev = platform_get_drvdata(pdev);
806 struct mtd_part_parser_data ppdata = {};
807 struct spear_smi_flash_info *flash_info;
808 struct spear_smi_plat_data *pdata;
809 struct spear_snor_flash *flash;
810 struct mtd_partition *parts = NULL;
811 int count = 0;
812 int flash_index;
813 int ret = 0;
814
815 pdata = dev_get_platdata(&pdev->dev);
816 if (bank > pdata->num_flashes - 1)
817 return -EINVAL;
818
819 flash_info = &pdata->board_flash_info[bank];
820 if (!flash_info)
821 return -ENODEV;
822
823 flash = kzalloc(sizeof(*flash), GFP_ATOMIC);
824 if (!flash)
825 return -ENOMEM;
826 flash->bank = bank;
827 flash->fast_mode = flash_info->fast_mode ? 1 : 0;
828 mutex_init(&flash->lock);
829
830 /* verify whether nor flash is really present on board */
831 flash_index = spear_smi_probe_flash(dev, bank);
832 if (flash_index < 0) {
833 dev_info(&dev->pdev->dev, "smi-nor%d not found\n", bank);
834 ret = flash_index;
835 goto err_probe;
836 }
837 /* map the memory for nor flash chip */
838 flash->base_addr = ioremap(flash_info->mem_base, flash_info->size);
839 if (!flash->base_addr) {
840 ret = -EIO;
841 goto err_probe;
842 }
843
844 dev->flash[bank] = flash;
845 flash->mtd.priv = dev;
846
847 if (flash_info->name)
848 flash->mtd.name = flash_info->name;
849 else
850 flash->mtd.name = flash_devices[flash_index].name;
851
852 flash->mtd.type = MTD_NORFLASH;
853 flash->mtd.writesize = 1;
854 flash->mtd.flags = MTD_CAP_NORFLASH;
855 flash->mtd.size = flash_info->size;
856 flash->mtd.erasesize = flash_devices[flash_index].sectorsize;
857 flash->page_size = flash_devices[flash_index].pagesize;
858 flash->mtd.writebufsize = flash->page_size;
859 flash->erase_cmd = flash_devices[flash_index].erase_cmd;
860 flash->mtd._erase = spear_mtd_erase;
861 flash->mtd._read = spear_mtd_read;
862 flash->mtd._write = spear_mtd_write;
863 flash->dev_id = flash_devices[flash_index].device_id;
864
865 dev_info(&dev->pdev->dev, "mtd .name=%s .size=%llx(%lluM)\n",
866 flash->mtd.name, flash->mtd.size,
867 flash->mtd.size / (1024 * 1024));
868
869 dev_info(&dev->pdev->dev, ".erasesize = 0x%x(%uK)\n",
870 flash->mtd.erasesize, flash->mtd.erasesize / 1024);
871
872#ifndef CONFIG_OF
873 if (flash_info->partitions) {
874 parts = flash_info->partitions;
875 count = flash_info->nr_partitions;
876 }
877#endif
878 ppdata.of_node = np;
879
880 ret = mtd_device_parse_register(&flash->mtd, NULL, &ppdata, parts,
881 count);
882 if (ret) {
883 dev_err(&dev->pdev->dev, "Err MTD partition=%d\n", ret);
884 goto err_map;
885 }
886
887 return 0;
888
889err_map:
890 iounmap(flash->base_addr);
891
892err_probe:
893 kfree(flash);
894 return ret;
895}
896
897/**
898 * spear_smi_probe - Entry routine
899 * @pdev: platform device structure
900 *
901 * This is the first routine which gets invoked during booting and does all
902 * initialization/allocation work. The routine looks for available memory banks,
 903 * and does the proper init for each one found.
904 * Returns 0 on success, non zero otherwise
905 */
906static int __devinit spear_smi_probe(struct platform_device *pdev)
907{
908 struct device_node *np = pdev->dev.of_node;
909 struct spear_smi_plat_data *pdata = NULL;
910 struct spear_smi *dev;
911 struct resource *smi_base;
912 int irq, ret = 0;
913 int i;
914
915 if (np) {
916 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
917 if (!pdata) {
918 pr_err("%s: ERROR: no memory", __func__);
919 ret = -ENOMEM;
920 goto err;
921 }
922 pdev->dev.platform_data = pdata;
923 ret = spear_smi_probe_config_dt(pdev, np);
924 if (ret) {
925 ret = -ENODEV;
926 dev_err(&pdev->dev, "no platform data\n");
927 goto err;
928 }
929 } else {
930 pdata = dev_get_platdata(&pdev->dev);
931 if (pdata < 0) {
932 ret = -ENODEV;
933 dev_err(&pdev->dev, "no platform data\n");
934 goto err;
935 }
936 }
937
938 smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
939 if (!smi_base) {
940 ret = -ENODEV;
941 dev_err(&pdev->dev, "invalid smi base address\n");
942 goto err;
943 }
944
945 irq = platform_get_irq(pdev, 0);
946 if (irq < 0) {
947 ret = -ENODEV;
948 dev_err(&pdev->dev, "invalid smi irq\n");
949 goto err;
950 }
951
952 dev = kzalloc(sizeof(*dev), GFP_ATOMIC);
953 if (!dev) {
954 ret = -ENOMEM;
955 dev_err(&pdev->dev, "mem alloc fail\n");
956 goto err;
957 }
958
959 smi_base = request_mem_region(smi_base->start, resource_size(smi_base),
960 pdev->name);
961 if (!smi_base) {
962 ret = -EBUSY;
963 dev_err(&pdev->dev, "request mem region fail\n");
964 goto err_mem;
965 }
966
967 dev->io_base = ioremap(smi_base->start, resource_size(smi_base));
968 if (!dev->io_base) {
969 ret = -EIO;
970 dev_err(&pdev->dev, "ioremap fail\n");
971 goto err_ioremap;
972 }
973
974 dev->pdev = pdev;
975 dev->clk_rate = pdata->clk_rate;
976
977 if (dev->clk_rate < 0 || dev->clk_rate > SMI_MAX_CLOCK_FREQ)
978 dev->clk_rate = SMI_MAX_CLOCK_FREQ;
979
980 dev->num_flashes = pdata->num_flashes;
981
982 if (dev->num_flashes > MAX_NUM_FLASH_CHIP) {
983 dev_err(&pdev->dev, "exceeding max number of flashes\n");
984 dev->num_flashes = MAX_NUM_FLASH_CHIP;
985 }
986
987 dev->clk = clk_get(&pdev->dev, NULL);
988 if (IS_ERR(dev->clk)) {
989 ret = PTR_ERR(dev->clk);
990 goto err_clk;
991 }
992
993 ret = clk_enable(dev->clk);
994 if (ret)
995 goto err_clk_enable;
996
997 ret = request_irq(irq, spear_smi_int_handler, 0, pdev->name, dev);
998 if (ret) {
999 dev_err(&dev->pdev->dev, "SMI IRQ allocation failed\n");
1000 goto err_irq;
1001 }
1002
1003 mutex_init(&dev->lock);
1004 init_waitqueue_head(&dev->cmd_complete);
1005 spear_smi_hw_init(dev);
1006 platform_set_drvdata(pdev, dev);
1007
1008 /* loop for each serial nor-flash which is connected to smi */
1009 for (i = 0; i < dev->num_flashes; i++) {
1010 ret = spear_smi_setup_banks(pdev, i, pdata->np[i]);
1011 if (ret) {
1012 dev_err(&dev->pdev->dev, "bank setup failed\n");
1013 goto err_bank_setup;
1014 }
1015 }
1016
1017 return 0;
1018
1019err_bank_setup:
1020 free_irq(irq, dev);
1021 platform_set_drvdata(pdev, NULL);
1022err_irq:
1023 clk_disable(dev->clk);
1024err_clk_enable:
1025 clk_put(dev->clk);
1026err_clk:
1027 iounmap(dev->io_base);
1028err_ioremap:
1029 release_mem_region(smi_base->start, resource_size(smi_base));
1030err_mem:
1031 kfree(dev);
1032err:
1033 return ret;
1034}
1035
1036/**
1037 * spear_smi_remove - Exit routine
1038 * @pdev: platform device structure
1039 *
1040 * free all allocations and delete the partitions.
1041 */
1042static int __devexit spear_smi_remove(struct platform_device *pdev)
1043{
1044 struct spear_smi *dev;
1045 struct spear_smi_plat_data *pdata;
1046 struct spear_snor_flash *flash;
1047 struct resource *smi_base;
1048 int ret;
1049 int i, irq;
1050
1051 dev = platform_get_drvdata(pdev);
1052 if (!dev) {
1053 dev_err(&pdev->dev, "dev is null\n");
1054 return -ENODEV;
1055 }
1056
1057 pdata = dev_get_platdata(&pdev->dev);
1058
1059 /* clean up for all nor flash */
1060 for (i = 0; i < dev->num_flashes; i++) {
1061 flash = dev->flash[i];
1062 if (!flash)
1063 continue;
1064
1065 /* clean up mtd stuff */
1066 ret = mtd_device_unregister(&flash->mtd);
1067 if (ret)
1068 dev_err(&pdev->dev, "error removing mtd\n");
1069
1070 iounmap(flash->base_addr);
1071 kfree(flash);
1072 }
1073
1074 irq = platform_get_irq(pdev, 0);
1075 free_irq(irq, dev);
1076
1077 clk_disable(dev->clk);
1078 clk_put(dev->clk);
1079 iounmap(dev->io_base);
1080 kfree(dev);
1081
1082 smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1083 release_mem_region(smi_base->start, resource_size(smi_base));
1084 platform_set_drvdata(pdev, NULL);
1085
1086 return 0;
1087}
1088
1089int spear_smi_suspend(struct platform_device *pdev, pm_message_t state)
1090{
1091 struct spear_smi *dev = platform_get_drvdata(pdev);
1092
1093 if (dev && dev->clk)
1094 clk_disable(dev->clk);
1095
1096 return 0;
1097}
1098
1099int spear_smi_resume(struct platform_device *pdev)
1100{
1101 struct spear_smi *dev = platform_get_drvdata(pdev);
1102 int ret = -EPERM;
1103
1104 if (dev && dev->clk)
1105 ret = clk_enable(dev->clk);
1106
1107 if (!ret)
1108 spear_smi_hw_init(dev);
1109 return ret;
1110}
1111
1112#ifdef CONFIG_OF
1113static const struct of_device_id spear_smi_id_table[] = {
1114 { .compatible = "st,spear600-smi" },
1115 {}
1116};
1117MODULE_DEVICE_TABLE(of, spear_smi_id_table);
1118#endif
1119
1120static struct platform_driver spear_smi_driver = {
1121 .driver = {
1122 .name = "smi",
1123 .bus = &platform_bus_type,
1124 .owner = THIS_MODULE,
1125 .of_match_table = of_match_ptr(spear_smi_id_table),
1126 },
1127 .probe = spear_smi_probe,
1128 .remove = __devexit_p(spear_smi_remove),
1129 .suspend = spear_smi_suspend,
1130 .resume = spear_smi_resume,
1131};
1132
1133static int spear_smi_init(void)
1134{
1135 return platform_driver_register(&spear_smi_driver);
1136}
1137module_init(spear_smi_init);
1138
1139static void spear_smi_exit(void)
1140{
1141 platform_driver_unregister(&spear_smi_driver);
1142}
1143module_exit(spear_smi_exit);
1144
1145MODULE_LICENSE("GPL");
1146MODULE_AUTHOR("Ashish Priyadarshi, Shiraz Hashim <shiraz.hashim@st.com>");
1147MODULE_DESCRIPTION("MTD SMI driver for serial nor flash chips");
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 5fc198350b94..ab8a2f4c8d60 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -175,9 +175,6 @@ static int sst25l_erase(struct mtd_info *mtd, struct erase_info *instr)
175 int err; 175 int err;
176 176
177 /* Sanity checks */ 177 /* Sanity checks */
178 if (instr->addr + instr->len > flash->mtd.size)
179 return -EINVAL;
180
181 if ((uint32_t)instr->len % mtd->erasesize) 178 if ((uint32_t)instr->len % mtd->erasesize)
182 return -EINVAL; 179 return -EINVAL;
183 180
@@ -223,16 +220,6 @@ static int sst25l_read(struct mtd_info *mtd, loff_t from, size_t len,
223 unsigned char command[4]; 220 unsigned char command[4];
224 int ret; 221 int ret;
225 222
226 /* Sanity checking */
227 if (len == 0)
228 return 0;
229
230 if (from + len > flash->mtd.size)
231 return -EINVAL;
232
233 if (retlen)
234 *retlen = 0;
235
236 spi_message_init(&message); 223 spi_message_init(&message);
237 memset(&transfer, 0, sizeof(transfer)); 224 memset(&transfer, 0, sizeof(transfer));
238 225
@@ -274,13 +261,6 @@ static int sst25l_write(struct mtd_info *mtd, loff_t to, size_t len,
274 int i, j, ret, bytes, copied = 0; 261 int i, j, ret, bytes, copied = 0;
275 unsigned char command[5]; 262 unsigned char command[5];
276 263
277 /* Sanity checks */
278 if (!len)
279 return 0;
280
281 if (to + len > flash->mtd.size)
282 return -EINVAL;
283
284 if ((uint32_t)to % mtd->writesize) 264 if ((uint32_t)to % mtd->writesize)
285 return -EINVAL; 265 return -EINVAL;
286 266
@@ -402,10 +382,11 @@ static int __devinit sst25l_probe(struct spi_device *spi)
402 flash->mtd.flags = MTD_CAP_NORFLASH; 382 flash->mtd.flags = MTD_CAP_NORFLASH;
403 flash->mtd.erasesize = flash_info->erase_size; 383 flash->mtd.erasesize = flash_info->erase_size;
404 flash->mtd.writesize = flash_info->page_size; 384 flash->mtd.writesize = flash_info->page_size;
385 flash->mtd.writebufsize = flash_info->page_size;
405 flash->mtd.size = flash_info->page_size * flash_info->nr_pages; 386 flash->mtd.size = flash_info->page_size * flash_info->nr_pages;
406 flash->mtd.erase = sst25l_erase; 387 flash->mtd._erase = sst25l_erase;
407 flash->mtd.read = sst25l_read; 388 flash->mtd._read = sst25l_read;
408 flash->mtd.write = sst25l_write; 389 flash->mtd._write = sst25l_write;
409 390
410 dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name, 391 dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name,
411 (long long)flash->mtd.size >> 10); 392 (long long)flash->mtd.size >> 10);
@@ -418,9 +399,9 @@ static int __devinit sst25l_probe(struct spi_device *spi)
418 flash->mtd.numeraseregions); 399 flash->mtd.numeraseregions);
419 400
420 401
421 ret = mtd_device_parse_register(&flash->mtd, NULL, 0, 402 ret = mtd_device_parse_register(&flash->mtd, NULL, NULL,
422 data ? data->parts : NULL, 403 data ? data->parts : NULL,
423 data ? data->nr_parts : 0); 404 data ? data->nr_parts : 0);
424 if (ret) { 405 if (ret) {
425 kfree(flash); 406 kfree(flash);
426 dev_set_drvdata(&spi->dev, NULL); 407 dev_set_drvdata(&spi->dev, NULL);
@@ -450,18 +431,7 @@ static struct spi_driver sst25l_driver = {
450 .remove = __devexit_p(sst25l_remove), 431 .remove = __devexit_p(sst25l_remove),
451}; 432};
452 433
453static int __init sst25l_init(void) 434module_spi_driver(sst25l_driver);
454{
455 return spi_register_driver(&sst25l_driver);
456}
457
458static void __exit sst25l_exit(void)
459{
460 spi_unregister_driver(&sst25l_driver);
461}
462
463module_init(sst25l_init);
464module_exit(sst25l_exit);
465 435
466MODULE_DESCRIPTION("MTD SPI driver for SST25L Flash chips"); 436MODULE_DESCRIPTION("MTD SPI driver for SST25L Flash chips");
467MODULE_AUTHOR("Andre Renaud <andre@bluewatersys.com>, " 437MODULE_AUTHOR("Andre Renaud <andre@bluewatersys.com>, "
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 28646c95cfb8..3af351484098 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -56,7 +56,7 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
56 if (memcmp(mtd->name, "DiskOnChip", 10)) 56 if (memcmp(mtd->name, "DiskOnChip", 10))
57 return; 57 return;
58 58
59 if (!mtd->block_isbad) { 59 if (!mtd->_block_isbad) {
60 printk(KERN_ERR 60 printk(KERN_ERR
61"INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n" 61"INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
62"Please use the new diskonchip driver under the NAND subsystem.\n"); 62"Please use the new diskonchip driver under the NAND subsystem.\n");
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 536bbceaeaad..d3cfe26beeaa 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -40,7 +40,7 @@ static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
40static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 40static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
41static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len, 41static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
42 size_t *retlen, void **mtdbuf, resource_size_t *phys); 42 size_t *retlen, void **mtdbuf, resource_size_t *phys);
43static void lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len); 43static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
44static int get_chip(struct map_info *map, struct flchip *chip, int mode); 44static int get_chip(struct map_info *map, struct flchip *chip, int mode);
45static int chip_ready(struct map_info *map, struct flchip *chip, int mode); 45static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
46static void put_chip(struct map_info *map, struct flchip *chip); 46static void put_chip(struct map_info *map, struct flchip *chip);
@@ -63,18 +63,18 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
63 mtd->type = MTD_NORFLASH; 63 mtd->type = MTD_NORFLASH;
64 64
65 /* Fill in the default mtd operations */ 65 /* Fill in the default mtd operations */
66 mtd->read = lpddr_read; 66 mtd->_read = lpddr_read;
67 mtd->type = MTD_NORFLASH; 67 mtd->type = MTD_NORFLASH;
68 mtd->flags = MTD_CAP_NORFLASH; 68 mtd->flags = MTD_CAP_NORFLASH;
69 mtd->flags &= ~MTD_BIT_WRITEABLE; 69 mtd->flags &= ~MTD_BIT_WRITEABLE;
70 mtd->erase = lpddr_erase; 70 mtd->_erase = lpddr_erase;
71 mtd->write = lpddr_write_buffers; 71 mtd->_write = lpddr_write_buffers;
72 mtd->writev = lpddr_writev; 72 mtd->_writev = lpddr_writev;
73 mtd->lock = lpddr_lock; 73 mtd->_lock = lpddr_lock;
74 mtd->unlock = lpddr_unlock; 74 mtd->_unlock = lpddr_unlock;
75 if (map_is_linear(map)) { 75 if (map_is_linear(map)) {
76 mtd->point = lpddr_point; 76 mtd->_point = lpddr_point;
77 mtd->unpoint = lpddr_unpoint; 77 mtd->_unpoint = lpddr_unpoint;
78 } 78 }
79 mtd->size = 1 << lpddr->qinfo->DevSizeShift; 79 mtd->size = 1 << lpddr->qinfo->DevSizeShift;
80 mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift; 80 mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
@@ -530,14 +530,12 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
530 struct flchip *chip = &lpddr->chips[chipnum]; 530 struct flchip *chip = &lpddr->chips[chipnum];
531 int ret = 0; 531 int ret = 0;
532 532
533 if (!map->virt || (adr + len > mtd->size)) 533 if (!map->virt)
534 return -EINVAL; 534 return -EINVAL;
535 535
536 /* ofs: offset within the first chip that the first read should start */ 536 /* ofs: offset within the first chip that the first read should start */
537 ofs = adr - (chipnum << lpddr->chipshift); 537 ofs = adr - (chipnum << lpddr->chipshift);
538
539 *mtdbuf = (void *)map->virt + chip->start + ofs; 538 *mtdbuf = (void *)map->virt + chip->start + ofs;
540 *retlen = 0;
541 539
542 while (len) { 540 while (len) {
543 unsigned long thislen; 541 unsigned long thislen;
@@ -575,11 +573,11 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
575 return 0; 573 return 0;
576} 574}
577 575
578static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len) 576static int lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
579{ 577{
580 struct map_info *map = mtd->priv; 578 struct map_info *map = mtd->priv;
581 struct lpddr_private *lpddr = map->fldrv_priv; 579 struct lpddr_private *lpddr = map->fldrv_priv;
582 int chipnum = adr >> lpddr->chipshift; 580 int chipnum = adr >> lpddr->chipshift, err = 0;
583 unsigned long ofs; 581 unsigned long ofs;
584 582
585 /* ofs: offset within the first chip that the first read should start */ 583 /* ofs: offset within the first chip that the first read should start */
@@ -603,9 +601,11 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
603 chip->ref_point_counter--; 601 chip->ref_point_counter--;
604 if (chip->ref_point_counter == 0) 602 if (chip->ref_point_counter == 0)
605 chip->state = FL_READY; 603 chip->state = FL_READY;
606 } else 604 } else {
607 printk(KERN_WARNING "%s: Warning: unpoint called on non" 605 printk(KERN_WARNING "%s: Warning: unpoint called on non"
608 "pointed region\n", map->name); 606 "pointed region\n", map->name);
607 err = -EINVAL;
608 }
609 609
610 put_chip(map, chip); 610 put_chip(map, chip);
611 mutex_unlock(&chip->mutex); 611 mutex_unlock(&chip->mutex);
@@ -614,6 +614,8 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
614 ofs = 0; 614 ofs = 0;
615 chipnum++; 615 chipnum++;
616 } 616 }
617
618 return err;
617} 619}
618 620
619static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, 621static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
@@ -637,13 +639,11 @@ static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
637 int chipnum; 639 int chipnum;
638 unsigned long ofs, vec_seek, i; 640 unsigned long ofs, vec_seek, i;
639 int wbufsize = 1 << lpddr->qinfo->BufSizeShift; 641 int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
640
641 size_t len = 0; 642 size_t len = 0;
642 643
643 for (i = 0; i < count; i++) 644 for (i = 0; i < count; i++)
644 len += vecs[i].iov_len; 645 len += vecs[i].iov_len;
645 646
646 *retlen = 0;
647 if (!len) 647 if (!len)
648 return 0; 648 return 0;
649 649
@@ -688,9 +688,6 @@ static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
688 ofs = instr->addr; 688 ofs = instr->addr;
689 len = instr->len; 689 len = instr->len;
690 690
691 if (ofs > mtd->size || (len + ofs) > mtd->size)
692 return -EINVAL;
693
694 while (len > 0) { 691 while (len > 0) {
695 ret = do_erase_oneblock(mtd, ofs); 692 ret = do_erase_oneblock(mtd, ofs);
696 if (ret) 693 if (ret)
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 650126c361f1..ef5cde84a8b3 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -164,8 +164,8 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
164 return -ENXIO; 164 return -ENXIO;
165 } 165 }
166 166
167 mtd_device_parse_register(state->mtd, part_probe_types, 0, 167 mtd_device_parse_register(state->mtd, part_probe_types, NULL,
168 pdata->parts, pdata->nr_parts); 168 pdata->parts, pdata->nr_parts);
169 169
170 platform_set_drvdata(pdev, state); 170 platform_set_drvdata(pdev, state);
171 171
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index f43b365b848c..080f06053bd4 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -196,7 +196,7 @@ static int __init init_dc21285(void)
196 196
197 dc21285_mtd->owner = THIS_MODULE; 197 dc21285_mtd->owner = THIS_MODULE;
198 198
199 mtd_device_parse_register(dc21285_mtd, probes, 0, NULL, 0); 199 mtd_device_parse_register(dc21285_mtd, probes, NULL, NULL, 0);
200 200
201 if(machine_is_ebsa285()) { 201 if(machine_is_ebsa285()) {
202 /* 202 /*
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 33cce895859f..e4de96ba52b3 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -252,8 +252,8 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev)
252 } 252 }
253 253
254 254
255 mtd_device_parse_register(state->mtd, part_probe_types, 0, 255 mtd_device_parse_register(state->mtd, part_probe_types, NULL,
256 pdata->parts, pdata->nr_parts); 256 pdata->parts, pdata->nr_parts);
257 257
258 return 0; 258 return 0;
259} 259}
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 49c14187fc66..8ed6cb4529d8 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -85,8 +85,8 @@ static int __init h720x_mtd_init(void)
85 if (mymtd) { 85 if (mymtd) {
86 mymtd->owner = THIS_MODULE; 86 mymtd->owner = THIS_MODULE;
87 87
88 mtd_device_parse_register(mymtd, NULL, 0, 88 mtd_device_parse_register(mymtd, NULL, NULL,
89 h720x_partitions, NUM_PARTITIONS); 89 h720x_partitions, NUM_PARTITIONS);
90 return 0; 90 return 0;
91 } 91 }
92 92
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index f47aedb24366..834a06c56f56 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -91,7 +91,7 @@ static int __init init_impa7(void)
91 if (impa7_mtd[i]) { 91 if (impa7_mtd[i]) {
92 impa7_mtd[i]->owner = THIS_MODULE; 92 impa7_mtd[i]->owner = THIS_MODULE;
93 devicesfound++; 93 devicesfound++;
94 mtd_device_parse_register(impa7_mtd[i], NULL, 0, 94 mtd_device_parse_register(impa7_mtd[i], NULL, NULL,
95 partitions, 95 partitions,
96 ARRAY_SIZE(partitions)); 96 ARRAY_SIZE(partitions));
97 } 97 }
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index 08c239604ee4..92e1f41634c7 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -72,7 +72,7 @@ static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p)
72{ 72{
73 /* register the flash bank */ 73 /* register the flash bank */
74 /* partition the flash bank */ 74 /* partition the flash bank */
75 return mtd_device_parse_register(p->info, NULL, 0, NULL, 0); 75 return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0);
76} 76}
77 77
78static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p) 78static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index fc7d4d0d9a4e..4a41ced0f710 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -226,7 +226,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
226 } 226 }
227 info->mtd->owner = THIS_MODULE; 227 info->mtd->owner = THIS_MODULE;
228 228
229 err = mtd_device_parse_register(info->mtd, probes, 0, NULL, 0); 229 err = mtd_device_parse_register(info->mtd, probes, NULL, NULL, 0);
230 if (err) 230 if (err)
231 goto Error; 231 goto Error;
232 232
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 8b5410162d70..e864fc6c58f9 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -182,6 +182,9 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
182{ 182{
183 struct flash_platform_data *plat = dev->dev.platform_data; 183 struct flash_platform_data *plat = dev->dev.platform_data;
184 struct ixp4xx_flash_info *info; 184 struct ixp4xx_flash_info *info;
185 struct mtd_part_parser_data ppdata = {
186 .origin = dev->resource->start,
187 };
185 int err = -1; 188 int err = -1;
186 189
187 if (!plat) 190 if (!plat)
@@ -247,7 +250,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
247 /* Use the fast version */ 250 /* Use the fast version */
248 info->map.write = ixp4xx_write16; 251 info->map.write = ixp4xx_write16;
249 252
250 err = mtd_device_parse_register(info->mtd, probes, dev->resource->start, 253 err = mtd_device_parse_register(info->mtd, probes, &ppdata,
251 plat->parts, plat->nr_parts); 254 plat->parts, plat->nr_parts);
252 if (err) { 255 if (err) {
253 printk(KERN_ERR "Could not parse partitions\n"); 256 printk(KERN_ERR "Could not parse partitions\n");
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index dd0360ba2412..74bd98ee635f 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -27,17 +27,21 @@ static struct mtd_info *mymtd;
27 27
28 28
29/* Is this really the vpp port? */ 29/* Is this really the vpp port? */
30static DEFINE_SPINLOCK(l440gx_vpp_lock);
31static int l440gx_vpp_refcnt;
30static void l440gx_set_vpp(struct map_info *map, int vpp) 32static void l440gx_set_vpp(struct map_info *map, int vpp)
31{ 33{
32 unsigned long l; 34 unsigned long flags;
33 35
34 l = inl(VPP_PORT); 36 spin_lock_irqsave(&l440gx_vpp_lock, flags);
35 if (vpp) { 37 if (vpp) {
36 l |= 1; 38 if (++l440gx_vpp_refcnt == 1) /* first nested 'on' */
39 outl(inl(VPP_PORT) | 1, VPP_PORT);
37 } else { 40 } else {
38 l &= ~1; 41 if (--l440gx_vpp_refcnt == 0) /* last nested 'off' */
42 outl(inl(VPP_PORT) & ~1, VPP_PORT);
39 } 43 }
40 outl(l, VPP_PORT); 44 spin_unlock_irqrestore(&l440gx_vpp_lock, flags);
41} 45}
42 46
43static struct map_info l440gx_map = { 47static struct map_info l440gx_map = {
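The l440gx change above is one instance of a pattern this series applies to several map drivers (pcmciamtd, physmap and sa1100 below follow suit): Vpp on/off requests from the chip drivers may nest, so the hardware toggle is guarded by a spinlock-protected reference count and only happens on the first 'on' and the last 'off'. A generic sketch of the pattern, with hypothetical my_vpp_* names and a stubbed hw_set_vpp() standing in for the board-specific register write or platform callback:

#include <linux/spinlock.h>

static void hw_set_vpp(int on)
{
	/* board-specific Vpp toggle would go here (outl(), platform hook, ...) */
}

static DEFINE_SPINLOCK(my_vpp_lock);
static int my_vpp_refcnt;

static void my_set_vpp(int on)
{
	unsigned long flags;

	spin_lock_irqsave(&my_vpp_lock, flags);
	if (on) {
		if (++my_vpp_refcnt == 1)	/* first nested 'on' */
			hw_set_vpp(1);
	} else {
		if (--my_vpp_refcnt == 0)	/* last nested 'off' */
			hw_set_vpp(0);
	}
	spin_unlock_irqrestore(&my_vpp_lock, flags);
}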
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index 7b889de9477b..b5401e355745 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -45,6 +45,7 @@ struct ltq_mtd {
45}; 45};
46 46
47static char ltq_map_name[] = "ltq_nor"; 47static char ltq_map_name[] = "ltq_nor";
48static const char *ltq_probe_types[] __devinitconst = { "cmdlinepart", NULL };
48 49
49static map_word 50static map_word
50ltq_read16(struct map_info *map, unsigned long adr) 51ltq_read16(struct map_info *map, unsigned long adr)
@@ -168,8 +169,9 @@ ltq_mtd_probe(struct platform_device *pdev)
168 cfi->addr_unlock1 ^= 1; 169 cfi->addr_unlock1 ^= 1;
169 cfi->addr_unlock2 ^= 1; 170 cfi->addr_unlock2 ^= 1;
170 171
171 err = mtd_device_parse_register(ltq_mtd->mtd, NULL, 0, 172 err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types, NULL,
172 ltq_mtd_data->parts, ltq_mtd_data->nr_parts); 173 ltq_mtd_data->parts,
174 ltq_mtd_data->nr_parts);
173 if (err) { 175 if (err) {
174 dev_err(&pdev->dev, "failed to add partitions\n"); 176 dev_err(&pdev->dev, "failed to add partitions\n");
175 goto err_destroy; 177 goto err_destroy;
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index 8fed58e3a4a8..3c7ad17fca78 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -199,8 +199,9 @@ static int __devinit latch_addr_flash_probe(struct platform_device *dev)
199 } 199 }
200 info->mtd->owner = THIS_MODULE; 200 info->mtd->owner = THIS_MODULE;
201 201
202 mtd_device_parse_register(info->mtd, NULL, 0, 202 mtd_device_parse_register(info->mtd, NULL, NULL,
203 latch_addr_data->parts, latch_addr_data->nr_parts); 203 latch_addr_data->parts,
204 latch_addr_data->nr_parts);
204 return 0; 205 return 0;
205 206
206iounmap: 207iounmap:
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 0259cf583022..a3cfad392ed6 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -294,13 +294,24 @@ static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *f
294} 294}
295 295
296 296
297static DEFINE_SPINLOCK(pcmcia_vpp_lock);
298static int pcmcia_vpp_refcnt;
297static void pcmciamtd_set_vpp(struct map_info *map, int on) 299static void pcmciamtd_set_vpp(struct map_info *map, int on)
298{ 300{
299 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; 301 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
300 struct pcmcia_device *link = dev->p_dev; 302 struct pcmcia_device *link = dev->p_dev;
303 unsigned long flags;
301 304
302 pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp); 305 pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp);
303 pcmcia_fixup_vpp(link, on ? dev->vpp : 0); 306 spin_lock_irqsave(&pcmcia_vpp_lock, flags);
307 if (on) {
308 if (++pcmcia_vpp_refcnt == 1) /* first nested 'on' */
309 pcmcia_fixup_vpp(link, dev->vpp);
310 } else {
311 if (--pcmcia_vpp_refcnt == 0) /* last nested 'off' */
312 pcmcia_fixup_vpp(link, 0);
313 }
314 spin_unlock_irqrestore(&pcmcia_vpp_lock, flags);
304} 315}
305 316
306 317
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index abc562653b31..21b0b713cacb 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -27,6 +27,8 @@ struct physmap_flash_info {
27 struct mtd_info *mtd[MAX_RESOURCES]; 27 struct mtd_info *mtd[MAX_RESOURCES];
28 struct mtd_info *cmtd; 28 struct mtd_info *cmtd;
29 struct map_info map[MAX_RESOURCES]; 29 struct map_info map[MAX_RESOURCES];
30 spinlock_t vpp_lock;
31 int vpp_refcnt;
30}; 32};
31 33
32static int physmap_flash_remove(struct platform_device *dev) 34static int physmap_flash_remove(struct platform_device *dev)
@@ -63,12 +65,26 @@ static void physmap_set_vpp(struct map_info *map, int state)
63{ 65{
64 struct platform_device *pdev; 66 struct platform_device *pdev;
65 struct physmap_flash_data *physmap_data; 67 struct physmap_flash_data *physmap_data;
68 struct physmap_flash_info *info;
69 unsigned long flags;
66 70
67 pdev = (struct platform_device *)map->map_priv_1; 71 pdev = (struct platform_device *)map->map_priv_1;
68 physmap_data = pdev->dev.platform_data; 72 physmap_data = pdev->dev.platform_data;
69 73
70 if (physmap_data->set_vpp) 74 if (!physmap_data->set_vpp)
71 physmap_data->set_vpp(pdev, state); 75 return;
76
77 info = platform_get_drvdata(pdev);
78
79 spin_lock_irqsave(&info->vpp_lock, flags);
80 if (state) {
81 if (++info->vpp_refcnt == 1) /* first nested 'on' */
82 physmap_data->set_vpp(pdev, 1);
83 } else {
84 if (--info->vpp_refcnt == 0) /* last nested 'off' */
85 physmap_data->set_vpp(pdev, 0);
86 }
87 spin_unlock_irqrestore(&info->vpp_lock, flags);
72} 88}
73 89
74static const char *rom_probe_types[] = { 90static const char *rom_probe_types[] = {
@@ -172,9 +188,11 @@ static int physmap_flash_probe(struct platform_device *dev)
172 if (err) 188 if (err)
173 goto err_out; 189 goto err_out;
174 190
191 spin_lock_init(&info->vpp_lock);
192
175 part_types = physmap_data->part_probe_types ? : part_probe_types; 193 part_types = physmap_data->part_probe_types ? : part_probe_types;
176 194
177 mtd_device_parse_register(info->cmtd, part_types, 0, 195 mtd_device_parse_register(info->cmtd, part_types, NULL,
178 physmap_data->parts, physmap_data->nr_parts); 196 physmap_data->parts, physmap_data->nr_parts);
179 return 0; 197 return 0;
180 198
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 45876d0e5b8e..891558de3ec1 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -222,8 +222,9 @@ static int platram_probe(struct platform_device *pdev)
222 /* check to see if there are any available partitions, or whether 222 /* check to see if there are any available partitions, or whether
223 * to add this device whole */ 223 * to add this device whole */
224 224
225 err = mtd_device_parse_register(info->mtd, pdata->probes, 0, 225 err = mtd_device_parse_register(info->mtd, pdata->probes, NULL,
226 pdata->partitions, pdata->nr_partitions); 226 pdata->partitions,
227 pdata->nr_partitions);
227 if (!err) 228 if (!err)
228 dev_info(&pdev->dev, "registered mtd device\n"); 229 dev_info(&pdev->dev, "registered mtd device\n");
229 230
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 436d121185b1..81884c277405 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -98,7 +98,8 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
98 } 98 }
99 info->mtd->owner = THIS_MODULE; 99 info->mtd->owner = THIS_MODULE;
100 100
101 mtd_device_parse_register(info->mtd, probes, 0, flash->parts, flash->nr_parts); 101 mtd_device_parse_register(info->mtd, probes, NULL, flash->parts,
102 flash->nr_parts);
102 103
103 platform_set_drvdata(pdev, info); 104 platform_set_drvdata(pdev, info);
104 return 0; 105 return 0;
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 3da63fc6f16e..6f52e1f288b6 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -102,8 +102,8 @@ static int rbtx4939_flash_probe(struct platform_device *dev)
102 info->mtd->owner = THIS_MODULE; 102 info->mtd->owner = THIS_MODULE;
103 if (err) 103 if (err)
104 goto err_out; 104 goto err_out;
105 err = mtd_device_parse_register(info->mtd, NULL, 0, 105 err = mtd_device_parse_register(info->mtd, NULL, NULL, pdata->parts,
106 pdata->parts, pdata->nr_parts); 106 pdata->nr_parts);
107 107
108 if (err) 108 if (err)
109 goto err_out; 109 goto err_out;
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index cbc3b7867910..a675bdbcb0fe 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -36,10 +36,22 @@ struct sa_info {
36 struct sa_subdev_info subdev[0]; 36 struct sa_subdev_info subdev[0];
37}; 37};
38 38
39static DEFINE_SPINLOCK(sa1100_vpp_lock);
40static int sa1100_vpp_refcnt;
39static void sa1100_set_vpp(struct map_info *map, int on) 41static void sa1100_set_vpp(struct map_info *map, int on)
40{ 42{
41 struct sa_subdev_info *subdev = container_of(map, struct sa_subdev_info, map); 43 struct sa_subdev_info *subdev = container_of(map, struct sa_subdev_info, map);
42 subdev->plat->set_vpp(on); 44 unsigned long flags;
45
46 spin_lock_irqsave(&sa1100_vpp_lock, flags);
47 if (on) {
48 if (++sa1100_vpp_refcnt == 1) /* first nested 'on' */
49 subdev->plat->set_vpp(1);
50 } else {
51 if (--sa1100_vpp_refcnt == 0) /* last nested 'off' */
52 subdev->plat->set_vpp(0);
53 }
54 spin_unlock_irqrestore(&sa1100_vpp_lock, flags);
43} 55}
44 56
45static void sa1100_destroy_subdev(struct sa_subdev_info *subdev) 57static void sa1100_destroy_subdev(struct sa_subdev_info *subdev)
@@ -252,8 +264,8 @@ static int __devinit sa1100_mtd_probe(struct platform_device *pdev)
252 /* 264 /*
253 * Partition selection stuff. 265 * Partition selection stuff.
254 */ 266 */
255 mtd_device_parse_register(info->mtd, part_probes, 0, 267 mtd_device_parse_register(info->mtd, part_probes, NULL, plat->parts,
256 plat->parts, plat->nr_parts); 268 plat->nr_parts);
257 269
258 platform_set_drvdata(pdev, info); 270 platform_set_drvdata(pdev, info);
259 err = 0; 271 err = 0;
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index 496c40704aff..9d900ada6708 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -92,8 +92,8 @@ static int __init init_soleng_maps(void)
92 mtd_device_register(eprom_mtd, NULL, 0); 92 mtd_device_register(eprom_mtd, NULL, 0);
93 } 93 }
94 94
95 mtd_device_parse_register(flash_mtd, probes, 0, 95 mtd_device_parse_register(flash_mtd, probes, NULL,
96 superh_se_partitions, NUM_PARTITIONS); 96 superh_se_partitions, NUM_PARTITIONS);
97 97
98 return 0; 98 return 0;
99} 99}
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 6793074f3f40..cfff454f628b 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -85,7 +85,7 @@ static int __init uclinux_mtd_init(void)
85 } 85 }
86 86
87 mtd->owner = THIS_MODULE; 87 mtd->owner = THIS_MODULE;
88 mtd->point = uclinux_point; 88 mtd->_point = uclinux_point;
89 mtd->priv = mapp; 89 mtd->priv = mapp;
90 90
91 uclinux_ram_mtdinfo = mtd; 91 uclinux_ram_mtdinfo = mtd;
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index 3a04b078576a..2e2b0945edc7 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -360,9 +360,6 @@ static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
360 int index = 0, retval, partition, leftover, numblocks; 360 int index = 0, retval, partition, leftover, numblocks;
361 unsigned char cx; 361 unsigned char cx;
362 362
363 if (len < 1)
364 return -EIO;
365
366 mpart = mtd->priv; 363 mpart = mtd->priv;
367 mdev = mpart->mdev; 364 mdev = mpart->mdev;
368 partition = mpart->partition; 365 partition = mpart->partition;
@@ -434,11 +431,6 @@ static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
434 partition = mpart->partition; 431 partition = mpart->partition;
435 card = maple_get_drvdata(mdev); 432 card = maple_get_drvdata(mdev);
436 433
437 /* simple sanity checks */
438 if (len < 1) {
439 error = -EIO;
440 goto failed;
441 }
442 numblocks = card->parts[partition].numblocks; 434 numblocks = card->parts[partition].numblocks;
443 if (to + len > numblocks * card->blocklen) 435 if (to + len > numblocks * card->blocklen)
444 len = numblocks * card->blocklen - to; 436 len = numblocks * card->blocklen - to;
@@ -544,9 +536,9 @@ static void vmu_queryblocks(struct mapleq *mq)
544 mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE; 536 mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
545 mtd_cur->size = part_cur->numblocks * card->blocklen; 537 mtd_cur->size = part_cur->numblocks * card->blocklen;
546 mtd_cur->erasesize = card->blocklen; 538 mtd_cur->erasesize = card->blocklen;
547 mtd_cur->write = vmu_flash_write; 539 mtd_cur->_write = vmu_flash_write;
548 mtd_cur->read = vmu_flash_read; 540 mtd_cur->_read = vmu_flash_read;
549 mtd_cur->sync = vmu_flash_sync; 541 mtd_cur->_sync = vmu_flash_sync;
550 mtd_cur->writesize = card->blocklen; 542 mtd_cur->writesize = card->blocklen;
551 543
552 mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL); 544 mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index aa7e0cb2893c..71b0ba797912 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -142,7 +142,7 @@ static int __init init_sbc82xx_flash(void)
142 nr_parts = ARRAY_SIZE(smallflash_parts); 142 nr_parts = ARRAY_SIZE(smallflash_parts);
143 } 143 }
144 144
145 mtd_device_parse_register(sbcmtd[i], part_probes, 0, 145 mtd_device_parse_register(sbcmtd[i], part_probes, NULL,
146 defparts, nr_parts); 146 defparts, nr_parts);
147 } 147 }
148 return 0; 148 return 0;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 424ca5f93c6c..f1f06715d4e0 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -233,6 +233,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
233 ret = __get_mtd_device(dev->mtd); 233 ret = __get_mtd_device(dev->mtd);
234 if (ret) 234 if (ret)
235 goto error_release; 235 goto error_release;
236 dev->file_mode = mode;
236 237
237unlock: 238unlock:
238 dev->open++; 239 dev->open++;
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index af6591237b9b..6c6d80736fad 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -321,8 +321,12 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
321 mutex_unlock(&mtdblk->cache_mutex); 321 mutex_unlock(&mtdblk->cache_mutex);
322 322
323 if (!--mtdblk->count) { 323 if (!--mtdblk->count) {
324 /* It was the last usage. Free the cache */ 324 /*
325 mtd_sync(mbd->mtd); 325 * It was the last usage. Free the cache, but only sync if
326 * opened for writing.
327 */
328 if (mbd->file_mode & FMODE_WRITE)
329 mtd_sync(mbd->mtd);
326 vfree(mtdblk->cache_data); 330 vfree(mtdblk->cache_data);
327 } 331 }
328 332
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index c57ae92ebda4..55d82321d307 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -405,7 +405,7 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
405 if (length > 4096) 405 if (length > 4096)
406 return -EINVAL; 406 return -EINVAL;
407 407
408 if (!mtd->write_oob) 408 if (!mtd->_write_oob)
409 ret = -EOPNOTSUPP; 409 ret = -EOPNOTSUPP;
410 else 410 else
411 ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT; 411 ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
@@ -576,7 +576,7 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
576 !access_ok(VERIFY_READ, req.usr_data, req.len) || 576 !access_ok(VERIFY_READ, req.usr_data, req.len) ||
577 !access_ok(VERIFY_READ, req.usr_oob, req.ooblen)) 577 !access_ok(VERIFY_READ, req.usr_oob, req.ooblen))
578 return -EFAULT; 578 return -EFAULT;
579 if (!mtd->write_oob) 579 if (!mtd->_write_oob)
580 return -EOPNOTSUPP; 580 return -EOPNOTSUPP;
581 581
582 ops.mode = req.mode; 582 ops.mode = req.mode;
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 1ed5103b219b..b9000563b9f4 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -72,8 +72,6 @@ concat_read(struct mtd_info *mtd, loff_t from, size_t len,
72 int ret = 0, err; 72 int ret = 0, err;
73 int i; 73 int i;
74 74
75 *retlen = 0;
76
77 for (i = 0; i < concat->num_subdev; i++) { 75 for (i = 0; i < concat->num_subdev; i++) {
78 struct mtd_info *subdev = concat->subdev[i]; 76 struct mtd_info *subdev = concat->subdev[i];
79 size_t size, retsize; 77 size_t size, retsize;
@@ -126,11 +124,6 @@ concat_write(struct mtd_info *mtd, loff_t to, size_t len,
126 int err = -EINVAL; 124 int err = -EINVAL;
127 int i; 125 int i;
128 126
129 if (!(mtd->flags & MTD_WRITEABLE))
130 return -EROFS;
131
132 *retlen = 0;
133
134 for (i = 0; i < concat->num_subdev; i++) { 127 for (i = 0; i < concat->num_subdev; i++) {
135 struct mtd_info *subdev = concat->subdev[i]; 128 struct mtd_info *subdev = concat->subdev[i];
136 size_t size, retsize; 129 size_t size, retsize;
@@ -145,11 +138,7 @@ concat_write(struct mtd_info *mtd, loff_t to, size_t len,
145 else 138 else
146 size = len; 139 size = len;
147 140
148 if (!(subdev->flags & MTD_WRITEABLE)) 141 err = mtd_write(subdev, to, size, &retsize, buf);
149 err = -EROFS;
150 else
151 err = mtd_write(subdev, to, size, &retsize, buf);
152
153 if (err) 142 if (err)
154 break; 143 break;
155 144
@@ -176,19 +165,10 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
176 int i; 165 int i;
177 int err = -EINVAL; 166 int err = -EINVAL;
178 167
179 if (!(mtd->flags & MTD_WRITEABLE))
180 return -EROFS;
181
182 *retlen = 0;
183
184 /* Calculate total length of data */ 168 /* Calculate total length of data */
185 for (i = 0; i < count; i++) 169 for (i = 0; i < count; i++)
186 total_len += vecs[i].iov_len; 170 total_len += vecs[i].iov_len;
187 171
188 /* Do not allow write past end of device */
189 if ((to + total_len) > mtd->size)
190 return -EINVAL;
191
192 /* Check alignment */ 172 /* Check alignment */
193 if (mtd->writesize > 1) { 173 if (mtd->writesize > 1) {
194 uint64_t __to = to; 174 uint64_t __to = to;
@@ -224,12 +204,8 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
224 old_iov_len = vecs_copy[entry_high].iov_len; 204 old_iov_len = vecs_copy[entry_high].iov_len;
225 vecs_copy[entry_high].iov_len = size; 205 vecs_copy[entry_high].iov_len = size;
226 206
227 if (!(subdev->flags & MTD_WRITEABLE)) 207 err = mtd_writev(subdev, &vecs_copy[entry_low],
228 err = -EROFS; 208 entry_high - entry_low + 1, to, &retsize);
229 else
230 err = mtd_writev(subdev, &vecs_copy[entry_low],
231 entry_high - entry_low + 1, to,
232 &retsize);
233 209
234 vecs_copy[entry_high].iov_len = old_iov_len - size; 210 vecs_copy[entry_high].iov_len = old_iov_len - size;
235 vecs_copy[entry_high].iov_base += size; 211 vecs_copy[entry_high].iov_base += size;
@@ -403,15 +379,6 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
403 uint64_t length, offset = 0; 379 uint64_t length, offset = 0;
404 struct erase_info *erase; 380 struct erase_info *erase;
405 381
406 if (!(mtd->flags & MTD_WRITEABLE))
407 return -EROFS;
408
409 if (instr->addr > concat->mtd.size)
410 return -EINVAL;
411
412 if (instr->len + instr->addr > concat->mtd.size)
413 return -EINVAL;
414
415 /* 382 /*
416 * Check for proper erase block alignment of the to-be-erased area. 383 * Check for proper erase block alignment of the to-be-erased area.
417 * It is easier to do this based on the super device's erase 384 * It is easier to do this based on the super device's erase
@@ -459,8 +426,6 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
459 return -EINVAL; 426 return -EINVAL;
460 } 427 }
461 428
462 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
463
464 /* make a local copy of instr to avoid modifying the caller's struct */ 429 /* make a local copy of instr to avoid modifying the caller's struct */
465 erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL); 430 erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
466 431
@@ -499,10 +464,6 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
499 else 464 else
500 erase->len = length; 465 erase->len = length;
501 466
502 if (!(subdev->flags & MTD_WRITEABLE)) {
503 err = -EROFS;
504 break;
505 }
506 length -= erase->len; 467 length -= erase->len;
507 if ((err = concat_dev_erase(subdev, erase))) { 468 if ((err = concat_dev_erase(subdev, erase))) {
508 /* sanity check: should never happen since 469 /* sanity check: should never happen since
@@ -538,9 +499,6 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
538 struct mtd_concat *concat = CONCAT(mtd); 499 struct mtd_concat *concat = CONCAT(mtd);
539 int i, err = -EINVAL; 500 int i, err = -EINVAL;
540 501
541 if ((len + ofs) > mtd->size)
542 return -EINVAL;
543
544 for (i = 0; i < concat->num_subdev; i++) { 502 for (i = 0; i < concat->num_subdev; i++) {
545 struct mtd_info *subdev = concat->subdev[i]; 503 struct mtd_info *subdev = concat->subdev[i];
546 uint64_t size; 504 uint64_t size;
@@ -575,9 +533,6 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
575 struct mtd_concat *concat = CONCAT(mtd); 533 struct mtd_concat *concat = CONCAT(mtd);
576 int i, err = 0; 534 int i, err = 0;
577 535
578 if ((len + ofs) > mtd->size)
579 return -EINVAL;
580
581 for (i = 0; i < concat->num_subdev; i++) { 536 for (i = 0; i < concat->num_subdev; i++) {
582 struct mtd_info *subdev = concat->subdev[i]; 537 struct mtd_info *subdev = concat->subdev[i];
583 uint64_t size; 538 uint64_t size;
@@ -650,9 +605,6 @@ static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
650 if (!mtd_can_have_bb(concat->subdev[0])) 605 if (!mtd_can_have_bb(concat->subdev[0]))
651 return res; 606 return res;
652 607
653 if (ofs > mtd->size)
654 return -EINVAL;
655
656 for (i = 0; i < concat->num_subdev; i++) { 608 for (i = 0; i < concat->num_subdev; i++) {
657 struct mtd_info *subdev = concat->subdev[i]; 609 struct mtd_info *subdev = concat->subdev[i];
658 610
@@ -673,12 +625,6 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
673 struct mtd_concat *concat = CONCAT(mtd); 625 struct mtd_concat *concat = CONCAT(mtd);
674 int i, err = -EINVAL; 626 int i, err = -EINVAL;
675 627
676 if (!mtd_can_have_bb(concat->subdev[0]))
677 return 0;
678
679 if (ofs > mtd->size)
680 return -EINVAL;
681
682 for (i = 0; i < concat->num_subdev; i++) { 628 for (i = 0; i < concat->num_subdev; i++) {
683 struct mtd_info *subdev = concat->subdev[i]; 629 struct mtd_info *subdev = concat->subdev[i];
684 630
@@ -716,10 +662,6 @@ static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
716 continue; 662 continue;
717 } 663 }
718 664
719 /* we've found the subdev over which the mapping will reside */
720 if (offset + len > subdev->size)
721 return (unsigned long) -EINVAL;
722
723 return mtd_get_unmapped_area(subdev, len, offset, flags); 665 return mtd_get_unmapped_area(subdev, len, offset, flags);
724 } 666 }
725 667
@@ -777,16 +719,16 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
777 concat->mtd.subpage_sft = subdev[0]->subpage_sft; 719 concat->mtd.subpage_sft = subdev[0]->subpage_sft;
778 concat->mtd.oobsize = subdev[0]->oobsize; 720 concat->mtd.oobsize = subdev[0]->oobsize;
779 concat->mtd.oobavail = subdev[0]->oobavail; 721 concat->mtd.oobavail = subdev[0]->oobavail;
780 if (subdev[0]->writev) 722 if (subdev[0]->_writev)
781 concat->mtd.writev = concat_writev; 723 concat->mtd._writev = concat_writev;
782 if (subdev[0]->read_oob) 724 if (subdev[0]->_read_oob)
783 concat->mtd.read_oob = concat_read_oob; 725 concat->mtd._read_oob = concat_read_oob;
784 if (subdev[0]->write_oob) 726 if (subdev[0]->_write_oob)
785 concat->mtd.write_oob = concat_write_oob; 727 concat->mtd._write_oob = concat_write_oob;
786 if (subdev[0]->block_isbad) 728 if (subdev[0]->_block_isbad)
787 concat->mtd.block_isbad = concat_block_isbad; 729 concat->mtd._block_isbad = concat_block_isbad;
788 if (subdev[0]->block_markbad) 730 if (subdev[0]->_block_markbad)
789 concat->mtd.block_markbad = concat_block_markbad; 731 concat->mtd._block_markbad = concat_block_markbad;
790 732
791 concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks; 733 concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
792 734
@@ -833,8 +775,8 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
833 if (concat->mtd.writesize != subdev[i]->writesize || 775 if (concat->mtd.writesize != subdev[i]->writesize ||
834 concat->mtd.subpage_sft != subdev[i]->subpage_sft || 776 concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
835 concat->mtd.oobsize != subdev[i]->oobsize || 777 concat->mtd.oobsize != subdev[i]->oobsize ||
836 !concat->mtd.read_oob != !subdev[i]->read_oob || 778 !concat->mtd._read_oob != !subdev[i]->_read_oob ||
837 !concat->mtd.write_oob != !subdev[i]->write_oob) { 779 !concat->mtd._write_oob != !subdev[i]->_write_oob) {
838 kfree(concat); 780 kfree(concat);
839 printk("Incompatible OOB or ECC data on \"%s\"\n", 781 printk("Incompatible OOB or ECC data on \"%s\"\n",
840 subdev[i]->name); 782 subdev[i]->name);
@@ -849,15 +791,15 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
849 concat->num_subdev = num_devs; 791 concat->num_subdev = num_devs;
850 concat->mtd.name = name; 792 concat->mtd.name = name;
851 793
852 concat->mtd.erase = concat_erase; 794 concat->mtd._erase = concat_erase;
853 concat->mtd.read = concat_read; 795 concat->mtd._read = concat_read;
854 concat->mtd.write = concat_write; 796 concat->mtd._write = concat_write;
855 concat->mtd.sync = concat_sync; 797 concat->mtd._sync = concat_sync;
856 concat->mtd.lock = concat_lock; 798 concat->mtd._lock = concat_lock;
857 concat->mtd.unlock = concat_unlock; 799 concat->mtd._unlock = concat_unlock;
858 concat->mtd.suspend = concat_suspend; 800 concat->mtd._suspend = concat_suspend;
859 concat->mtd.resume = concat_resume; 801 concat->mtd._resume = concat_resume;
860 concat->mtd.get_unmapped_area = concat_get_unmapped_area; 802 concat->mtd._get_unmapped_area = concat_get_unmapped_area;
861 803
862 /* 804 /*
863 * Combine the erase block size info of the subdevices: 805 * Combine the erase block size info of the subdevices:
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 9a9ce71a71fc..c837507dfb1c 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -107,7 +107,7 @@ static LIST_HEAD(mtd_notifiers);
107 */ 107 */
108static void mtd_release(struct device *dev) 108static void mtd_release(struct device *dev)
109{ 109{
110 struct mtd_info *mtd = dev_get_drvdata(dev); 110 struct mtd_info __maybe_unused *mtd = dev_get_drvdata(dev);
111 dev_t index = MTD_DEVT(mtd->index); 111 dev_t index = MTD_DEVT(mtd->index);
112 112
113 /* remove /dev/mtdXro node if needed */ 113 /* remove /dev/mtdXro node if needed */
@@ -126,7 +126,7 @@ static int mtd_cls_resume(struct device *dev)
126{ 126{
127 struct mtd_info *mtd = dev_get_drvdata(dev); 127 struct mtd_info *mtd = dev_get_drvdata(dev);
128 128
129 if (mtd && mtd->resume) 129 if (mtd)
130 mtd_resume(mtd); 130 mtd_resume(mtd);
131 return 0; 131 return 0;
132} 132}
@@ -610,8 +610,8 @@ int __get_mtd_device(struct mtd_info *mtd)
610 if (!try_module_get(mtd->owner)) 610 if (!try_module_get(mtd->owner))
611 return -ENODEV; 611 return -ENODEV;
612 612
613 if (mtd->get_device) { 613 if (mtd->_get_device) {
614 err = mtd->get_device(mtd); 614 err = mtd->_get_device(mtd);
615 615
616 if (err) { 616 if (err) {
617 module_put(mtd->owner); 617 module_put(mtd->owner);
@@ -675,14 +675,267 @@ void __put_mtd_device(struct mtd_info *mtd)
675 --mtd->usecount; 675 --mtd->usecount;
676 BUG_ON(mtd->usecount < 0); 676 BUG_ON(mtd->usecount < 0);
677 677
678 if (mtd->put_device) 678 if (mtd->_put_device)
679 mtd->put_device(mtd); 679 mtd->_put_device(mtd);
680 680
681 module_put(mtd->owner); 681 module_put(mtd->owner);
682} 682}
683EXPORT_SYMBOL_GPL(__put_mtd_device); 683EXPORT_SYMBOL_GPL(__put_mtd_device);
684 684
685/* 685/*
686 * Erase is an asynchronous operation. Device drivers are supposed
687 * to call instr->callback() whenever the operation completes, even
688 * if it completes with a failure.
689 * Callers are supposed to pass a callback function and wait for it
690 * to be called before writing to the block.
691 */
692int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
693{
694 if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr)
695 return -EINVAL;
696 if (!(mtd->flags & MTD_WRITEABLE))
697 return -EROFS;
698 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
699 if (!instr->len) {
700 instr->state = MTD_ERASE_DONE;
701 mtd_erase_callback(instr);
702 return 0;
703 }
704 return mtd->_erase(mtd, instr);
705}
706EXPORT_SYMBOL_GPL(mtd_erase);
707
708/*
709 * This stuff is for eXecute-In-Place. phys is optional and may be set to NULL.
710 */
711int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
712 void **virt, resource_size_t *phys)
713{
714 *retlen = 0;
715 *virt = NULL;
716 if (phys)
717 *phys = 0;
718 if (!mtd->_point)
719 return -EOPNOTSUPP;
720 if (from < 0 || from > mtd->size || len > mtd->size - from)
721 return -EINVAL;
722 if (!len)
723 return 0;
724 return mtd->_point(mtd, from, len, retlen, virt, phys);
725}
726EXPORT_SYMBOL_GPL(mtd_point);
727
728/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
729int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
730{
731 if (!mtd->_point)
732 return -EOPNOTSUPP;
733 if (from < 0 || from > mtd->size || len > mtd->size - from)
734 return -EINVAL;
735 if (!len)
736 return 0;
737 return mtd->_unpoint(mtd, from, len);
738}
739EXPORT_SYMBOL_GPL(mtd_unpoint);
740
741/*
742 * Allow NOMMU mmap() to directly map the device (if not NULL)
743 * - return the address to which the offset maps
744 * - return -ENOSYS to indicate refusal to do the mapping
745 */
746unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
747 unsigned long offset, unsigned long flags)
748{
749 if (!mtd->_get_unmapped_area)
750 return -EOPNOTSUPP;
751 if (offset > mtd->size || len > mtd->size - offset)
752 return -EINVAL;
753 return mtd->_get_unmapped_area(mtd, len, offset, flags);
754}
755EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
756
757int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
758 u_char *buf)
759{
760 *retlen = 0;
761 if (from < 0 || from > mtd->size || len > mtd->size - from)
762 return -EINVAL;
763 if (!len)
764 return 0;
765 return mtd->_read(mtd, from, len, retlen, buf);
766}
767EXPORT_SYMBOL_GPL(mtd_read);
768
769int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
770 const u_char *buf)
771{
772 *retlen = 0;
773 if (to < 0 || to > mtd->size || len > mtd->size - to)
774 return -EINVAL;
775 if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
776 return -EROFS;
777 if (!len)
778 return 0;
779 return mtd->_write(mtd, to, len, retlen, buf);
780}
781EXPORT_SYMBOL_GPL(mtd_write);
782
783/*
784 * In blackbox flight recorder like scenarios we want to make successful writes
785 * in interrupt context. panic_write() is only intended to be called when it is
786 * known the kernel is about to panic and we need the write to succeed. Since
787 * the kernel is not going to be running for much longer, this function can
788 * break locks and delay to ensure the write succeeds (but not sleep).
789 */
790int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
791 const u_char *buf)
792{
793 *retlen = 0;
794 if (!mtd->_panic_write)
795 return -EOPNOTSUPP;
796 if (to < 0 || to > mtd->size || len > mtd->size - to)
797 return -EINVAL;
798 if (!(mtd->flags & MTD_WRITEABLE))
799 return -EROFS;
800 if (!len)
801 return 0;
802 return mtd->_panic_write(mtd, to, len, retlen, buf);
803}
804EXPORT_SYMBOL_GPL(mtd_panic_write);
805
806/*
807 * Method to access the protection register area, present in some flash
808 * devices. The user data is one time programmable but the factory data is read
809 * only.
810 */
811int mtd_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
812 size_t len)
813{
814 if (!mtd->_get_fact_prot_info)
815 return -EOPNOTSUPP;
816 if (!len)
817 return 0;
818 return mtd->_get_fact_prot_info(mtd, buf, len);
819}
820EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
821
822int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
823 size_t *retlen, u_char *buf)
824{
825 *retlen = 0;
826 if (!mtd->_read_fact_prot_reg)
827 return -EOPNOTSUPP;
828 if (!len)
829 return 0;
830 return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
831}
832EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
833
834int mtd_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf,
835 size_t len)
836{
837 if (!mtd->_get_user_prot_info)
838 return -EOPNOTSUPP;
839 if (!len)
840 return 0;
841 return mtd->_get_user_prot_info(mtd, buf, len);
842}
843EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
844
845int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
846 size_t *retlen, u_char *buf)
847{
848 *retlen = 0;
849 if (!mtd->_read_user_prot_reg)
850 return -EOPNOTSUPP;
851 if (!len)
852 return 0;
853 return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
854}
855EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
856
857int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
858 size_t *retlen, u_char *buf)
859{
860 *retlen = 0;
861 if (!mtd->_write_user_prot_reg)
862 return -EOPNOTSUPP;
863 if (!len)
864 return 0;
865 return mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
866}
867EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
868
869int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
870{
871 if (!mtd->_lock_user_prot_reg)
872 return -EOPNOTSUPP;
873 if (!len)
874 return 0;
875 return mtd->_lock_user_prot_reg(mtd, from, len);
876}
877EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
878
879/* Chip-supported device locking */
880int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
881{
882 if (!mtd->_lock)
883 return -EOPNOTSUPP;
884 if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
885 return -EINVAL;
886 if (!len)
887 return 0;
888 return mtd->_lock(mtd, ofs, len);
889}
890EXPORT_SYMBOL_GPL(mtd_lock);
891
892int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
893{
894 if (!mtd->_unlock)
895 return -EOPNOTSUPP;
896 if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
897 return -EINVAL;
898 if (!len)
899 return 0;
900 return mtd->_unlock(mtd, ofs, len);
901}
902EXPORT_SYMBOL_GPL(mtd_unlock);
903
904int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
905{
906 if (!mtd->_is_locked)
907 return -EOPNOTSUPP;
908 if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
909 return -EINVAL;
910 if (!len)
911 return 0;
912 return mtd->_is_locked(mtd, ofs, len);
913}
914EXPORT_SYMBOL_GPL(mtd_is_locked);
915
916int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
917{
918 if (!mtd->_block_isbad)
919 return 0;
920 if (ofs < 0 || ofs > mtd->size)
921 return -EINVAL;
922 return mtd->_block_isbad(mtd, ofs);
923}
924EXPORT_SYMBOL_GPL(mtd_block_isbad);
925
926int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
927{
928 if (!mtd->_block_markbad)
929 return -EOPNOTSUPP;
930 if (ofs < 0 || ofs > mtd->size)
931 return -EINVAL;
932 if (!(mtd->flags & MTD_WRITEABLE))
933 return -EROFS;
934 return mtd->_block_markbad(mtd, ofs);
935}
936EXPORT_SYMBOL_GPL(mtd_block_markbad);
937
938/*
686 * default_mtd_writev - the default writev method 939 * default_mtd_writev - the default writev method
687 * @mtd: mtd device description object pointer 940 * @mtd: mtd device description object pointer
688 * @vecs: the vectors to write 941 * @vecs: the vectors to write
@@ -729,9 +982,11 @@ int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
729 unsigned long count, loff_t to, size_t *retlen) 982 unsigned long count, loff_t to, size_t *retlen)
730{ 983{
731 *retlen = 0; 984 *retlen = 0;
732 if (!mtd->writev) 985 if (!(mtd->flags & MTD_WRITEABLE))
986 return -EROFS;
987 if (!mtd->_writev)
733 return default_mtd_writev(mtd, vecs, count, to, retlen); 988 return default_mtd_writev(mtd, vecs, count, to, retlen);
734 return mtd->writev(mtd, vecs, count, to, retlen); 989 return mtd->_writev(mtd, vecs, count, to, retlen);
735} 990}
736EXPORT_SYMBOL_GPL(mtd_writev); 991EXPORT_SYMBOL_GPL(mtd_writev);
737 992
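The mtdcore.c hunks above add mtd_* wrapper functions that centralize the bounds, zero-length and writeability checks the per-driver and mtdpart/mtdconcat code used to duplicate. The comment added above mtd_erase() also spells out the asynchronous contract: the driver calls instr->callback() when the erase finishes, and callers wait for it before touching the block. A hedged sketch of a caller honoring that contract follows; erase_one_block() and erase_done() are illustrative helpers assumed for this example, not part of the patch, and ofs is assumed to be erase-block aligned.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/mtd/mtd.h>
#include <linux/string.h>

static void erase_done(struct erase_info *instr)
{
	complete((struct completion *)instr->priv);
}

static int erase_one_block(struct mtd_info *mtd, loff_t ofs)
{
	struct completion done;
	struct erase_info ei;
	int ret;

	init_completion(&done);
	memset(&ei, 0, sizeof(ei));
	ei.mtd = mtd;
	ei.addr = ofs;
	ei.len = mtd->erasesize;
	ei.callback = erase_done;
	ei.priv = (unsigned long)&done;

	ret = mtd_erase(mtd, &ei);	/* bounds and -EROFS checks now live here */
	if (ret)
		return ret;

	wait_for_completion(&done);	/* wait for the driver's callback */
	return ei.state == MTD_ERASE_FAILED ? -EIO : 0;
}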
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 3ce99e00a49e..ae36d7e1e913 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -169,7 +169,7 @@ static void mtdoops_workfunc_erase(struct work_struct *work)
169 cxt->nextpage = 0; 169 cxt->nextpage = 0;
170 } 170 }
171 171
172 while (mtd_can_have_bb(mtd)) { 172 while (1) {
173 ret = mtd_block_isbad(mtd, cxt->nextpage * record_size); 173 ret = mtd_block_isbad(mtd, cxt->nextpage * record_size);
174 if (!ret) 174 if (!ret)
175 break; 175 break;
@@ -199,9 +199,9 @@ badblock:
199 return; 199 return;
200 } 200 }
201 201
202 if (mtd_can_have_bb(mtd) && ret == -EIO) { 202 if (ret == -EIO) {
203 ret = mtd_block_markbad(mtd, cxt->nextpage * record_size); 203 ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
204 if (ret < 0) { 204 if (ret < 0 && ret != -EOPNOTSUPP) {
205 printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n"); 205 printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
206 return; 206 return;
207 } 207 }
@@ -257,8 +257,7 @@ static void find_next_position(struct mtdoops_context *cxt)
257 size_t retlen; 257 size_t retlen;
258 258
259 for (page = 0; page < cxt->oops_pages; page++) { 259 for (page = 0; page < cxt->oops_pages; page++) {
260 if (mtd_can_have_bb(mtd) && 260 if (mtd_block_isbad(mtd, page * record_size))
261 mtd_block_isbad(mtd, page * record_size))
262 continue; 261 continue;
263 /* Assume the page is used */ 262 /* Assume the page is used */
264 mark_page_used(cxt, page); 263 mark_page_used(cxt, page);
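The mtdoops change above relies on the error convention established by the new mtdcore wrappers: mtd_block_isbad() simply returns 0 when the device has no bad-block support, and mtd_block_markbad() returns -EOPNOTSUPP, so generic code no longer needs mtd_can_have_bb() guards. A short hedged sketch of a caller following that convention; my_retire_block() is an illustrative helper, not from the patch.

#include <linux/errno.h>
#include <linux/mtd/mtd.h>

static int my_retire_block(struct mtd_info *mtd, loff_t ofs)
{
	int ret = mtd_block_markbad(mtd, ofs);

	/* -EOPNOTSUPP just means the device keeps no bad-block information */
	if (ret && ret != -EOPNOTSUPP)
		return ret;
	return 0;
}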
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index a3d44c3416b4..9651c06de0a9 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -65,12 +65,8 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
65 int res; 65 int res;
66 66
67 stats = part->master->ecc_stats; 67 stats = part->master->ecc_stats;
68 68 res = part->master->_read(part->master, from + part->offset, len,
69 if (from >= mtd->size) 69 retlen, buf);
70 len = 0;
71 else if (from + len > mtd->size)
72 len = mtd->size - from;
73 res = mtd_read(part->master, from + part->offset, len, retlen, buf);
74 if (unlikely(res)) { 70 if (unlikely(res)) {
75 if (mtd_is_bitflip(res)) 71 if (mtd_is_bitflip(res))
76 mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected; 72 mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
@@ -84,19 +80,16 @@ static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
84 size_t *retlen, void **virt, resource_size_t *phys) 80 size_t *retlen, void **virt, resource_size_t *phys)
85{ 81{
86 struct mtd_part *part = PART(mtd); 82 struct mtd_part *part = PART(mtd);
87 if (from >= mtd->size) 83
88 len = 0; 84 return part->master->_point(part->master, from + part->offset, len,
89 else if (from + len > mtd->size) 85 retlen, virt, phys);
90 len = mtd->size - from;
91 return mtd_point(part->master, from + part->offset, len, retlen,
92 virt, phys);
93} 86}
94 87
95static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 88static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
96{ 89{
97 struct mtd_part *part = PART(mtd); 90 struct mtd_part *part = PART(mtd);
98 91
99 mtd_unpoint(part->master, from + part->offset, len); 92 return part->master->_unpoint(part->master, from + part->offset, len);
100} 93}
101 94
102static unsigned long part_get_unmapped_area(struct mtd_info *mtd, 95static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
@@ -107,7 +100,8 @@ static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
107 struct mtd_part *part = PART(mtd); 100 struct mtd_part *part = PART(mtd);
108 101
109 offset += part->offset; 102 offset += part->offset;
110 return mtd_get_unmapped_area(part->master, len, offset, flags); 103 return part->master->_get_unmapped_area(part->master, len, offset,
104 flags);
111} 105}
112 106
113static int part_read_oob(struct mtd_info *mtd, loff_t from, 107static int part_read_oob(struct mtd_info *mtd, loff_t from,
@@ -138,7 +132,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
138 return -EINVAL; 132 return -EINVAL;
139 } 133 }
140 134
141 res = mtd_read_oob(part->master, from + part->offset, ops); 135 res = part->master->_read_oob(part->master, from + part->offset, ops);
142 if (unlikely(res)) { 136 if (unlikely(res)) {
143 if (mtd_is_bitflip(res)) 137 if (mtd_is_bitflip(res))
144 mtd->ecc_stats.corrected++; 138 mtd->ecc_stats.corrected++;
@@ -152,55 +146,46 @@ static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
152 size_t len, size_t *retlen, u_char *buf) 146 size_t len, size_t *retlen, u_char *buf)
153{ 147{
154 struct mtd_part *part = PART(mtd); 148 struct mtd_part *part = PART(mtd);
155 return mtd_read_user_prot_reg(part->master, from, len, retlen, buf); 149 return part->master->_read_user_prot_reg(part->master, from, len,
150 retlen, buf);
156} 151}
157 152
158static int part_get_user_prot_info(struct mtd_info *mtd, 153static int part_get_user_prot_info(struct mtd_info *mtd,
159 struct otp_info *buf, size_t len) 154 struct otp_info *buf, size_t len)
160{ 155{
161 struct mtd_part *part = PART(mtd); 156 struct mtd_part *part = PART(mtd);
162 return mtd_get_user_prot_info(part->master, buf, len); 157 return part->master->_get_user_prot_info(part->master, buf, len);
163} 158}
164 159
165static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, 160static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
166 size_t len, size_t *retlen, u_char *buf) 161 size_t len, size_t *retlen, u_char *buf)
167{ 162{
168 struct mtd_part *part = PART(mtd); 163 struct mtd_part *part = PART(mtd);
169 return mtd_read_fact_prot_reg(part->master, from, len, retlen, buf); 164 return part->master->_read_fact_prot_reg(part->master, from, len,
165 retlen, buf);
170} 166}
171 167
172static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf, 168static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
173 size_t len) 169 size_t len)
174{ 170{
175 struct mtd_part *part = PART(mtd); 171 struct mtd_part *part = PART(mtd);
176 return mtd_get_fact_prot_info(part->master, buf, len); 172 return part->master->_get_fact_prot_info(part->master, buf, len);
177} 173}
178 174
179static int part_write(struct mtd_info *mtd, loff_t to, size_t len, 175static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
180 size_t *retlen, const u_char *buf) 176 size_t *retlen, const u_char *buf)
181{ 177{
182 struct mtd_part *part = PART(mtd); 178 struct mtd_part *part = PART(mtd);
183 if (!(mtd->flags & MTD_WRITEABLE)) 179 return part->master->_write(part->master, to + part->offset, len,
184 return -EROFS; 180 retlen, buf);
185 if (to >= mtd->size)
186 len = 0;
187 else if (to + len > mtd->size)
188 len = mtd->size - to;
189 return mtd_write(part->master, to + part->offset, len, retlen, buf);
190} 181}
191 182
192static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 183static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
193 size_t *retlen, const u_char *buf) 184 size_t *retlen, const u_char *buf)
194{ 185{
195 struct mtd_part *part = PART(mtd); 186 struct mtd_part *part = PART(mtd);
196 if (!(mtd->flags & MTD_WRITEABLE)) 187 return part->master->_panic_write(part->master, to + part->offset, len,
197 return -EROFS; 188 retlen, buf);
198 if (to >= mtd->size)
199 len = 0;
200 else if (to + len > mtd->size)
201 len = mtd->size - to;
202 return mtd_panic_write(part->master, to + part->offset, len, retlen,
203 buf);
204} 189}
205 190
206static int part_write_oob(struct mtd_info *mtd, loff_t to, 191static int part_write_oob(struct mtd_info *mtd, loff_t to,
@@ -208,50 +193,43 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
208{ 193{
209 struct mtd_part *part = PART(mtd); 194 struct mtd_part *part = PART(mtd);
210 195
211 if (!(mtd->flags & MTD_WRITEABLE))
212 return -EROFS;
213
214 if (to >= mtd->size) 196 if (to >= mtd->size)
215 return -EINVAL; 197 return -EINVAL;
216 if (ops->datbuf && to + ops->len > mtd->size) 198 if (ops->datbuf && to + ops->len > mtd->size)
217 return -EINVAL; 199 return -EINVAL;
218 return mtd_write_oob(part->master, to + part->offset, ops); 200 return part->master->_write_oob(part->master, to + part->offset, ops);
219} 201}
220 202
221static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from, 203static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
222 size_t len, size_t *retlen, u_char *buf) 204 size_t len, size_t *retlen, u_char *buf)
223{ 205{
224 struct mtd_part *part = PART(mtd); 206 struct mtd_part *part = PART(mtd);
225 return mtd_write_user_prot_reg(part->master, from, len, retlen, buf); 207 return part->master->_write_user_prot_reg(part->master, from, len,
208 retlen, buf);
226} 209}
227 210
228static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, 211static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
229 size_t len) 212 size_t len)
230{ 213{
231 struct mtd_part *part = PART(mtd); 214 struct mtd_part *part = PART(mtd);
232 return mtd_lock_user_prot_reg(part->master, from, len); 215 return part->master->_lock_user_prot_reg(part->master, from, len);
233} 216}
234 217
235static int part_writev(struct mtd_info *mtd, const struct kvec *vecs, 218static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
236 unsigned long count, loff_t to, size_t *retlen) 219 unsigned long count, loff_t to, size_t *retlen)
237{ 220{
238 struct mtd_part *part = PART(mtd); 221 struct mtd_part *part = PART(mtd);
239 if (!(mtd->flags & MTD_WRITEABLE)) 222 return part->master->_writev(part->master, vecs, count,
240 return -EROFS; 223 to + part->offset, retlen);
241 return mtd_writev(part->master, vecs, count, to + part->offset,
242 retlen);
243} 224}
244 225
245static int part_erase(struct mtd_info *mtd, struct erase_info *instr) 226static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
246{ 227{
247 struct mtd_part *part = PART(mtd); 228 struct mtd_part *part = PART(mtd);
248 int ret; 229 int ret;
249 if (!(mtd->flags & MTD_WRITEABLE)) 230
250 return -EROFS;
251 if (instr->addr >= mtd->size)
252 return -EINVAL;
253 instr->addr += part->offset; 231 instr->addr += part->offset;
254 ret = mtd_erase(part->master, instr); 232 ret = part->master->_erase(part->master, instr);
255 if (ret) { 233 if (ret) {
256 if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) 234 if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
257 instr->fail_addr -= part->offset; 235 instr->fail_addr -= part->offset;
@@ -262,7 +240,7 @@ static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
262 240
263void mtd_erase_callback(struct erase_info *instr) 241void mtd_erase_callback(struct erase_info *instr)
264{ 242{
265 if (instr->mtd->erase == part_erase) { 243 if (instr->mtd->_erase == part_erase) {
266 struct mtd_part *part = PART(instr->mtd); 244 struct mtd_part *part = PART(instr->mtd);
267 245
268 if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) 246 if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
@@ -277,52 +255,44 @@ EXPORT_SYMBOL_GPL(mtd_erase_callback);
277static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 255static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
278{ 256{
279 struct mtd_part *part = PART(mtd); 257 struct mtd_part *part = PART(mtd);
280 if ((len + ofs) > mtd->size) 258 return part->master->_lock(part->master, ofs + part->offset, len);
281 return -EINVAL;
282 return mtd_lock(part->master, ofs + part->offset, len);
283} 259}
284 260
285static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 261static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
286{ 262{
287 struct mtd_part *part = PART(mtd); 263 struct mtd_part *part = PART(mtd);
288 if ((len + ofs) > mtd->size) 264 return part->master->_unlock(part->master, ofs + part->offset, len);
289 return -EINVAL;
290 return mtd_unlock(part->master, ofs + part->offset, len);
291} 265}
292 266
293static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) 267static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
294{ 268{
295 struct mtd_part *part = PART(mtd); 269 struct mtd_part *part = PART(mtd);
296 if ((len + ofs) > mtd->size) 270 return part->master->_is_locked(part->master, ofs + part->offset, len);
297 return -EINVAL;
298 return mtd_is_locked(part->master, ofs + part->offset, len);
299} 271}
300 272
301static void part_sync(struct mtd_info *mtd) 273static void part_sync(struct mtd_info *mtd)
302{ 274{
303 struct mtd_part *part = PART(mtd); 275 struct mtd_part *part = PART(mtd);
304 mtd_sync(part->master); 276 part->master->_sync(part->master);
305} 277}
306 278
307static int part_suspend(struct mtd_info *mtd) 279static int part_suspend(struct mtd_info *mtd)
308{ 280{
309 struct mtd_part *part = PART(mtd); 281 struct mtd_part *part = PART(mtd);
310 return mtd_suspend(part->master); 282 return part->master->_suspend(part->master);
311} 283}
312 284
313static void part_resume(struct mtd_info *mtd) 285static void part_resume(struct mtd_info *mtd)
314{ 286{
315 struct mtd_part *part = PART(mtd); 287 struct mtd_part *part = PART(mtd);
316 mtd_resume(part->master); 288 part->master->_resume(part->master);
317} 289}
318 290
319static int part_block_isbad(struct mtd_info *mtd, loff_t ofs) 291static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
320{ 292{
321 struct mtd_part *part = PART(mtd); 293 struct mtd_part *part = PART(mtd);
322 if (ofs >= mtd->size)
323 return -EINVAL;
324 ofs += part->offset; 294 ofs += part->offset;
325 return mtd_block_isbad(part->master, ofs); 295 return part->master->_block_isbad(part->master, ofs);
326} 296}
327 297
328static int part_block_markbad(struct mtd_info *mtd, loff_t ofs) 298static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
@@ -330,12 +300,8 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
330 struct mtd_part *part = PART(mtd); 300 struct mtd_part *part = PART(mtd);
331 int res; 301 int res;
332 302
333 if (!(mtd->flags & MTD_WRITEABLE))
334 return -EROFS;
335 if (ofs >= mtd->size)
336 return -EINVAL;
337 ofs += part->offset; 303 ofs += part->offset;
338 res = mtd_block_markbad(part->master, ofs); 304 res = part->master->_block_markbad(part->master, ofs);
339 if (!res) 305 if (!res)
340 mtd->ecc_stats.badblocks++; 306 mtd->ecc_stats.badblocks++;
341 return res; 307 return res;
@@ -410,54 +376,55 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
410 */ 376 */
411 slave->mtd.dev.parent = master->dev.parent; 377 slave->mtd.dev.parent = master->dev.parent;
412 378
413 slave->mtd.read = part_read; 379 slave->mtd._read = part_read;
414 slave->mtd.write = part_write; 380 slave->mtd._write = part_write;
415 381
416 if (master->panic_write) 382 if (master->_panic_write)
417 slave->mtd.panic_write = part_panic_write; 383 slave->mtd._panic_write = part_panic_write;
418 384
419 if (master->point && master->unpoint) { 385 if (master->_point && master->_unpoint) {
420 slave->mtd.point = part_point; 386 slave->mtd._point = part_point;
421 slave->mtd.unpoint = part_unpoint; 387 slave->mtd._unpoint = part_unpoint;
422 } 388 }
423 389
424 if (master->get_unmapped_area) 390 if (master->_get_unmapped_area)
425 slave->mtd.get_unmapped_area = part_get_unmapped_area; 391 slave->mtd._get_unmapped_area = part_get_unmapped_area;
426 if (master->read_oob) 392 if (master->_read_oob)
427 slave->mtd.read_oob = part_read_oob; 393 slave->mtd._read_oob = part_read_oob;
428 if (master->write_oob) 394 if (master->_write_oob)
429 slave->mtd.write_oob = part_write_oob; 395 slave->mtd._write_oob = part_write_oob;
430 if (master->read_user_prot_reg) 396 if (master->_read_user_prot_reg)
431 slave->mtd.read_user_prot_reg = part_read_user_prot_reg; 397 slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
432 if (master->read_fact_prot_reg) 398 if (master->_read_fact_prot_reg)
433 slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg; 399 slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
434 if (master->write_user_prot_reg) 400 if (master->_write_user_prot_reg)
435 slave->mtd.write_user_prot_reg = part_write_user_prot_reg; 401 slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
436 if (master->lock_user_prot_reg) 402 if (master->_lock_user_prot_reg)
437 slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg; 403 slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
438 if (master->get_user_prot_info) 404 if (master->_get_user_prot_info)
439 slave->mtd.get_user_prot_info = part_get_user_prot_info; 405 slave->mtd._get_user_prot_info = part_get_user_prot_info;
440 if (master->get_fact_prot_info) 406 if (master->_get_fact_prot_info)
441 slave->mtd.get_fact_prot_info = part_get_fact_prot_info; 407 slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
442 if (master->sync) 408 if (master->_sync)
443 slave->mtd.sync = part_sync; 409 slave->mtd._sync = part_sync;
444 if (!partno && !master->dev.class && master->suspend && master->resume) { 410 if (!partno && !master->dev.class && master->_suspend &&
445 slave->mtd.suspend = part_suspend; 411 master->_resume) {
446 slave->mtd.resume = part_resume; 412 slave->mtd._suspend = part_suspend;
413 slave->mtd._resume = part_resume;
447 } 414 }
448 if (master->writev) 415 if (master->_writev)
449 slave->mtd.writev = part_writev; 416 slave->mtd._writev = part_writev;
450 if (master->lock) 417 if (master->_lock)
451 slave->mtd.lock = part_lock; 418 slave->mtd._lock = part_lock;
452 if (master->unlock) 419 if (master->_unlock)
453 slave->mtd.unlock = part_unlock; 420 slave->mtd._unlock = part_unlock;
454 if (master->is_locked) 421 if (master->_is_locked)
455 slave->mtd.is_locked = part_is_locked; 422 slave->mtd._is_locked = part_is_locked;
456 if (master->block_isbad) 423 if (master->_block_isbad)
457 slave->mtd.block_isbad = part_block_isbad; 424 slave->mtd._block_isbad = part_block_isbad;
458 if (master->block_markbad) 425 if (master->_block_markbad)
459 slave->mtd.block_markbad = part_block_markbad; 426 slave->mtd._block_markbad = part_block_markbad;
460 slave->mtd.erase = part_erase; 427 slave->mtd._erase = part_erase;
461 slave->master = master; 428 slave->master = master;
462 slave->offset = part->offset; 429 slave->offset = part->offset;
463 430
@@ -549,7 +516,8 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
549 } 516 }
550 517
551 slave->mtd.ecclayout = master->ecclayout; 518 slave->mtd.ecclayout = master->ecclayout;
552 if (master->block_isbad) { 519 slave->mtd.ecc_strength = master->ecc_strength;
520 if (master->_block_isbad) {
553 uint64_t offs = 0; 521 uint64_t offs = 0;
554 522
555 while (offs < slave->mtd.size) { 523 while (offs < slave->mtd.size) {
@@ -761,7 +729,7 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
761 for ( ; ret <= 0 && *types; types++) { 729 for ( ; ret <= 0 && *types; types++) {
762 parser = get_partition_parser(*types); 730 parser = get_partition_parser(*types);
763 if (!parser && !request_module("%s", *types)) 731 if (!parser && !request_module("%s", *types))
764 parser = get_partition_parser(*types); 732 parser = get_partition_parser(*types);
765 if (!parser) 733 if (!parser)
766 continue; 734 continue;
767 ret = (*parser->parse_fn)(master, pparts, data); 735 ret = (*parser->parse_fn)(master, pparts, data);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index a3c4de551ebe..7d17cecad69d 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -314,6 +314,26 @@ config MTD_NAND_DISKONCHIP_BBTWRITE
314 load time (assuming you build diskonchip as a module) with the module 314 load time (assuming you build diskonchip as a module) with the module
315 parameter "inftl_bbt_write=1". 315 parameter "inftl_bbt_write=1".
316 316
317config MTD_NAND_DOCG4
318 tristate "Support for DiskOnChip G4 (EXPERIMENTAL)"
319 depends on EXPERIMENTAL
320 select BCH
321 select BITREVERSE
322 help
323 Support for diskonchip G4 nand flash, found in various smartphones and
324 PDAs, among them the Palm Treo680, HTC Prophet and Wizard, Toshiba
325 Portege G900, Asus P526, and O2 XDA Zinc.
326
327 With this driver you will be able to use UBI and create a ubifs on the
328 device, so you may wish to consider enabling UBI and UBIFS as well.
329
330 These devices ship with the M-Sys/Sandisk SAFTL formatting, for which
331 there is currently no mtd parser, so you may want to use command line
332 partitioning to segregate write-protected blocks. On the Treo680, the
333 first five erase blocks (256KiB each) are write-protected, followed
334 by the block containing the saftl partition table. This is probably
335 typical.
336
317config MTD_NAND_SHARPSL 337config MTD_NAND_SHARPSL
318 tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)" 338 tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
319 depends on ARCH_PXA 339 depends on ARCH_PXA
@@ -421,7 +441,6 @@ config MTD_NAND_NANDSIM
421config MTD_NAND_GPMI_NAND 441config MTD_NAND_GPMI_NAND
422 bool "GPMI NAND Flash Controller driver" 442 bool "GPMI NAND Flash Controller driver"
423 depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28) 443 depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28)
424 select MTD_CMDLINE_PARTS
425 help 444 help
426 Enables NAND Flash support for IMX23 or IMX28. 445 Enables NAND Flash support for IMX23 or IMX28.
427 The GPMI controller is very powerful, with the help of BCH 446 The GPMI controller is very powerful, with the help of BCH
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 19bc8cb1d187..d4b4d8739bd8 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
19obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o 19obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
20obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o 20obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o
21obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o 21obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
22obj-$(CONFIG_MTD_NAND_DOCG4) += docg4.o
22obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o 23obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o
23obj-$(CONFIG_MTD_NAND_H1900) += h1910.o 24obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
24obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o 25obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 6a5ff64a139e..4f20e1d8bef1 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -585,12 +585,13 @@ static int alauda_init_media(struct alauda *al)
585 mtd->writesize = 1<<card->pageshift; 585 mtd->writesize = 1<<card->pageshift;
586 mtd->type = MTD_NANDFLASH; 586 mtd->type = MTD_NANDFLASH;
587 mtd->flags = MTD_CAP_NANDFLASH; 587 mtd->flags = MTD_CAP_NANDFLASH;
588 mtd->read = alauda_read; 588 mtd->_read = alauda_read;
589 mtd->write = alauda_write; 589 mtd->_write = alauda_write;
590 mtd->erase = alauda_erase; 590 mtd->_erase = alauda_erase;
591 mtd->block_isbad = alauda_isbad; 591 mtd->_block_isbad = alauda_isbad;
592 mtd->priv = al; 592 mtd->priv = al;
593 mtd->owner = THIS_MODULE; 593 mtd->owner = THIS_MODULE;
594 mtd->ecc_strength = 1;
594 595
595 err = mtd_device_register(mtd, NULL, 0); 596 err = mtd_device_register(mtd, NULL, 0);
596 if (err) { 597 if (err) {
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index ae7e37d9ac17..2165576a1c67 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -603,6 +603,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
603 nand_chip->ecc.hwctl = atmel_nand_hwctl; 603 nand_chip->ecc.hwctl = atmel_nand_hwctl;
604 nand_chip->ecc.read_page = atmel_nand_read_page; 604 nand_chip->ecc.read_page = atmel_nand_read_page;
605 nand_chip->ecc.bytes = 4; 605 nand_chip->ecc.bytes = 4;
606 nand_chip->ecc.strength = 1;
606 } 607 }
607 608
608 nand_chip->chip_delay = 20; /* 20us command delay time */ 609 nand_chip->chip_delay = 20; /* 20us command delay time */
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index 64c9cbaf86a1..6908cdde3065 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -475,6 +475,14 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
475 largepage_bbt.options = NAND_BBT_SCAN2NDPAGE; 475 largepage_bbt.options = NAND_BBT_SCAN2NDPAGE;
476 this->badblock_pattern = &largepage_bbt; 476 this->badblock_pattern = &largepage_bbt;
477 } 477 }
478
479 /*
480 * FIXME: ecc strength value of 6 bits per 512 bytes of data is a
481 * conservative guess, given 13 ecc bytes and using bch alg.
482 * (Assume Galois field order m=15 to allow a margin of error.)
483 */
484 this->ecc.strength = 6;
485
478#endif 486#endif
479 487
480 /* Now finish off the scan, now that ecc.layout has been initialized. */ 488 /* Now finish off the scan, now that ecc.layout has been initialized. */
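The conservative guess in the FIXME above can be reproduced with the usual BCH sizing rule: 13 ECC bytes per 512-byte step provide 13 * 8 = 104 parity bits, a BCH code over GF(2^m) spends roughly m parity bits per correctable bit error, and with the assumed m = 15 that gives floor(104 / 15) = 6 correctable bits per 512 bytes.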
@@ -487,7 +495,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
487 495
488 /* Register the partitions */ 496 /* Register the partitions */
489 board_mtd->name = "bcm_umi-nand"; 497 board_mtd->name = "bcm_umi-nand";
490 mtd_device_parse_register(board_mtd, NULL, 0, NULL, 0); 498 mtd_device_parse_register(board_mtd, NULL, NULL, NULL, 0);
491 499
492 /* Return happy */ 500 /* Return happy */
493 return 0; 501 return 0;
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index dd899cb5d366..d7b86b925de5 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -702,9 +702,11 @@ static int bf5xx_nand_scan(struct mtd_info *mtd)
702 if (likely(mtd->writesize >= 512)) { 702 if (likely(mtd->writesize >= 512)) {
703 chip->ecc.size = 512; 703 chip->ecc.size = 512;
704 chip->ecc.bytes = 6; 704 chip->ecc.bytes = 6;
705 chip->ecc.strength = 2;
705 } else { 706 } else {
706 chip->ecc.size = 256; 707 chip->ecc.size = 256;
707 chip->ecc.bytes = 3; 708 chip->ecc.bytes = 3;
709 chip->ecc.strength = 1;
708 bfin_write_NFC_CTL(bfin_read_NFC_CTL() & ~(1 << NFC_PG_SIZE_OFFSET)); 710 bfin_write_NFC_CTL(bfin_read_NFC_CTL() & ~(1 << NFC_PG_SIZE_OFFSET));
709 SSYNC(); 711 SSYNC();
710 } 712 }
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 72d3f23490c5..2a96e1a12062 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -783,6 +783,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
783 cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME; 783 cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
784 cafe->nand.ecc.size = mtd->writesize; 784 cafe->nand.ecc.size = mtd->writesize;
785 cafe->nand.ecc.bytes = 14; 785 cafe->nand.ecc.bytes = 14;
786 cafe->nand.ecc.strength = 4;
786 cafe->nand.ecc.hwctl = (void *)cafe_nand_bug; 787 cafe->nand.ecc.hwctl = (void *)cafe_nand_bug;
787 cafe->nand.ecc.calculate = (void *)cafe_nand_bug; 788 cafe->nand.ecc.calculate = (void *)cafe_nand_bug;
788 cafe->nand.ecc.correct = (void *)cafe_nand_bug; 789 cafe->nand.ecc.correct = (void *)cafe_nand_bug;
@@ -799,7 +800,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
799 pci_set_drvdata(pdev, mtd); 800 pci_set_drvdata(pdev, mtd);
800 801
801 mtd->name = "cafe_nand"; 802 mtd->name = "cafe_nand";
802 mtd_device_parse_register(mtd, part_probes, 0, NULL, 0); 803 mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
803 804
804 goto out; 805 goto out;
805 806
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 737ef9a04fdb..1024bfc05c86 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -219,7 +219,7 @@ static int __init cmx270_init(void)
219 } 219 }
220 220
221 /* Register the partitions */ 221 /* Register the partitions */
222 ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, 0, 222 ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, NULL,
223 partition_info, NUM_PARTITIONS); 223 partition_info, NUM_PARTITIONS);
224 if (ret) 224 if (ret)
225 goto err_scan; 225 goto err_scan;
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index 414afa793563..821c34c62500 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -248,6 +248,8 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
248 goto out_ior; 248 goto out_ior;
249 } 249 }
250 250
251 this->ecc.strength = 1;
252
251 new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs); 253 new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
252 254
253 cs553x_mtd[cs] = new_mtd; 255 cs553x_mtd[cs] = new_mtd;
@@ -313,7 +315,7 @@ static int __init cs553x_init(void)
313 for (i = 0; i < NR_CS553X_CONTROLLERS; i++) { 315 for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
314 if (cs553x_mtd[i]) { 316 if (cs553x_mtd[i]) {
315 /* If any devices registered, return success. Else the last error. */ 317 /* If any devices registered, return success. Else the last error. */
316 mtd_device_parse_register(cs553x_mtd[i], NULL, 0, 318 mtd_device_parse_register(cs553x_mtd[i], NULL, NULL,
317 NULL, 0); 319 NULL, 0);
318 err = 0; 320 err = 0;
319 } 321 }
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 6e566156956f..d94b03c207af 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -641,6 +641,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
641 info->chip.ecc.bytes = 3; 641 info->chip.ecc.bytes = 3;
642 } 642 }
643 info->chip.ecc.size = 512; 643 info->chip.ecc.size = 512;
644 info->chip.ecc.strength = pdata->ecc_bits;
644 break; 645 break;
645 default: 646 default:
646 ret = -EINVAL; 647 ret = -EINVAL;
@@ -752,8 +753,8 @@ syndrome_done:
752 if (ret < 0) 753 if (ret < 0)
753 goto err_scan; 754 goto err_scan;
754 755
755 ret = mtd_device_parse_register(&info->mtd, NULL, 0, 756 ret = mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts,
756 pdata->parts, pdata->nr_parts); 757 pdata->nr_parts);
757 758
758 if (ret < 0) 759 if (ret < 0)
759 goto err_scan; 760 goto err_scan;
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 3984d488f9ab..a9e57d686297 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1590,6 +1590,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1590 ECC_15BITS * (denali->mtd.writesize / 1590 ECC_15BITS * (denali->mtd.writesize /
1591 ECC_SECTOR_SIZE)))) { 1591 ECC_SECTOR_SIZE)))) {
1592 /* if MLC OOB size is large enough, use 15bit ECC*/ 1592 /* if MLC OOB size is large enough, use 15bit ECC*/
1593 denali->nand.ecc.strength = 15;
1593 denali->nand.ecc.layout = &nand_15bit_oob; 1594 denali->nand.ecc.layout = &nand_15bit_oob;
1594 denali->nand.ecc.bytes = ECC_15BITS; 1595 denali->nand.ecc.bytes = ECC_15BITS;
1595 iowrite32(15, denali->flash_reg + ECC_CORRECTION); 1596 iowrite32(15, denali->flash_reg + ECC_CORRECTION);
@@ -1600,12 +1601,14 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1600 " contain 8bit ECC correction codes"); 1601 " contain 8bit ECC correction codes");
1601 goto failed_req_irq; 1602 goto failed_req_irq;
1602 } else { 1603 } else {
1604 denali->nand.ecc.strength = 8;
1603 denali->nand.ecc.layout = &nand_8bit_oob; 1605 denali->nand.ecc.layout = &nand_8bit_oob;
1604 denali->nand.ecc.bytes = ECC_8BITS; 1606 denali->nand.ecc.bytes = ECC_8BITS;
1605 iowrite32(8, denali->flash_reg + ECC_CORRECTION); 1607 iowrite32(8, denali->flash_reg + ECC_CORRECTION);
1606 } 1608 }
1607 1609
1608 denali->nand.ecc.bytes *= denali->devnum; 1610 denali->nand.ecc.bytes *= denali->devnum;
1611 denali->nand.ecc.strength *= denali->devnum;
1609 denali->nand.ecc.layout->eccbytes *= 1612 denali->nand.ecc.layout->eccbytes *=
1610 denali->mtd.writesize / ECC_SECTOR_SIZE; 1613 denali->mtd.writesize / ECC_SECTOR_SIZE;
1611 denali->nand.ecc.layout->oobfree[0].offset = 1614 denali->nand.ecc.layout->oobfree[0].offset =
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index df921e7a496c..e2ca067631cf 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -1653,6 +1653,7 @@ static int __init doc_probe(unsigned long physadr)
1653 nand->ecc.mode = NAND_ECC_HW_SYNDROME; 1653 nand->ecc.mode = NAND_ECC_HW_SYNDROME;
1654 nand->ecc.size = 512; 1654 nand->ecc.size = 512;
1655 nand->ecc.bytes = 6; 1655 nand->ecc.bytes = 6;
1656 nand->ecc.strength = 2;
1656 nand->bbt_options = NAND_BBT_USE_FLASH; 1657 nand->bbt_options = NAND_BBT_USE_FLASH;
1657 1658
1658 doc->physadr = physadr; 1659 doc->physadr = physadr;
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
new file mode 100644
index 000000000000..b08202664543
--- /dev/null
+++ b/drivers/mtd/nand/docg4.c
@@ -0,0 +1,1377 @@
1/*
2 * Copyright © 2012 Mike Dunn <mikedunn@newsguy.com>
3 *
4 * mtd nand driver for M-Systems DiskOnChip G4
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * Tested on the Palm Treo 680. The G4 is also present on Toshiba Portege, Asus
12 * P526, some HTC smartphones (Wizard, Prophet, ...), O2 XDA Zinc, maybe others.
13 * Should work on these as well. Let me know!
14 *
15 * TODO:
16 *
17 * Mechanism for management of password-protected areas
18 *
19 * Hamming ecc when reading oob only
20 *
21 * According to the M-Sys documentation, this device is also available in a
22 * "dual-die" configuration having a 256MB capacity, but no mechanism for
23 * detecting this variant is documented. Currently this driver assumes 128MB
24 * capacity.
25 *
26 * Support for multiple cascaded devices ("floors"). Not sure which gadgets
27 * contain multiple G4s in a cascaded configuration, if any.
28 *
29 */
30
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34#include <linux/string.h>
35#include <linux/sched.h>
36#include <linux/delay.h>
37#include <linux/module.h>
38#include <linux/export.h>
39#include <linux/platform_device.h>
40#include <linux/io.h>
41#include <linux/bitops.h>
42#include <linux/mtd/partitions.h>
43#include <linux/mtd/mtd.h>
44#include <linux/mtd/nand.h>
45#include <linux/bch.h>
46#include <linux/bitrev.h>
47
48/*
49 * You'll want to ignore badblocks if you're reading a partition that contains
50 * data written by the TrueFFS library (i.e., by PalmOS, Windows, etc), since
51 * it does not use mtd nand's method for marking bad blocks (using oob area).
52 * This will also skip the check of the "page written" flag.
53 */
54static bool ignore_badblocks;
55module_param(ignore_badblocks, bool, 0);
56MODULE_PARM_DESC(ignore_badblocks, "no badblock checking performed");
57
58struct docg4_priv {
59 struct mtd_info *mtd;
60 struct device *dev;
61 void __iomem *virtadr;
62 int status;
63 struct {
64 unsigned int command;
65 int column;
66 int page;
67 } last_command;
68 uint8_t oob_buf[16];
69 uint8_t ecc_buf[7];
70 int oob_page;
71 struct bch_control *bch;
72};
73
74/*
75 * Defines prefixed with DOCG4 are unique to the diskonchip G4. All others are
76 * shared with other diskonchip devices (P3, G3 at least).
77 *
78 * Functions with names prefixed with docg4_ are mtd / nand interface functions
79 * (though they may also be called internally). All others are internal.
80 */
81
82#define DOC_IOSPACE_DATA 0x0800
83
84/* register offsets */
85#define DOC_CHIPID 0x1000
86#define DOC_DEVICESELECT 0x100a
87#define DOC_ASICMODE 0x100c
88#define DOC_DATAEND 0x101e
89#define DOC_NOP 0x103e
90
91#define DOC_FLASHSEQUENCE 0x1032
92#define DOC_FLASHCOMMAND 0x1034
93#define DOC_FLASHADDRESS 0x1036
94#define DOC_FLASHCONTROL 0x1038
95#define DOC_ECCCONF0 0x1040
96#define DOC_ECCCONF1 0x1042
97#define DOC_HAMMINGPARITY 0x1046
98#define DOC_BCH_SYNDROM(idx) (0x1048 + idx)
99
100#define DOC_ASICMODECONFIRM 0x1072
101#define DOC_CHIPID_INV 0x1074
102#define DOC_POWERMODE 0x107c
103
104#define DOCG4_MYSTERY_REG 0x1050
105
106/* apparently used only to write oob bytes 6 and 7 */
107#define DOCG4_OOB_6_7 0x1052
108
109/* DOC_FLASHSEQUENCE register commands */
110#define DOC_SEQ_RESET 0x00
111#define DOCG4_SEQ_PAGE_READ 0x03
112#define DOCG4_SEQ_FLUSH 0x29
113#define DOCG4_SEQ_PAGEWRITE 0x16
114#define DOCG4_SEQ_PAGEPROG 0x1e
115#define DOCG4_SEQ_BLOCKERASE 0x24
116
117/* DOC_FLASHCOMMAND register commands */
118#define DOCG4_CMD_PAGE_READ 0x00
119#define DOC_CMD_ERASECYCLE2 0xd0
120#define DOCG4_CMD_FLUSH 0x70
121#define DOCG4_CMD_READ2 0x30
122#define DOC_CMD_PROG_BLOCK_ADDR 0x60
123#define DOCG4_CMD_PAGEWRITE 0x80
124#define DOC_CMD_PROG_CYCLE2 0x10
125#define DOC_CMD_RESET 0xff
126
127/* DOC_POWERMODE register bits */
128#define DOC_POWERDOWN_READY 0x80
129
130/* DOC_FLASHCONTROL register bits */
131#define DOC_CTRL_CE 0x10
132#define DOC_CTRL_UNKNOWN 0x40
133#define DOC_CTRL_FLASHREADY 0x01
134
135/* DOC_ECCCONF0 register bits */
136#define DOC_ECCCONF0_READ_MODE 0x8000
137#define DOC_ECCCONF0_UNKNOWN 0x2000
138#define DOC_ECCCONF0_ECC_ENABLE 0x1000
139#define DOC_ECCCONF0_DATA_BYTES_MASK 0x07ff
140
141/* DOC_ECCCONF1 register bits */
142#define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80
143#define DOC_ECCCONF1_ECC_ENABLE 0x07
144#define DOC_ECCCONF1_PAGE_IS_WRITTEN 0x20
145
146/* DOC_ASICMODE register bits */
147#define DOC_ASICMODE_RESET 0x00
148#define DOC_ASICMODE_NORMAL 0x01
149#define DOC_ASICMODE_POWERDOWN 0x02
150#define DOC_ASICMODE_MDWREN 0x04
151#define DOC_ASICMODE_BDETCT_RESET 0x08
152#define DOC_ASICMODE_RSTIN_RESET 0x10
153#define DOC_ASICMODE_RAM_WE 0x20
154
155/* good status values read after read/write/erase operations */
156#define DOCG4_PROGSTATUS_GOOD 0x51
157#define DOCG4_PROGSTATUS_GOOD_2 0xe0
158
159/*
160 * On read operations (page and oob-only), the first byte read from I/O reg is a
161 * status. On error, it reads 0x73; otherwise, it reads either 0x71 (first read
162 * after reset only) or 0x51, so bit 1 is presumed to be an error indicator.
163 */
164#define DOCG4_READ_ERROR 0x02 /* bit 1 indicates read error */
165
166/* anatomy of the device */
167#define DOCG4_CHIP_SIZE 0x8000000
168#define DOCG4_PAGE_SIZE 0x200
169#define DOCG4_PAGES_PER_BLOCK 0x200
170#define DOCG4_BLOCK_SIZE (DOCG4_PAGES_PER_BLOCK * DOCG4_PAGE_SIZE)
171#define DOCG4_NUMBLOCKS (DOCG4_CHIP_SIZE / DOCG4_BLOCK_SIZE)
172#define DOCG4_OOB_SIZE 0x10
173#define DOCG4_CHIP_SHIFT 27 /* log_2(DOCG4_CHIP_SIZE) */
174#define DOCG4_PAGE_SHIFT 9 /* log_2(DOCG4_PAGE_SIZE) */
175#define DOCG4_ERASE_SHIFT 18 /* log_2(DOCG4_BLOCK_SIZE) */
176
177/* all but the last byte is included in ecc calculation */
178#define DOCG4_BCH_SIZE (DOCG4_PAGE_SIZE + DOCG4_OOB_SIZE - 1)
179
180#define DOCG4_USERDATA_LEN 520 /* 512 byte page plus 8 oob avail to user */
181
182/* expected values from the ID registers */
183#define DOCG4_IDREG1_VALUE 0x0400
184#define DOCG4_IDREG2_VALUE 0xfbff
185
186/* primitive polynomial used to build the Galois field used by hw ecc gen */
187#define DOCG4_PRIMITIVE_POLY 0x4443
188
189#define DOCG4_M 14 /* Galois field is of order 2^14 */
190#define DOCG4_T 4 /* BCH alg corrects up to 4 bit errors */
191
192#define DOCG4_FACTORY_BBT_PAGE 16 /* page where read-only factory bbt lives */
193
194/*
195 * Oob bytes 0 - 6 are available to the user.
196 * Byte 7 is hamming ecc for first 7 bytes. Bytes 8 - 14 are hw-generated ecc.
197 * Byte 15 (the last) is used by the driver as a "page written" flag.
198 */
199static struct nand_ecclayout docg4_oobinfo = {
200 .eccbytes = 9,
201 .eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15},
202 .oobavail = 7,
203 .oobfree = { {0, 7} }
204};
205
206/*
207 * The device has a nop register which M-Sys claims is for the purpose of
208 * inserting precise delays. But beware; at least some operations fail if the
209 * nop writes are replaced with a generic delay!
210 */
211static inline void write_nop(void __iomem *docptr)
212{
213 writew(0, docptr + DOC_NOP);
214}
215
216static void docg4_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
217{
218 int i;
219 struct nand_chip *nand = mtd->priv;
220 uint16_t *p = (uint16_t *) buf;
221 len >>= 1;
222
223 for (i = 0; i < len; i++)
224 p[i] = readw(nand->IO_ADDR_R);
225}
226
227static void docg4_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
228{
229 int i;
230 struct nand_chip *nand = mtd->priv;
231 uint16_t *p = (uint16_t *) buf;
232 len >>= 1;
233
234 for (i = 0; i < len; i++)
235 writew(p[i], nand->IO_ADDR_W);
236}
237
238static int poll_status(struct docg4_priv *doc)
239{
240 /*
241 * Busy-wait for the FLASHREADY bit to be set in the FLASHCONTROL
242 * register. Operations known to take a long time (e.g., block erase)
243 * should sleep for a while before calling this.
244 */
245
246 uint16_t flash_status;
247 unsigned int timeo;
248 void __iomem *docptr = doc->virtadr;
249
250 dev_dbg(doc->dev, "%s...\n", __func__);
251
252 /* hardware quirk requires reading twice initially */
253 flash_status = readw(docptr + DOC_FLASHCONTROL);
254
255 timeo = 1000;
256 do {
257 cpu_relax();
258 flash_status = readb(docptr + DOC_FLASHCONTROL);
259 } while (!(flash_status & DOC_CTRL_FLASHREADY) && --timeo);
260
261
262 if (!timeo) {
263 dev_err(doc->dev, "%s: timed out!\n", __func__);
264 return NAND_STATUS_FAIL;
265 }
266
267 if (unlikely(timeo < 50))
268 dev_warn(doc->dev, "%s: nearly timed out; %d remaining\n",
269 __func__, timeo);
270
271 return 0;
272}
273
274
275static int docg4_wait(struct mtd_info *mtd, struct nand_chip *nand)
276{
277
278 struct docg4_priv *doc = nand->priv;
279 int status = NAND_STATUS_WP; /* inverse logic?? */
280 dev_dbg(doc->dev, "%s...\n", __func__);
281
282 /* report any previously unreported error */
283 if (doc->status) {
284 status |= doc->status;
285 doc->status = 0;
286 return status;
287 }
288
289 status |= poll_status(doc);
290 return status;
291}
292
293static void docg4_select_chip(struct mtd_info *mtd, int chip)
294{
295 /*
296 * Select among multiple cascaded chips ("floors"). Multiple floors are
297 * not yet supported, so the only valid non-negative value is 0.
298 */
299 struct nand_chip *nand = mtd->priv;
300 struct docg4_priv *doc = nand->priv;
301 void __iomem *docptr = doc->virtadr;
302
303 dev_dbg(doc->dev, "%s: chip %d\n", __func__, chip);
304
305 if (chip < 0)
306 return; /* deselected */
307
308 if (chip > 0)
309 dev_warn(doc->dev, "multiple floors currently unsupported\n");
310
311 writew(0, docptr + DOC_DEVICESELECT);
312}
313
314static void reset(struct mtd_info *mtd)
315{
316 /* full device reset */
317
318 struct nand_chip *nand = mtd->priv;
319 struct docg4_priv *doc = nand->priv;
320 void __iomem *docptr = doc->virtadr;
321
322 writew(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN,
323 docptr + DOC_ASICMODE);
324 writew(~(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN),
325 docptr + DOC_ASICMODECONFIRM);
326 write_nop(docptr);
327
328 writew(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN,
329 docptr + DOC_ASICMODE);
330 writew(~(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN),
331 docptr + DOC_ASICMODECONFIRM);
332
333 writew(DOC_ECCCONF1_ECC_ENABLE, docptr + DOC_ECCCONF1);
334
335 poll_status(doc);
336}
337
338static void read_hw_ecc(void __iomem *docptr, uint8_t *ecc_buf)
339{
340 /* read the 7 hw-generated ecc bytes */
341
342 int i;
343 for (i = 0; i < 7; i++) { /* hw quirk; read twice */
344 ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
345 ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
346 }
347}
348
349static int correct_data(struct mtd_info *mtd, uint8_t *buf, int page)
350{
351 /*
352 * Called after a page read when hardware reports bitflips.
353 * Up to four bitflips can be corrected.
354 */
355
356 struct nand_chip *nand = mtd->priv;
357 struct docg4_priv *doc = nand->priv;
358 void __iomem *docptr = doc->virtadr;
359 int i, numerrs, errpos[4];
360 const uint8_t blank_read_hwecc[8] = {
361 0xcf, 0x72, 0xfc, 0x1b, 0xa9, 0xc7, 0xb9, 0 };
362
363 read_hw_ecc(docptr, doc->ecc_buf); /* read 7 hw-generated ecc bytes */
364
365 /* check if read error is due to a blank page */
366 if (!memcmp(doc->ecc_buf, blank_read_hwecc, 7))
367 return 0; /* yes */
368
369 /* skip additional check of "written flag" if ignore_badblocks */
370 if (ignore_badblocks == false) {
371
372 /*
373 * If the hw ecc bytes are not those of a blank page, there's
374 * still a chance that the page is blank, but was read with
375 * errors. Check the "written flag" in last oob byte, which
376 * is set to zero when a page is written. If more than half
377 * the bits are set, assume a blank page. Unfortunately, the
378 * bit flips(s) are not reported in stats.
379 */
380
381 if (doc->oob_buf[15]) {
382 int bit, numsetbits = 0;
383 unsigned long written_flag = doc->oob_buf[15];
384 for_each_set_bit(bit, &written_flag, 8)
385 numsetbits++;
386 if (numsetbits > 4) { /* assume blank */
387 dev_warn(doc->dev,
388 "error(s) in blank page "
389 "at offset %08x\n",
390 page * DOCG4_PAGE_SIZE);
391 return 0;
392 }
393 }
394 }
395
396 /*
397 * The hardware ecc unit produces oob_ecc ^ calc_ecc. The kernel's bch
398 * algorithm is used to decode this. However the hw operates on page
399 * data in a bit order that is the reverse of that of the bch alg,
400 * requiring that the bits be reversed on the result. Thanks to Ivan
401 * Djelic for his analysis!
402 */
403 for (i = 0; i < 7; i++)
404 doc->ecc_buf[i] = bitrev8(doc->ecc_buf[i]);
405
406 numerrs = decode_bch(doc->bch, NULL, DOCG4_USERDATA_LEN, NULL,
407 doc->ecc_buf, NULL, errpos);
408
409 if (numerrs == -EBADMSG) {
410 dev_warn(doc->dev, "uncorrectable errors at offset %08x\n",
411 page * DOCG4_PAGE_SIZE);
412 return -EBADMSG;
413 }
414
415 BUG_ON(numerrs < 0); /* -EINVAL, or anything other than -EBADMSG */
416
417 /* undo last step in BCH alg (modulo mirroring not needed) */
418 for (i = 0; i < numerrs; i++)
419 errpos[i] = (errpos[i] & ~7)|(7-(errpos[i] & 7));
420
421 /* fix the errors */
422 for (i = 0; i < numerrs; i++) {
423
424 /* ignore if error within oob ecc bytes */
425 if (errpos[i] > DOCG4_USERDATA_LEN * 8)
426 continue;
427
 428 /* if error within oob area preceding ecc bytes... */
429 if (errpos[i] > DOCG4_PAGE_SIZE * 8)
430 change_bit(errpos[i] - DOCG4_PAGE_SIZE * 8,
431 (unsigned long *)doc->oob_buf);
432
433 else /* error in page data */
434 change_bit(errpos[i], (unsigned long *)buf);
435 }
436
437 dev_notice(doc->dev, "%d error(s) corrected at offset %08x\n",
438 numerrs, page * DOCG4_PAGE_SIZE);
439
440 return numerrs;
441}
442
443static uint8_t docg4_read_byte(struct mtd_info *mtd)
444{
445 struct nand_chip *nand = mtd->priv;
446 struct docg4_priv *doc = nand->priv;
447
448 dev_dbg(doc->dev, "%s\n", __func__);
449
450 if (doc->last_command.command == NAND_CMD_STATUS) {
451 int status;
452
453 /*
454 * Previous nand command was status request, so nand
455 * infrastructure code expects to read the status here. If an
456 * error occurred in a previous operation, report it.
457 */
458 doc->last_command.command = 0;
459
460 if (doc->status) {
461 status = doc->status;
462 doc->status = 0;
463 }
464
465 /* why is NAND_STATUS_WP inverse logic?? */
466 else
467 status = NAND_STATUS_WP | NAND_STATUS_READY;
468
469 return status;
470 }
471
 472 dev_warn(doc->dev, "unexpected call to read_byte()\n");
473
474 return 0;
475}
476
477static void write_addr(struct docg4_priv *doc, uint32_t docg4_addr)
478{
479 /* write the four address bytes packed in docg4_addr to the device */
480
481 void __iomem *docptr = doc->virtadr;
482 writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
483 docg4_addr >>= 8;
484 writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
485 docg4_addr >>= 8;
486 writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
487 docg4_addr >>= 8;
488 writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
489}
490
491static int read_progstatus(struct docg4_priv *doc)
492{
493 /*
494 * This apparently checks the status of programming. Done after an
495 * erasure, and after page data is written. On error, the status is
496 * saved, to be later retrieved by the nand infrastructure code.
497 */
498 void __iomem *docptr = doc->virtadr;
499
500 /* status is read from the I/O reg */
501 uint16_t status1 = readw(docptr + DOC_IOSPACE_DATA);
502 uint16_t status2 = readw(docptr + DOC_IOSPACE_DATA);
503 uint16_t status3 = readw(docptr + DOCG4_MYSTERY_REG);
504
505 dev_dbg(doc->dev, "docg4: %s: %02x %02x %02x\n",
506 __func__, status1, status2, status3);
507
508 if (status1 != DOCG4_PROGSTATUS_GOOD
509 || status2 != DOCG4_PROGSTATUS_GOOD_2
510 || status3 != DOCG4_PROGSTATUS_GOOD_2) {
511 doc->status = NAND_STATUS_FAIL;
512 dev_warn(doc->dev, "read_progstatus failed: "
513 "%02x, %02x, %02x\n", status1, status2, status3);
514 return -EIO;
515 }
516 return 0;
517}
518
519static int pageprog(struct mtd_info *mtd)
520{
521 /*
522 * Final step in writing a page. Writes the contents of its
523 * internal buffer out to the flash array, or some such.
524 */
525
526 struct nand_chip *nand = mtd->priv;
527 struct docg4_priv *doc = nand->priv;
528 void __iomem *docptr = doc->virtadr;
529 int retval = 0;
530
531 dev_dbg(doc->dev, "docg4: %s\n", __func__);
532
533 writew(DOCG4_SEQ_PAGEPROG, docptr + DOC_FLASHSEQUENCE);
534 writew(DOC_CMD_PROG_CYCLE2, docptr + DOC_FLASHCOMMAND);
535 write_nop(docptr);
536 write_nop(docptr);
537
538 /* Just busy-wait; usleep_range() slows things down noticeably. */
539 poll_status(doc);
540
541 writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
542 writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
543 writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
544 write_nop(docptr);
545 write_nop(docptr);
546 write_nop(docptr);
547 write_nop(docptr);
548 write_nop(docptr);
549
550 retval = read_progstatus(doc);
551 writew(0, docptr + DOC_DATAEND);
552 write_nop(docptr);
553 poll_status(doc);
554 write_nop(docptr);
555
556 return retval;
557}
558
559static void sequence_reset(struct mtd_info *mtd)
560{
561 /* common starting sequence for all operations */
562
563 struct nand_chip *nand = mtd->priv;
564 struct docg4_priv *doc = nand->priv;
565 void __iomem *docptr = doc->virtadr;
566
567 writew(DOC_CTRL_UNKNOWN | DOC_CTRL_CE, docptr + DOC_FLASHCONTROL);
568 writew(DOC_SEQ_RESET, docptr + DOC_FLASHSEQUENCE);
569 writew(DOC_CMD_RESET, docptr + DOC_FLASHCOMMAND);
570 write_nop(docptr);
571 write_nop(docptr);
572 poll_status(doc);
573 write_nop(docptr);
574}
575
576static void read_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
577{
578 /* first step in reading a page */
579
580 struct nand_chip *nand = mtd->priv;
581 struct docg4_priv *doc = nand->priv;
582 void __iomem *docptr = doc->virtadr;
583
584 dev_dbg(doc->dev,
585 "docg4: %s: g4 page %08x\n", __func__, docg4_addr);
586
587 sequence_reset(mtd);
588
589 writew(DOCG4_SEQ_PAGE_READ, docptr + DOC_FLASHSEQUENCE);
590 writew(DOCG4_CMD_PAGE_READ, docptr + DOC_FLASHCOMMAND);
591 write_nop(docptr);
592
593 write_addr(doc, docg4_addr);
594
595 write_nop(docptr);
596 writew(DOCG4_CMD_READ2, docptr + DOC_FLASHCOMMAND);
597 write_nop(docptr);
598 write_nop(docptr);
599
600 poll_status(doc);
601}
602
603static void write_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
604{
605 /* first step in writing a page */
606
607 struct nand_chip *nand = mtd->priv;
608 struct docg4_priv *doc = nand->priv;
609 void __iomem *docptr = doc->virtadr;
610
611 dev_dbg(doc->dev,
612 "docg4: %s: g4 addr: %x\n", __func__, docg4_addr);
613 sequence_reset(mtd);
614 writew(DOCG4_SEQ_PAGEWRITE, docptr + DOC_FLASHSEQUENCE);
615 writew(DOCG4_CMD_PAGEWRITE, docptr + DOC_FLASHCOMMAND);
616 write_nop(docptr);
617 write_addr(doc, docg4_addr);
618 write_nop(docptr);
619 write_nop(docptr);
620 poll_status(doc);
621}
622
623static uint32_t mtd_to_docg4_address(int page, int column)
624{
625 /*
626 * Convert mtd address to format used by the device, 32 bit packed.
627 *
628 * Some notes on G4 addressing... The M-Sys documentation on this device
629 * claims that pages are 2K in length, and indeed, the format of the
630 * address used by the device reflects that. But within each page are
631 * four 512 byte "sub-pages", each with its own oob data that is
632 * read/written immediately after the 512 bytes of page data. This oob
 633 * data contains the ecc bytes for the preceding 512 bytes.
634 *
635 * Rather than tell the mtd nand infrastructure that page size is 2k,
636 * with four sub-pages each, we engage in a little subterfuge and tell
637 * the infrastructure code that pages are 512 bytes in size. This is
638 * done because during the course of reverse-engineering the device, I
639 * never observed an instance where an entire 2K "page" was read or
640 * written as a unit. Each "sub-page" is always addressed individually,
641 * its data read/written, and ecc handled before the next "sub-page" is
642 * addressed.
643 *
644 * This requires us to convert addresses passed by the mtd nand
645 * infrastructure code to those used by the device.
646 *
647 * The address that is written to the device consists of four bytes: the
 648 * first two are the 2k page number, and the last two are the index into
 649 * the page. The index is in terms of 16-bit half-words and includes
 650 * the preceding oob data, so e.g., the index into the second
651 * "sub-page" is 0x108, and the full device address of the start of mtd
652 * page 0x201 is 0x00800108.
653 */
654 int g4_page = page / 4; /* device's 2K page */
655 int g4_index = (page % 4) * 0x108 + column/2; /* offset into page */
656 return (g4_page << 16) | g4_index; /* pack */
657}
658
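Plugging the example from the comment above into this conversion (illustrative arithmetic only):

	mtd page 0x201, column 0:
	  g4_page  = 0x201 / 4             = 0x80
	  g4_index = (0x201 % 4) * 0x108   = 0x108
	  packed   = (0x80 << 16) | 0x108  = 0x00800108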
659static void docg4_command(struct mtd_info *mtd, unsigned command, int column,
660 int page_addr)
661{
662 /* handle standard nand commands */
663
664 struct nand_chip *nand = mtd->priv;
665 struct docg4_priv *doc = nand->priv;
666 uint32_t g4_addr = mtd_to_docg4_address(page_addr, column);
667
668 dev_dbg(doc->dev, "%s %x, page_addr=%x, column=%x\n",
669 __func__, command, page_addr, column);
670
671 /*
672 * Save the command and its arguments. This enables emulation of
673 * standard flash devices, and also some optimizations.
674 */
675 doc->last_command.command = command;
676 doc->last_command.column = column;
677 doc->last_command.page = page_addr;
678
679 switch (command) {
680
681 case NAND_CMD_RESET:
682 reset(mtd);
683 break;
684
685 case NAND_CMD_READ0:
686 read_page_prologue(mtd, g4_addr);
687 break;
688
689 case NAND_CMD_STATUS:
690 /* next call to read_byte() will expect a status */
691 break;
692
693 case NAND_CMD_SEQIN:
694 write_page_prologue(mtd, g4_addr);
695
696 /* hack for deferred write of oob bytes */
697 if (doc->oob_page == page_addr)
698 memcpy(nand->oob_poi, doc->oob_buf, 16);
699 break;
700
701 case NAND_CMD_PAGEPROG:
702 pageprog(mtd);
703 break;
704
705 /* we don't expect these, based on review of nand_base.c */
706 case NAND_CMD_READOOB:
707 case NAND_CMD_READID:
708 case NAND_CMD_ERASE1:
709 case NAND_CMD_ERASE2:
710 dev_warn(doc->dev, "docg4_command: "
711 "unexpected nand command 0x%x\n", command);
712 break;
713
714 }
715}
716
717static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
718 uint8_t *buf, int page, bool use_ecc)
719{
720 struct docg4_priv *doc = nand->priv;
721 void __iomem *docptr = doc->virtadr;
722 uint16_t status, edc_err, *buf16;
723
724 dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
725
726 writew(DOC_ECCCONF0_READ_MODE |
727 DOC_ECCCONF0_ECC_ENABLE |
728 DOC_ECCCONF0_UNKNOWN |
729 DOCG4_BCH_SIZE,
730 docptr + DOC_ECCCONF0);
731 write_nop(docptr);
732 write_nop(docptr);
733 write_nop(docptr);
734 write_nop(docptr);
735 write_nop(docptr);
736
737 /* the 1st byte from the I/O reg is a status; the rest is page data */
738 status = readw(docptr + DOC_IOSPACE_DATA);
739 if (status & DOCG4_READ_ERROR) {
740 dev_err(doc->dev,
741 "docg4_read_page: bad status: 0x%02x\n", status);
742 writew(0, docptr + DOC_DATAEND);
743 return -EIO;
744 }
745
746 dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
747
748 docg4_read_buf(mtd, buf, DOCG4_PAGE_SIZE); /* read the page data */
749
750 /*
751 * Diskonchips read oob immediately after a page read. Mtd
752 * infrastructure issues a separate command for reading oob after the
753 * page is read. So we save the oob bytes in a local buffer and just
754 * copy it if the next command reads oob from the same page.
755 */
756
757 /* first 14 oob bytes read from I/O reg */
758 docg4_read_buf(mtd, doc->oob_buf, 14);
759
760 /* last 2 read from another reg */
761 buf16 = (uint16_t *)(doc->oob_buf + 14);
762 *buf16 = readw(docptr + DOCG4_MYSTERY_REG);
763
764 write_nop(docptr);
765
766 if (likely(use_ecc == true)) {
767
768 /* read the register that tells us if bitflip(s) detected */
769 edc_err = readw(docptr + DOC_ECCCONF1);
770 edc_err = readw(docptr + DOC_ECCCONF1);
771 dev_dbg(doc->dev, "%s: edc_err = 0x%02x\n", __func__, edc_err);
772
773 /* If bitflips are reported, attempt to correct with ecc */
774 if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) {
775 int bits_corrected = correct_data(mtd, buf, page);
776 if (bits_corrected == -EBADMSG)
777 mtd->ecc_stats.failed++;
778 else
779 mtd->ecc_stats.corrected += bits_corrected;
780 }
781 }
782
783 writew(0, docptr + DOC_DATAEND);
784 return 0;
785}
786
787
788static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
789 uint8_t *buf, int page)
790{
791 return read_page(mtd, nand, buf, page, false);
792}
793
794static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand,
795 uint8_t *buf, int page)
796{
797 return read_page(mtd, nand, buf, page, true);
798}
799
800static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
801 int page, int sndcmd)
802{
803 struct docg4_priv *doc = nand->priv;
804 void __iomem *docptr = doc->virtadr;
805 uint16_t status;
806
807 dev_dbg(doc->dev, "%s: page %x\n", __func__, page);
808
809 /*
810 * Oob bytes are read as part of a normal page read. If the previous
811 * nand command was a read of the page whose oob is now being read, just
812 * copy the oob bytes that we saved in a local buffer and avoid a
813 * separate oob read.
814 */
815 if (doc->last_command.command == NAND_CMD_READ0 &&
816 doc->last_command.page == page) {
817 memcpy(nand->oob_poi, doc->oob_buf, 16);
818 return 0;
819 }
820
821 /*
822 * Separate read of oob data only.
823 */
824 docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page);
825
826 writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0);
827 write_nop(docptr);
828 write_nop(docptr);
829 write_nop(docptr);
830 write_nop(docptr);
831 write_nop(docptr);
832
833 /* the 1st byte from the I/O reg is a status; the rest is oob data */
834 status = readw(docptr + DOC_IOSPACE_DATA);
835 if (status & DOCG4_READ_ERROR) {
836 dev_warn(doc->dev,
837 "docg4_read_oob failed: status = 0x%02x\n", status);
838 return -EIO;
839 }
840
841 dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
842
843 docg4_read_buf(mtd, nand->oob_poi, 16);
844
845 write_nop(docptr);
846 write_nop(docptr);
847 write_nop(docptr);
848 writew(0, docptr + DOC_DATAEND);
849 write_nop(docptr);
850
851 return 0;
852}
853
854static void docg4_erase_block(struct mtd_info *mtd, int page)
855{
856 struct nand_chip *nand = mtd->priv;
857 struct docg4_priv *doc = nand->priv;
858 void __iomem *docptr = doc->virtadr;
859 uint16_t g4_page;
860
861 dev_dbg(doc->dev, "%s: page %04x\n", __func__, page);
862
863 sequence_reset(mtd);
864
865 writew(DOCG4_SEQ_BLOCKERASE, docptr + DOC_FLASHSEQUENCE);
866 writew(DOC_CMD_PROG_BLOCK_ADDR, docptr + DOC_FLASHCOMMAND);
867 write_nop(docptr);
868
869 /* only 2 bytes of address are written to specify erase block */
870 g4_page = (uint16_t)(page / 4); /* to g4's 2k page addressing */
871 writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
872 g4_page >>= 8;
873 writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
874 write_nop(docptr);
875
876 /* start the erasure */
877 writew(DOC_CMD_ERASECYCLE2, docptr + DOC_FLASHCOMMAND);
878 write_nop(docptr);
879 write_nop(docptr);
880
881 usleep_range(500, 1000); /* erasure is long; take a snooze */
882 poll_status(doc);
883 writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
884 writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
885 writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
886 write_nop(docptr);
887 write_nop(docptr);
888 write_nop(docptr);
889 write_nop(docptr);
890 write_nop(docptr);
891
892 read_progstatus(doc);
893
894 writew(0, docptr + DOC_DATAEND);
895 write_nop(docptr);
896 poll_status(doc);
897 write_nop(docptr);
898}
899
900static void write_page(struct mtd_info *mtd, struct nand_chip *nand,
901 const uint8_t *buf, bool use_ecc)
902{
903 struct docg4_priv *doc = nand->priv;
904 void __iomem *docptr = doc->virtadr;
905 uint8_t ecc_buf[8];
906
907 dev_dbg(doc->dev, "%s...\n", __func__);
908
909 writew(DOC_ECCCONF0_ECC_ENABLE |
910 DOC_ECCCONF0_UNKNOWN |
911 DOCG4_BCH_SIZE,
912 docptr + DOC_ECCCONF0);
913 write_nop(docptr);
914
915 /* write the page data */
916 docg4_write_buf16(mtd, buf, DOCG4_PAGE_SIZE);
917
918 /* oob bytes 0 through 5 are written to I/O reg */
919 docg4_write_buf16(mtd, nand->oob_poi, 6);
920
921 /* oob byte 6 written to a separate reg */
922 writew(nand->oob_poi[6], docptr + DOCG4_OOB_6_7);
923
924 write_nop(docptr);
925 write_nop(docptr);
926
927 /* write hw-generated ecc bytes to oob */
928 if (likely(use_ecc == true)) {
929 /* oob byte 7 is hamming code */
930 uint8_t hamming = readb(docptr + DOC_HAMMINGPARITY);
931 hamming = readb(docptr + DOC_HAMMINGPARITY); /* 2nd read */
932 writew(hamming, docptr + DOCG4_OOB_6_7);
933 write_nop(docptr);
934
935 /* read the 7 bch bytes from ecc regs */
936 read_hw_ecc(docptr, ecc_buf);
937 ecc_buf[7] = 0; /* clear the "page written" flag */
938 }
939
940 /* write user-supplied bytes to oob */
941 else {
942 writew(nand->oob_poi[7], docptr + DOCG4_OOB_6_7);
943 write_nop(docptr);
944 memcpy(ecc_buf, &nand->oob_poi[8], 8);
945 }
946
947 docg4_write_buf16(mtd, ecc_buf, 8);
948 write_nop(docptr);
949 write_nop(docptr);
950 writew(0, docptr + DOC_DATAEND);
951 write_nop(docptr);
952}
953
954static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
955 const uint8_t *buf)
956{
957 return write_page(mtd, nand, buf, false);
958}
959
960static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
961 const uint8_t *buf)
962{
963 return write_page(mtd, nand, buf, true);
964}
965
966static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
967 int page)
968{
969 /*
970 * Writing oob-only is not really supported, because MLC nand must write
971 * oob bytes at the same time as page data. Nonetheless, we save the
972 * oob buffer contents here, and then write it along with the page data
973 * if the same page is subsequently written. This allows user space
974 * utilities that write the oob data prior to the page data to work
 975 * (e.g., nandwrite). The disadvantage is that, if the intention was to
976 * write oob only, the operation is quietly ignored. Also, oob can get
977 * corrupted if two concurrent processes are running nandwrite.
978 */
979
980 /* note that bytes 7..14 are hw generated hamming/ecc and overwritten */
981 struct docg4_priv *doc = nand->priv;
982 doc->oob_page = page;
983 memcpy(doc->oob_buf, nand->oob_poi, 16);
984 return 0;
985}
986
987static int __init read_factory_bbt(struct mtd_info *mtd)
988{
989 /*
990 * The device contains a read-only factory bad block table. Read it and
991 * update the memory-based bbt accordingly.
992 */
993
994 struct nand_chip *nand = mtd->priv;
995 struct docg4_priv *doc = nand->priv;
996 uint32_t g4_addr = mtd_to_docg4_address(DOCG4_FACTORY_BBT_PAGE, 0);
997 uint8_t *buf;
998 int i, block, status;
999
1000 buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
1001 if (buf == NULL)
1002 return -ENOMEM;
1003
1004 read_page_prologue(mtd, g4_addr);
1005 status = docg4_read_page(mtd, nand, buf, DOCG4_FACTORY_BBT_PAGE);
1006 if (status)
1007 goto exit;
1008
1009 /*
1010 * If no memory-based bbt was created, exit. This will happen if module
1011 * parameter ignore_badblocks is set. Then why even call this function?
1012 * For an unknown reason, block erase always fails if it's the first
1013 * operation after device power-up. The above read ensures it never is.
1014 * Ugly, I know.
1015 */
1016 if (nand->bbt == NULL) /* no memory-based bbt */
1017 goto exit;
1018
1019 /*
1020 * Parse factory bbt and update memory-based bbt. Factory bbt format is
1021 * simple: one bit per block, block numbers increase left to right (msb
1022 * to lsb). Bit clear means bad block.
1023 */
1024 for (i = block = 0; block < DOCG4_NUMBLOCKS; block += 8, i++) {
1025 int bitnum;
1026 unsigned long bits = ~buf[i];
1027 for_each_set_bit(bitnum, &bits, 8) {
1028 int badblock = block + 7 - bitnum;
1029 nand->bbt[badblock / 4] |=
1030 0x03 << ((badblock % 4) * 2);
1031 mtd->ecc_stats.badblocks++;
1032 dev_notice(doc->dev, "factory-marked bad block: %d\n",
1033 badblock);
1034 }
1035 }
1036 exit:
1037 kfree(buf);
1038 return status;
1039}
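To make the factory-bbt format concrete with a made-up value: if buf[0] read 0x7f (msb clear), then ~buf[0] = 0x80, the loop visits bitnum 7, and badblock = 0 + 7 - 7 = 0, so the very first block on the chip would be marked bad, matching the msb-to-lsb block ordering described in the comment above.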
1040
1041static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
1042{
1043 /*
1044 * Mark a block as bad. Bad blocks are marked in the oob area of the
1045 * first page of the block. The default scan_bbt() in the nand
1046 * infrastructure code works fine for building the memory-based bbt
1047 * during initialization, as does the nand infrastructure function that
1048 * checks if a block is bad by reading the bbt. This function replaces
1049 * the nand default because writes to oob-only are not supported.
1050 */
1051
1052 int ret, i;
1053 uint8_t *buf;
1054 struct nand_chip *nand = mtd->priv;
1055 struct docg4_priv *doc = nand->priv;
1056 struct nand_bbt_descr *bbtd = nand->badblock_pattern;
1057 int block = (int)(ofs >> nand->bbt_erase_shift);
1058 int page = (int)(ofs >> nand->page_shift);
1059 uint32_t g4_addr = mtd_to_docg4_address(page, 0);
1060
1061 dev_dbg(doc->dev, "%s: %08llx\n", __func__, ofs);
1062
1063 if (unlikely(ofs & (DOCG4_BLOCK_SIZE - 1)))
1064 dev_warn(doc->dev, "%s: ofs %llx not start of block!\n",
1065 __func__, ofs);
1066
1067 /* allocate blank buffer for page data */
1068 buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
1069 if (buf == NULL)
1070 return -ENOMEM;
1071
1072 /* update bbt in memory */
1073 nand->bbt[block / 4] |= 0x01 << ((block & 0x03) * 2);
1074
1075 /* write bit-wise negation of pattern to oob buffer */
1076 memset(nand->oob_poi, 0xff, mtd->oobsize);
1077 for (i = 0; i < bbtd->len; i++)
1078 nand->oob_poi[bbtd->offs + i] = ~bbtd->pattern[i];
1079
1080 /* write first page of block */
1081 write_page_prologue(mtd, g4_addr);
1082 docg4_write_page(mtd, nand, buf);
1083 ret = pageprog(mtd);
1084 if (!ret)
1085 mtd->ecc_stats.badblocks++;
1086
1087 kfree(buf);
1088
1089 return ret;
1090}
1091
1092static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs, int getchip)
1093{
1094 /* only called when module_param ignore_badblocks is set */
1095 return 0;
1096}
1097
1098static int docg4_suspend(struct platform_device *pdev, pm_message_t state)
1099{
1100 /*
1101 * Put the device into "deep power-down" mode. Note that CE# must be
1102 * deasserted for this to take effect. The xscale, e.g., can be
1103 * configured to float this signal when the processor enters power-down,
1104 * and a suitable pull-up ensures its deassertion.
1105 */
1106
1107 int i;
1108 uint8_t pwr_down;
1109 struct docg4_priv *doc = platform_get_drvdata(pdev);
1110 void __iomem *docptr = doc->virtadr;
1111
1112 dev_dbg(doc->dev, "%s...\n", __func__);
1113
1114 /* poll the register that tells us we're ready to go to sleep */
1115 for (i = 0; i < 10; i++) {
1116 pwr_down = readb(docptr + DOC_POWERMODE);
1117 if (pwr_down & DOC_POWERDOWN_READY)
1118 break;
1119 usleep_range(1000, 4000);
1120 }
1121
 1122 if (!(pwr_down & DOC_POWERDOWN_READY)) {
1123 dev_err(doc->dev, "suspend failed; "
1124 "timeout polling DOC_POWERDOWN_READY\n");
1125 return -EIO;
1126 }
1127
1128 writew(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN,
1129 docptr + DOC_ASICMODE);
1130 writew(~(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN),
1131 docptr + DOC_ASICMODECONFIRM);
1132
1133 write_nop(docptr);
1134
1135 return 0;
1136}
1137
1138static int docg4_resume(struct platform_device *pdev)
1139{
1140
1141 /*
1142 * Exit power-down. Twelve consecutive reads of the address below
 1143 * accomplish this, assuming CE# has been asserted.
1144 */
1145
1146 struct docg4_priv *doc = platform_get_drvdata(pdev);
1147 void __iomem *docptr = doc->virtadr;
1148 int i;
1149
1150 dev_dbg(doc->dev, "%s...\n", __func__);
1151
1152 for (i = 0; i < 12; i++)
1153 readb(docptr + 0x1fff);
1154
1155 return 0;
1156}
1157
1158static void __init init_mtd_structs(struct mtd_info *mtd)
1159{
1160 /* initialize mtd and nand data structures */
1161
1162 /*
1163 * Note that some of the following initializations are not usually
1164 * required within a nand driver because they are performed by the nand
1165 * infrastructure code as part of nand_scan(). In this case they need
 1166 * to be initialized here because we skip the call to nand_scan_ident() (the
1167 * first half of nand_scan()). The call to nand_scan_ident() is skipped
1168 * because for this device the chip id is not read in the manner of a
1169 * standard nand device. Unfortunately, nand_scan_ident() does other
1170 * things as well, such as call nand_set_defaults().
1171 */
1172
1173 struct nand_chip *nand = mtd->priv;
1174 struct docg4_priv *doc = nand->priv;
1175
1176 mtd->size = DOCG4_CHIP_SIZE;
1177 mtd->name = "Msys_Diskonchip_G4";
1178 mtd->writesize = DOCG4_PAGE_SIZE;
1179 mtd->erasesize = DOCG4_BLOCK_SIZE;
1180 mtd->oobsize = DOCG4_OOB_SIZE;
1181 nand->chipsize = DOCG4_CHIP_SIZE;
1182 nand->chip_shift = DOCG4_CHIP_SHIFT;
1183 nand->bbt_erase_shift = nand->phys_erase_shift = DOCG4_ERASE_SHIFT;
1184 nand->chip_delay = 20;
1185 nand->page_shift = DOCG4_PAGE_SHIFT;
1186 nand->pagemask = 0x3ffff;
1187 nand->badblockpos = NAND_LARGE_BADBLOCK_POS;
1188 nand->badblockbits = 8;
1189 nand->ecc.layout = &docg4_oobinfo;
1190 nand->ecc.mode = NAND_ECC_HW_SYNDROME;
1191 nand->ecc.size = DOCG4_PAGE_SIZE;
1192 nand->ecc.prepad = 8;
1193 nand->ecc.bytes = 8;
1194 nand->ecc.strength = DOCG4_T;
1195 nand->options =
1196 NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE | NAND_NO_AUTOINCR;
1197 nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
1198 nand->controller = &nand->hwcontrol;
1199 spin_lock_init(&nand->controller->lock);
1200 init_waitqueue_head(&nand->controller->wq);
1201
1202 /* methods */
1203 nand->cmdfunc = docg4_command;
1204 nand->waitfunc = docg4_wait;
1205 nand->select_chip = docg4_select_chip;
1206 nand->read_byte = docg4_read_byte;
1207 nand->block_markbad = docg4_block_markbad;
1208 nand->read_buf = docg4_read_buf;
1209 nand->write_buf = docg4_write_buf16;
1210 nand->scan_bbt = nand_default_bbt;
1211 nand->erase_cmd = docg4_erase_block;
1212 nand->ecc.read_page = docg4_read_page;
1213 nand->ecc.write_page = docg4_write_page;
1214 nand->ecc.read_page_raw = docg4_read_page_raw;
1215 nand->ecc.write_page_raw = docg4_write_page_raw;
1216 nand->ecc.read_oob = docg4_read_oob;
1217 nand->ecc.write_oob = docg4_write_oob;
1218
1219 /*
1220 * The way the nand infrastructure code is written, a memory-based bbt
1221 * is not created if NAND_SKIP_BBTSCAN is set. With no memory bbt,
1222 * nand->block_bad() is used. So when ignoring bad blocks, we skip the
1223 * scan and define a dummy block_bad() which always returns 0.
1224 */
1225 if (ignore_badblocks) {
1226 nand->options |= NAND_SKIP_BBTSCAN;
1227 nand->block_bad = docg4_block_neverbad;
1228 }
1229
1230}
1231
1232static int __init read_id_reg(struct mtd_info *mtd)
1233{
1234 struct nand_chip *nand = mtd->priv;
1235 struct docg4_priv *doc = nand->priv;
1236 void __iomem *docptr = doc->virtadr;
1237 uint16_t id1, id2;
1238
1239 /* check for presence of g4 chip by reading id registers */
1240 id1 = readw(docptr + DOC_CHIPID);
1241 id1 = readw(docptr + DOCG4_MYSTERY_REG);
1242 id2 = readw(docptr + DOC_CHIPID_INV);
1243 id2 = readw(docptr + DOCG4_MYSTERY_REG);
1244
1245 if (id1 == DOCG4_IDREG1_VALUE && id2 == DOCG4_IDREG2_VALUE) {
1246 dev_info(doc->dev,
1247 "NAND device: 128MiB Diskonchip G4 detected\n");
1248 return 0;
1249 }
1250
1251 return -ENODEV;
1252}
1253
1254static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
1255
1256static int __init probe_docg4(struct platform_device *pdev)
1257{
1258 struct mtd_info *mtd;
1259 struct nand_chip *nand;
1260 void __iomem *virtadr;
1261 struct docg4_priv *doc;
1262 int len, retval;
1263 struct resource *r;
1264 struct device *dev = &pdev->dev;
1265
1266 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1267 if (r == NULL) {
1268 dev_err(dev, "no io memory resource defined!\n");
1269 return -ENODEV;
1270 }
1271
1272 virtadr = ioremap(r->start, resource_size(r));
1273 if (!virtadr) {
1274 dev_err(dev, "Diskonchip ioremap failed: %pR\n", r);
1275 return -EIO;
1276 }
1277
1278 len = sizeof(struct mtd_info) + sizeof(struct nand_chip) +
1279 sizeof(struct docg4_priv);
1280 mtd = kzalloc(len, GFP_KERNEL);
1281 if (mtd == NULL) {
1282 retval = -ENOMEM;
1283 goto fail;
1284 }
1285 nand = (struct nand_chip *) (mtd + 1);
1286 doc = (struct docg4_priv *) (nand + 1);
1287 mtd->priv = nand;
1288 nand->priv = doc;
1289 mtd->owner = THIS_MODULE;
1290 doc->virtadr = virtadr;
1291 doc->dev = dev;
1292
1293 init_mtd_structs(mtd);
1294
1295 /* initialize kernel bch algorithm */
1296 doc->bch = init_bch(DOCG4_M, DOCG4_T, DOCG4_PRIMITIVE_POLY);
1297 if (doc->bch == NULL) {
1298 retval = -EINVAL;
1299 goto fail;
1300 }
1301
1302 platform_set_drvdata(pdev, doc);
1303
1304 reset(mtd);
1305 retval = read_id_reg(mtd);
1306 if (retval == -ENODEV) {
1307 dev_warn(dev, "No diskonchip G4 device found.\n");
1308 goto fail;
1309 }
1310
1311 retval = nand_scan_tail(mtd);
1312 if (retval)
1313 goto fail;
1314
1315 retval = read_factory_bbt(mtd);
1316 if (retval)
1317 goto fail;
1318
1319 retval = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
1320 if (retval)
1321 goto fail;
1322
1323 doc->mtd = mtd;
1324 return 0;
1325
1326 fail:
1327 iounmap(virtadr);
1328 if (mtd) {
1329 /* re-declarations avoid compiler warning */
1330 struct nand_chip *nand = mtd->priv;
1331 struct docg4_priv *doc = nand->priv;
1332 nand_release(mtd); /* deletes partitions and mtd devices */
1333 platform_set_drvdata(pdev, NULL);
1334 free_bch(doc->bch);
1335 kfree(mtd);
1336 }
1337
1338 return retval;
1339}
1340
1341static int __exit cleanup_docg4(struct platform_device *pdev)
1342{
1343 struct docg4_priv *doc = platform_get_drvdata(pdev);
1344 nand_release(doc->mtd);
1345 platform_set_drvdata(pdev, NULL);
1346 free_bch(doc->bch);
1347 kfree(doc->mtd);
1348 iounmap(doc->virtadr);
1349 return 0;
1350}
1351
1352static struct platform_driver docg4_driver = {
1353 .driver = {
1354 .name = "docg4",
1355 .owner = THIS_MODULE,
1356 },
1357 .suspend = docg4_suspend,
1358 .resume = docg4_resume,
1359 .remove = __exit_p(cleanup_docg4),
1360};
1361
1362static int __init docg4_init(void)
1363{
1364 return platform_driver_probe(&docg4_driver, probe_docg4);
1365}
1366
1367static void __exit docg4_exit(void)
1368{
1369 platform_driver_unregister(&docg4_driver);
1370}
1371
1372module_init(docg4_init);
1373module_exit(docg4_exit);
1374
1375MODULE_LICENSE("GPL");
1376MODULE_AUTHOR("Mike Dunn");
1377MODULE_DESCRIPTION("M-Systems DiskOnChip G4 device driver");
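Because the driver above is bound with platform_driver_probe(), it only attaches if board code has registered a matching "docg4" platform device describing the chip's register window. A minimal, hypothetical board-file sketch (the 0x08000000 base address is a placeholder; the window must cover at least 0x2000 bytes, since the driver touches offsets up to 0x1fff):

	#include <linux/init.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	static struct resource docg4_resource = {
		.start = 0x08000000,			/* placeholder chip-select base */
		.end   = 0x08000000 + 0x2000 - 1,	/* register window used by the driver */
		.flags = IORESOURCE_MEM,
	};

	static struct platform_device docg4_device = {
		.name		= "docg4",		/* must match docg4_driver.driver.name */
		.id		= -1,
		.num_resources	= 1,
		.resource	= &docg4_resource,
	};

	/* typically called from the board's init_machine hook */
	static int __init board_add_docg4(void)
	{
		return platform_device_register(&docg4_device);
	}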
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 7195ee6efe12..80b5264f0a32 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -813,6 +813,12 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
813 &fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0; 813 &fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0;
814 chip->ecc.size = 512; 814 chip->ecc.size = 512;
815 chip->ecc.bytes = 3; 815 chip->ecc.bytes = 3;
816 chip->ecc.strength = 1;
817 /*
818 * FIXME: can hardware ecc correct 4 bitflips if page size is
819 * 2k? Then does hardware report number of corrections for this
820 * case? If so, ecc_stats reporting needs to be fixed as well.
821 */
816 } else { 822 } else {
817 /* otherwise fall back to default software ECC */ 823 /* otherwise fall back to default software ECC */
818 chip->ecc.mode = NAND_ECC_SOFT; 824 chip->ecc.mode = NAND_ECC_SOFT;
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index e53b76064133..1b8330e1155a 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -17,6 +17,10 @@
17 */ 17 */
18 18
19#include <linux/clk.h> 19#include <linux/clk.h>
20#include <linux/completion.h>
21#include <linux/dmaengine.h>
22#include <linux/dma-direction.h>
23#include <linux/dma-mapping.h>
20#include <linux/err.h> 24#include <linux/err.h>
21#include <linux/init.h> 25#include <linux/init.h>
22#include <linux/module.h> 26#include <linux/module.h>
@@ -27,6 +31,7 @@
27#include <linux/mtd/nand.h> 31#include <linux/mtd/nand.h>
28#include <linux/mtd/nand_ecc.h> 32#include <linux/mtd/nand_ecc.h>
29#include <linux/platform_device.h> 33#include <linux/platform_device.h>
34#include <linux/of.h>
30#include <linux/mtd/partitions.h> 35#include <linux/mtd/partitions.h>
31#include <linux/io.h> 36#include <linux/io.h>
32#include <linux/slab.h> 37#include <linux/slab.h>
@@ -34,7 +39,7 @@
34#include <linux/amba/bus.h> 39#include <linux/amba/bus.h>
35#include <mtd/mtd-abi.h> 40#include <mtd/mtd-abi.h>
36 41
37static struct nand_ecclayout fsmc_ecc1_layout = { 42static struct nand_ecclayout fsmc_ecc1_128_layout = {
38 .eccbytes = 24, 43 .eccbytes = 24,
39 .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52, 44 .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52,
40 66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116}, 45 66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116},
@@ -50,7 +55,127 @@ static struct nand_ecclayout fsmc_ecc1_layout = {
50 } 55 }
51}; 56};
52 57
53static struct nand_ecclayout fsmc_ecc4_lp_layout = { 58static struct nand_ecclayout fsmc_ecc1_64_layout = {
59 .eccbytes = 12,
60 .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52},
61 .oobfree = {
62 {.offset = 8, .length = 8},
63 {.offset = 24, .length = 8},
64 {.offset = 40, .length = 8},
65 {.offset = 56, .length = 8},
66 }
67};
68
69static struct nand_ecclayout fsmc_ecc1_16_layout = {
70 .eccbytes = 3,
71 .eccpos = {2, 3, 4},
72 .oobfree = {
73 {.offset = 8, .length = 8},
74 }
75};
76
77/*
78 * ECC4 layout for NAND of pagesize 8192 bytes & OOBsize 256 bytes. 13*16 bytes
 79 * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 46
80 * bytes are free for use.
81 */
82static struct nand_ecclayout fsmc_ecc4_256_layout = {
83 .eccbytes = 208,
84 .eccpos = { 2, 3, 4, 5, 6, 7, 8,
85 9, 10, 11, 12, 13, 14,
86 18, 19, 20, 21, 22, 23, 24,
87 25, 26, 27, 28, 29, 30,
88 34, 35, 36, 37, 38, 39, 40,
89 41, 42, 43, 44, 45, 46,
90 50, 51, 52, 53, 54, 55, 56,
91 57, 58, 59, 60, 61, 62,
92 66, 67, 68, 69, 70, 71, 72,
93 73, 74, 75, 76, 77, 78,
94 82, 83, 84, 85, 86, 87, 88,
95 89, 90, 91, 92, 93, 94,
96 98, 99, 100, 101, 102, 103, 104,
97 105, 106, 107, 108, 109, 110,
98 114, 115, 116, 117, 118, 119, 120,
99 121, 122, 123, 124, 125, 126,
100 130, 131, 132, 133, 134, 135, 136,
101 137, 138, 139, 140, 141, 142,
102 146, 147, 148, 149, 150, 151, 152,
103 153, 154, 155, 156, 157, 158,
104 162, 163, 164, 165, 166, 167, 168,
105 169, 170, 171, 172, 173, 174,
106 178, 179, 180, 181, 182, 183, 184,
107 185, 186, 187, 188, 189, 190,
108 194, 195, 196, 197, 198, 199, 200,
109 201, 202, 203, 204, 205, 206,
110 210, 211, 212, 213, 214, 215, 216,
111 217, 218, 219, 220, 221, 222,
112 226, 227, 228, 229, 230, 231, 232,
113 233, 234, 235, 236, 237, 238,
114 242, 243, 244, 245, 246, 247, 248,
115 249, 250, 251, 252, 253, 254
116 },
117 .oobfree = {
118 {.offset = 15, .length = 3},
119 {.offset = 31, .length = 3},
120 {.offset = 47, .length = 3},
121 {.offset = 63, .length = 3},
122 {.offset = 79, .length = 3},
123 {.offset = 95, .length = 3},
124 {.offset = 111, .length = 3},
125 {.offset = 127, .length = 3},
126 {.offset = 143, .length = 3},
127 {.offset = 159, .length = 3},
128 {.offset = 175, .length = 3},
129 {.offset = 191, .length = 3},
130 {.offset = 207, .length = 3},
131 {.offset = 223, .length = 3},
132 {.offset = 239, .length = 3},
133 {.offset = 255, .length = 1}
134 }
135};
136
137/*
138 * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 224 bytes. 13*8 bytes
139 * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 118
140 * bytes are free for use.
141 */
142static struct nand_ecclayout fsmc_ecc4_224_layout = {
143 .eccbytes = 104,
144 .eccpos = { 2, 3, 4, 5, 6, 7, 8,
145 9, 10, 11, 12, 13, 14,
146 18, 19, 20, 21, 22, 23, 24,
147 25, 26, 27, 28, 29, 30,
148 34, 35, 36, 37, 38, 39, 40,
149 41, 42, 43, 44, 45, 46,
150 50, 51, 52, 53, 54, 55, 56,
151 57, 58, 59, 60, 61, 62,
152 66, 67, 68, 69, 70, 71, 72,
153 73, 74, 75, 76, 77, 78,
154 82, 83, 84, 85, 86, 87, 88,
155 89, 90, 91, 92, 93, 94,
156 98, 99, 100, 101, 102, 103, 104,
157 105, 106, 107, 108, 109, 110,
158 114, 115, 116, 117, 118, 119, 120,
159 121, 122, 123, 124, 125, 126
160 },
161 .oobfree = {
162 {.offset = 15, .length = 3},
163 {.offset = 31, .length = 3},
164 {.offset = 47, .length = 3},
165 {.offset = 63, .length = 3},
166 {.offset = 79, .length = 3},
167 {.offset = 95, .length = 3},
168 {.offset = 111, .length = 3},
169 {.offset = 127, .length = 97}
170 }
171};
172
173/*
174 * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 128 bytes. 13*8 bytes
175 * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 22
176 * bytes are free for use.
177 */
178static struct nand_ecclayout fsmc_ecc4_128_layout = {
54 .eccbytes = 104, 179 .eccbytes = 104,
55 .eccpos = { 2, 3, 4, 5, 6, 7, 8, 180 .eccpos = { 2, 3, 4, 5, 6, 7, 8,
56 9, 10, 11, 12, 13, 14, 181 9, 10, 11, 12, 13, 14,
@@ -82,6 +207,45 @@ static struct nand_ecclayout fsmc_ecc4_lp_layout = {
82}; 207};
83 208
84/* 209/*
210 * ECC4 layout for NAND of pagesize 2048 bytes & OOBsize 64 bytes. 13*4 bytes of
211 * OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 10
212 * bytes are free for use.
213 */
214static struct nand_ecclayout fsmc_ecc4_64_layout = {
215 .eccbytes = 52,
216 .eccpos = { 2, 3, 4, 5, 6, 7, 8,
217 9, 10, 11, 12, 13, 14,
218 18, 19, 20, 21, 22, 23, 24,
219 25, 26, 27, 28, 29, 30,
220 34, 35, 36, 37, 38, 39, 40,
221 41, 42, 43, 44, 45, 46,
222 50, 51, 52, 53, 54, 55, 56,
223 57, 58, 59, 60, 61, 62,
224 },
225 .oobfree = {
226 {.offset = 15, .length = 3},
227 {.offset = 31, .length = 3},
228 {.offset = 47, .length = 3},
229 {.offset = 63, .length = 1},
230 }
231};
232
233/*
234 * ECC4 layout for NAND of pagesize 512 bytes & OOBsize 16 bytes. 13 bytes of
 235 * OOB size is reserved for ECC, Byte no. 4 & 5 reserved for bad block and one
236 * byte is free for use.
237 */
238static struct nand_ecclayout fsmc_ecc4_16_layout = {
239 .eccbytes = 13,
240 .eccpos = { 0, 1, 2, 3, 6, 7, 8,
241 9, 10, 11, 12, 13, 14
242 },
243 .oobfree = {
244 {.offset = 15, .length = 1},
245 }
246};
247
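
The free-byte counts quoted in the ECC4 layout comments above are plain arithmetic: 13 ECC bytes per 512-byte step plus 2 bytes kept for the bad block marker. A small sketch that reproduces those numbers (the helper is invented for illustration and is not part of the patch):

/* Illustrative only: free OOB bytes implied by the ECC4 layouts above. */
static inline int fsmc_ecc4_free_oob(int writesize, int oobsize)
{
	/* 13 ECC bytes per 512-byte step, 2 bytes for the bad block marker */
	return oobsize - 13 * (writesize / 512) - 2;
	/* 8192/256 -> 46, 4096/224 -> 118, 4096/128 -> 22, 2048/64 -> 10, 512/16 -> 1 */
}
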
248/*
85 * ECC placement definitions in oobfree type format. 249 * ECC placement definitions in oobfree type format.
86 * There are 13 bytes of ecc for every 512 byte block and it has to be read 250 * There are 13 bytes of ecc for every 512 byte block and it has to be read
87 * consecutively and immediately after the 512 byte data block for hardware to 251 * consecutively and immediately after the 512 byte data block for hardware to
@@ -103,16 +267,6 @@ static struct fsmc_eccplace fsmc_ecc4_lp_place = {
103 } 267 }
104}; 268};
105 269
106static struct nand_ecclayout fsmc_ecc4_sp_layout = {
107 .eccbytes = 13,
108 .eccpos = { 0, 1, 2, 3, 6, 7, 8,
109 9, 10, 11, 12, 13, 14
110 },
111 .oobfree = {
112 {.offset = 15, .length = 1},
113 }
114};
115
116static struct fsmc_eccplace fsmc_ecc4_sp_place = { 270static struct fsmc_eccplace fsmc_ecc4_sp_place = {
117 .eccplace = { 271 .eccplace = {
118 {.offset = 0, .length = 4}, 272 {.offset = 0, .length = 4},
@@ -120,75 +274,24 @@ static struct fsmc_eccplace fsmc_ecc4_sp_place = {
120 } 274 }
121}; 275};
122 276
123/*
124 * Default partition tables to be used if the partition information not
125 * provided through platform data.
126 *
127 * Default partition layout for small page(= 512 bytes) devices
128 * Size for "Root file system" is updated in driver based on actual device size
129 */
130static struct mtd_partition partition_info_16KB_blk[] = {
131 {
132 .name = "X-loader",
133 .offset = 0,
134 .size = 4*0x4000,
135 },
136 {
137 .name = "U-Boot",
138 .offset = 0x10000,
139 .size = 20*0x4000,
140 },
141 {
142 .name = "Kernel",
143 .offset = 0x60000,
144 .size = 256*0x4000,
145 },
146 {
147 .name = "Root File System",
148 .offset = 0x460000,
149 .size = MTDPART_SIZ_FULL,
150 },
151};
152
153/*
154 * Default partition layout for large page(> 512 bytes) devices
155 * Size for "Root file system" is updated in driver based on actual device size
156 */
157static struct mtd_partition partition_info_128KB_blk[] = {
158 {
159 .name = "X-loader",
160 .offset = 0,
161 .size = 4*0x20000,
162 },
163 {
164 .name = "U-Boot",
165 .offset = 0x80000,
166 .size = 12*0x20000,
167 },
168 {
169 .name = "Kernel",
170 .offset = 0x200000,
171 .size = 48*0x20000,
172 },
173 {
174 .name = "Root File System",
175 .offset = 0x800000,
176 .size = MTDPART_SIZ_FULL,
177 },
178};
179
180
181/** 277/**
182 * struct fsmc_nand_data - structure for FSMC NAND device state 278 * struct fsmc_nand_data - structure for FSMC NAND device state
183 * 279 *
184 * @pid: Part ID on the AMBA PrimeCell format 280 * @pid: Part ID on the AMBA PrimeCell format
185 * @mtd: MTD info for a NAND flash. 281 * @mtd: MTD info for a NAND flash.
186 * @nand: Chip related info for a NAND flash. 282 * @nand: Chip related info for a NAND flash.
283 * @partitions: Partition info for a NAND Flash.
 284 * @nr_partitions: Total number of partitions of a NAND flash.
187 * 285 *
188 * @ecc_place: ECC placing locations in oobfree type format. 286 * @ecc_place: ECC placing locations in oobfree type format.
189 * @bank: Bank number for probed device. 287 * @bank: Bank number for probed device.
190 * @clk: Clock structure for FSMC. 288 * @clk: Clock structure for FSMC.
191 * 289 *
290 * @read_dma_chan: DMA channel for read access
291 * @write_dma_chan: DMA channel for write access to NAND
292 * @dma_access_complete: Completion structure
293 *
294 * @data_pa: NAND Physical port for Data.
192 * @data_va: NAND port for Data. 295 * @data_va: NAND port for Data.
193 * @cmd_va: NAND port for Command. 296 * @cmd_va: NAND port for Command.
194 * @addr_va: NAND port for Address. 297 * @addr_va: NAND port for Address.
@@ -198,16 +301,23 @@ struct fsmc_nand_data {
198 u32 pid; 301 u32 pid;
199 struct mtd_info mtd; 302 struct mtd_info mtd;
200 struct nand_chip nand; 303 struct nand_chip nand;
304 struct mtd_partition *partitions;
305 unsigned int nr_partitions;
201 306
202 struct fsmc_eccplace *ecc_place; 307 struct fsmc_eccplace *ecc_place;
203 unsigned int bank; 308 unsigned int bank;
309 struct device *dev;
310 enum access_mode mode;
204 struct clk *clk; 311 struct clk *clk;
205 312
206 struct resource *resregs; 313 /* DMA related objects */
207 struct resource *rescmd; 314 struct dma_chan *read_dma_chan;
208 struct resource *resaddr; 315 struct dma_chan *write_dma_chan;
209 struct resource *resdata; 316 struct completion dma_access_complete;
317
318 struct fsmc_nand_timings *dev_timings;
210 319
320 dma_addr_t data_pa;
211 void __iomem *data_va; 321 void __iomem *data_va;
212 void __iomem *cmd_va; 322 void __iomem *cmd_va;
213 void __iomem *addr_va; 323 void __iomem *addr_va;
@@ -251,28 +361,29 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
251 struct nand_chip *this = mtd->priv; 361 struct nand_chip *this = mtd->priv;
252 struct fsmc_nand_data *host = container_of(mtd, 362 struct fsmc_nand_data *host = container_of(mtd,
253 struct fsmc_nand_data, mtd); 363 struct fsmc_nand_data, mtd);
 254 	struct fsmc_regs *regs = host->regs_va; 364 	void __iomem *regs = host->regs_va;
255 unsigned int bank = host->bank; 365 unsigned int bank = host->bank;
256 366
257 if (ctrl & NAND_CTRL_CHANGE) { 367 if (ctrl & NAND_CTRL_CHANGE) {
368 u32 pc;
369
258 if (ctrl & NAND_CLE) { 370 if (ctrl & NAND_CLE) {
259 this->IO_ADDR_R = (void __iomem *)host->cmd_va; 371 this->IO_ADDR_R = host->cmd_va;
260 this->IO_ADDR_W = (void __iomem *)host->cmd_va; 372 this->IO_ADDR_W = host->cmd_va;
261 } else if (ctrl & NAND_ALE) { 373 } else if (ctrl & NAND_ALE) {
262 this->IO_ADDR_R = (void __iomem *)host->addr_va; 374 this->IO_ADDR_R = host->addr_va;
263 this->IO_ADDR_W = (void __iomem *)host->addr_va; 375 this->IO_ADDR_W = host->addr_va;
264 } else { 376 } else {
265 this->IO_ADDR_R = (void __iomem *)host->data_va; 377 this->IO_ADDR_R = host->data_va;
266 this->IO_ADDR_W = (void __iomem *)host->data_va; 378 this->IO_ADDR_W = host->data_va;
267 } 379 }
268 380
269 if (ctrl & NAND_NCE) { 381 pc = readl(FSMC_NAND_REG(regs, bank, PC));
270 writel(readl(&regs->bank_regs[bank].pc) | FSMC_ENABLE, 382 if (ctrl & NAND_NCE)
271 &regs->bank_regs[bank].pc); 383 pc |= FSMC_ENABLE;
272 } else { 384 else
273 writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ENABLE, 385 pc &= ~FSMC_ENABLE;
274 &regs->bank_regs[bank].pc); 386 writel(pc, FSMC_NAND_REG(regs, bank, PC));
275 }
276 } 387 }
277 388
278 mb(); 389 mb();
@@ -287,22 +398,42 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
287 * This routine initializes timing parameters related to NAND memory access in 398 * This routine initializes timing parameters related to NAND memory access in
288 * FSMC registers 399 * FSMC registers
289 */ 400 */
290static void __init fsmc_nand_setup(struct fsmc_regs *regs, uint32_t bank, 401static void fsmc_nand_setup(void __iomem *regs, uint32_t bank,
291 uint32_t busw) 402 uint32_t busw, struct fsmc_nand_timings *timings)
292{ 403{
293 uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON; 404 uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
405 uint32_t tclr, tar, thiz, thold, twait, tset;
406 struct fsmc_nand_timings *tims;
407 struct fsmc_nand_timings default_timings = {
408 .tclr = FSMC_TCLR_1,
409 .tar = FSMC_TAR_1,
410 .thiz = FSMC_THIZ_1,
411 .thold = FSMC_THOLD_4,
412 .twait = FSMC_TWAIT_6,
413 .tset = FSMC_TSET_0,
414 };
415
416 if (timings)
417 tims = timings;
418 else
419 tims = &default_timings;
420
421 tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
422 tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
423 thiz = (tims->thiz & FSMC_THIZ_MASK) << FSMC_THIZ_SHIFT;
424 thold = (tims->thold & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT;
425 twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT;
426 tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
294 427
295 if (busw) 428 if (busw)
296 writel(value | FSMC_DEVWID_16, &regs->bank_regs[bank].pc); 429 writel(value | FSMC_DEVWID_16, FSMC_NAND_REG(regs, bank, PC));
297 else 430 else
298 writel(value | FSMC_DEVWID_8, &regs->bank_regs[bank].pc); 431 writel(value | FSMC_DEVWID_8, FSMC_NAND_REG(regs, bank, PC));
299 432
300 writel(readl(&regs->bank_regs[bank].pc) | FSMC_TCLR_1 | FSMC_TAR_1, 433 writel(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar,
301 &regs->bank_regs[bank].pc); 434 FSMC_NAND_REG(regs, bank, PC));
302 writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0, 435 writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, COMM));
303 &regs->bank_regs[bank].comm); 436 writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, ATTRIB));
304 writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
305 &regs->bank_regs[bank].attrib);
306} 437}
307 438
308/* 439/*
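
fsmc_nand_setup() now takes board-specific timings rather than hard-coding them, and the reworked probe below pulls timings, partitions and the access mode out of platform data. A rough sketch of what a board file might pass; the field names are the ones this patch reads from struct fsmc_nand_platform_data, but every value below is made up for illustration:

static struct fsmc_nand_timings board_nand_timings = {
	.tclr	= 1,
	.tar	= 1,
	.thiz	= 1,
	.thold	= 4,
	.twait	= 6,
	.tset	= 0,
};

static struct mtd_partition board_nand_parts[] = {
	{ .name = "xloader", .offset = 0,                  .size = 0x80000 },
	{ .name = "rootfs",  .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
};

static struct fsmc_nand_platform_data board_nand_pdata = {
	.nand_timings	= &board_nand_timings,
	.partitions	= board_nand_parts,
	.nr_partitions	= ARRAY_SIZE(board_nand_parts),
	.width		= FSMC_NAND_BW16,
	.mode		= USE_WORD_ACCESS,
};
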
@@ -312,15 +443,15 @@ static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
312{ 443{
313 struct fsmc_nand_data *host = container_of(mtd, 444 struct fsmc_nand_data *host = container_of(mtd,
314 struct fsmc_nand_data, mtd); 445 struct fsmc_nand_data, mtd);
315 struct fsmc_regs *regs = host->regs_va; 446 void __iomem *regs = host->regs_va;
316 uint32_t bank = host->bank; 447 uint32_t bank = host->bank;
317 448
318 writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ECCPLEN_256, 449 writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256,
319 &regs->bank_regs[bank].pc); 450 FSMC_NAND_REG(regs, bank, PC));
320 writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ECCEN, 451 writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN,
321 &regs->bank_regs[bank].pc); 452 FSMC_NAND_REG(regs, bank, PC));
322 writel(readl(&regs->bank_regs[bank].pc) | FSMC_ECCEN, 453 writel(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN,
323 &regs->bank_regs[bank].pc); 454 FSMC_NAND_REG(regs, bank, PC));
324} 455}
325 456
326/* 457/*
@@ -333,37 +464,42 @@ static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
333{ 464{
334 struct fsmc_nand_data *host = container_of(mtd, 465 struct fsmc_nand_data *host = container_of(mtd,
335 struct fsmc_nand_data, mtd); 466 struct fsmc_nand_data, mtd);
336 struct fsmc_regs *regs = host->regs_va; 467 void __iomem *regs = host->regs_va;
337 uint32_t bank = host->bank; 468 uint32_t bank = host->bank;
338 uint32_t ecc_tmp; 469 uint32_t ecc_tmp;
339 unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT; 470 unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
340 471
341 do { 472 do {
342 if (readl(&regs->bank_regs[bank].sts) & FSMC_CODE_RDY) 473 if (readl(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY)
343 break; 474 break;
344 else 475 else
345 cond_resched(); 476 cond_resched();
346 } while (!time_after_eq(jiffies, deadline)); 477 } while (!time_after_eq(jiffies, deadline));
347 478
348 ecc_tmp = readl(&regs->bank_regs[bank].ecc1); 479 if (time_after_eq(jiffies, deadline)) {
480 dev_err(host->dev, "calculate ecc timed out\n");
481 return -ETIMEDOUT;
482 }
483
484 ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1));
349 ecc[0] = (uint8_t) (ecc_tmp >> 0); 485 ecc[0] = (uint8_t) (ecc_tmp >> 0);
350 ecc[1] = (uint8_t) (ecc_tmp >> 8); 486 ecc[1] = (uint8_t) (ecc_tmp >> 8);
351 ecc[2] = (uint8_t) (ecc_tmp >> 16); 487 ecc[2] = (uint8_t) (ecc_tmp >> 16);
352 ecc[3] = (uint8_t) (ecc_tmp >> 24); 488 ecc[3] = (uint8_t) (ecc_tmp >> 24);
353 489
354 ecc_tmp = readl(&regs->bank_regs[bank].ecc2); 490 ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC2));
355 ecc[4] = (uint8_t) (ecc_tmp >> 0); 491 ecc[4] = (uint8_t) (ecc_tmp >> 0);
356 ecc[5] = (uint8_t) (ecc_tmp >> 8); 492 ecc[5] = (uint8_t) (ecc_tmp >> 8);
357 ecc[6] = (uint8_t) (ecc_tmp >> 16); 493 ecc[6] = (uint8_t) (ecc_tmp >> 16);
358 ecc[7] = (uint8_t) (ecc_tmp >> 24); 494 ecc[7] = (uint8_t) (ecc_tmp >> 24);
359 495
360 ecc_tmp = readl(&regs->bank_regs[bank].ecc3); 496 ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC3));
361 ecc[8] = (uint8_t) (ecc_tmp >> 0); 497 ecc[8] = (uint8_t) (ecc_tmp >> 0);
362 ecc[9] = (uint8_t) (ecc_tmp >> 8); 498 ecc[9] = (uint8_t) (ecc_tmp >> 8);
363 ecc[10] = (uint8_t) (ecc_tmp >> 16); 499 ecc[10] = (uint8_t) (ecc_tmp >> 16);
364 ecc[11] = (uint8_t) (ecc_tmp >> 24); 500 ecc[11] = (uint8_t) (ecc_tmp >> 24);
365 501
366 ecc_tmp = readl(&regs->bank_regs[bank].sts); 502 ecc_tmp = readl(FSMC_NAND_REG(regs, bank, STS));
367 ecc[12] = (uint8_t) (ecc_tmp >> 16); 503 ecc[12] = (uint8_t) (ecc_tmp >> 16);
368 504
369 return 0; 505 return 0;
@@ -379,11 +515,11 @@ static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
379{ 515{
380 struct fsmc_nand_data *host = container_of(mtd, 516 struct fsmc_nand_data *host = container_of(mtd,
381 struct fsmc_nand_data, mtd); 517 struct fsmc_nand_data, mtd);
382 struct fsmc_regs *regs = host->regs_va; 518 void __iomem *regs = host->regs_va;
383 uint32_t bank = host->bank; 519 uint32_t bank = host->bank;
384 uint32_t ecc_tmp; 520 uint32_t ecc_tmp;
385 521
386 ecc_tmp = readl(&regs->bank_regs[bank].ecc1); 522 ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1));
387 ecc[0] = (uint8_t) (ecc_tmp >> 0); 523 ecc[0] = (uint8_t) (ecc_tmp >> 0);
388 ecc[1] = (uint8_t) (ecc_tmp >> 8); 524 ecc[1] = (uint8_t) (ecc_tmp >> 8);
389 ecc[2] = (uint8_t) (ecc_tmp >> 16); 525 ecc[2] = (uint8_t) (ecc_tmp >> 16);
@@ -391,6 +527,166 @@ static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
391 return 0; 527 return 0;
392} 528}
393 529
 530/* Count the number of bits written as 0 in buff, up to a max of max_bits */
531static int count_written_bits(uint8_t *buff, int size, int max_bits)
532{
533 int k, written_bits = 0;
534
535 for (k = 0; k < size; k++) {
536 written_bits += hweight8(~buff[k]);
537 if (written_bits > max_bits)
538 break;
539 }
540
541 return written_bits;
542}
543
544static void dma_complete(void *param)
545{
546 struct fsmc_nand_data *host = param;
547
548 complete(&host->dma_access_complete);
549}
550
551static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
552 enum dma_data_direction direction)
553{
554 struct dma_chan *chan;
555 struct dma_device *dma_dev;
556 struct dma_async_tx_descriptor *tx;
557 dma_addr_t dma_dst, dma_src, dma_addr;
558 dma_cookie_t cookie;
559 unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
560 int ret;
561
562 if (direction == DMA_TO_DEVICE)
563 chan = host->write_dma_chan;
564 else if (direction == DMA_FROM_DEVICE)
565 chan = host->read_dma_chan;
566 else
567 return -EINVAL;
568
569 dma_dev = chan->device;
570 dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
571
572 if (direction == DMA_TO_DEVICE) {
573 dma_src = dma_addr;
574 dma_dst = host->data_pa;
575 flags |= DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_SKIP_DEST_UNMAP;
576 } else {
577 dma_src = host->data_pa;
578 dma_dst = dma_addr;
579 flags |= DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SKIP_SRC_UNMAP;
580 }
581
582 tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
583 len, flags);
584
585 if (!tx) {
586 dev_err(host->dev, "device_prep_dma_memcpy error\n");
587 dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
588 return -EIO;
589 }
590
591 tx->callback = dma_complete;
592 tx->callback_param = host;
593 cookie = tx->tx_submit(tx);
594
595 ret = dma_submit_error(cookie);
596 if (ret) {
597 dev_err(host->dev, "dma_submit_error %d\n", cookie);
598 return ret;
599 }
600
601 dma_async_issue_pending(chan);
602
 603 	ret = wait_for_completion_interruptible_timeout(
 604 			&host->dma_access_complete,
 605 			msecs_to_jiffies(3000));
606 if (ret <= 0) {
607 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
608 dev_err(host->dev, "wait_for_completion_timeout\n");
609 return ret ? ret : -ETIMEDOUT;
610 }
611
612 return 0;
613}
614
615/*
616 * fsmc_write_buf - write buffer to chip
617 * @mtd: MTD device structure
618 * @buf: data buffer
619 * @len: number of bytes to write
620 */
621static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
622{
623 int i;
624 struct nand_chip *chip = mtd->priv;
625
626 if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
627 IS_ALIGNED(len, sizeof(uint32_t))) {
628 uint32_t *p = (uint32_t *)buf;
629 len = len >> 2;
630 for (i = 0; i < len; i++)
631 writel(p[i], chip->IO_ADDR_W);
632 } else {
633 for (i = 0; i < len; i++)
634 writeb(buf[i], chip->IO_ADDR_W);
635 }
636}
637
638/*
639 * fsmc_read_buf - read chip data into buffer
640 * @mtd: MTD device structure
 641 * @buf: buffer to store data
642 * @len: number of bytes to read
643 */
644static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
645{
646 int i;
647 struct nand_chip *chip = mtd->priv;
648
649 if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
650 IS_ALIGNED(len, sizeof(uint32_t))) {
651 uint32_t *p = (uint32_t *)buf;
652 len = len >> 2;
653 for (i = 0; i < len; i++)
654 p[i] = readl(chip->IO_ADDR_R);
655 } else {
656 for (i = 0; i < len; i++)
657 buf[i] = readb(chip->IO_ADDR_R);
658 }
659}
660
661/*
662 * fsmc_read_buf_dma - read chip data into buffer
663 * @mtd: MTD device structure
 664 * @buf: buffer to store data
665 * @len: number of bytes to read
666 */
667static void fsmc_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len)
668{
669 struct fsmc_nand_data *host;
670
671 host = container_of(mtd, struct fsmc_nand_data, mtd);
672 dma_xfer(host, buf, len, DMA_FROM_DEVICE);
673}
674
675/*
676 * fsmc_write_buf_dma - write buffer to chip
677 * @mtd: MTD device structure
678 * @buf: data buffer
679 * @len: number of bytes to write
680 */
681static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
682 int len)
683{
684 struct fsmc_nand_data *host;
685
686 host = container_of(mtd, struct fsmc_nand_data, mtd);
687 dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
688}
689
394/* 690/*
395 * fsmc_read_page_hwecc 691 * fsmc_read_page_hwecc
396 * @mtd: mtd info structure 692 * @mtd: mtd info structure
@@ -426,7 +722,6 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
426 uint8_t *oob = (uint8_t *)&ecc_oob[0]; 722 uint8_t *oob = (uint8_t *)&ecc_oob[0];
427 723
428 for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) { 724 for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
429
430 chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page); 725 chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
431 chip->ecc.hwctl(mtd, NAND_ECC_READ); 726 chip->ecc.hwctl(mtd, NAND_ECC_READ);
432 chip->read_buf(mtd, p, eccsize); 727 chip->read_buf(mtd, p, eccsize);
@@ -437,17 +732,19 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
437 group++; 732 group++;
438 733
439 /* 734 /*
440 * length is intentionally kept a higher multiple of 2 735 * length is intentionally kept a higher multiple of 2
441 * to read at least 13 bytes even in case of 16 bit NAND 736 * to read at least 13 bytes even in case of 16 bit NAND
442 * devices 737 * devices
443 */ 738 */
444 len = roundup(len, 2); 739 if (chip->options & NAND_BUSWIDTH_16)
740 len = roundup(len, 2);
741
445 chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page); 742 chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
446 chip->read_buf(mtd, oob + j, len); 743 chip->read_buf(mtd, oob + j, len);
447 j += len; 744 j += len;
448 } 745 }
449 746
450 memcpy(&ecc_code[i], oob, 13); 747 memcpy(&ecc_code[i], oob, chip->ecc.bytes);
451 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 748 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
452 749
453 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); 750 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
@@ -461,7 +758,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
461} 758}
462 759
463/* 760/*
464 * fsmc_correct_data 761 * fsmc_bch8_correct_data
465 * @mtd: mtd info structure 762 * @mtd: mtd info structure
466 * @dat: buffer of read data 763 * @dat: buffer of read data
467 * @read_ecc: ecc read from device spare area 764 * @read_ecc: ecc read from device spare area
@@ -470,19 +767,51 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
 470 * calc_ecc is a 104 bit value containing a maximum of 8 error 767 * calc_ecc is a 104 bit value containing a maximum of 8 error
 471 * offset indexes of 13 bits each in 512 bytes of read data. 768 * offset indexes of 13 bits each in 512 bytes of read data.
472 */ 769 */
473static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat, 770static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
474 uint8_t *read_ecc, uint8_t *calc_ecc) 771 uint8_t *read_ecc, uint8_t *calc_ecc)
475{ 772{
476 struct fsmc_nand_data *host = container_of(mtd, 773 struct fsmc_nand_data *host = container_of(mtd,
477 struct fsmc_nand_data, mtd); 774 struct fsmc_nand_data, mtd);
478 struct fsmc_regs *regs = host->regs_va; 775 struct nand_chip *chip = mtd->priv;
776 void __iomem *regs = host->regs_va;
479 unsigned int bank = host->bank; 777 unsigned int bank = host->bank;
480 uint16_t err_idx[8]; 778 uint32_t err_idx[8];
481 uint64_t ecc_data[2];
482 uint32_t num_err, i; 779 uint32_t num_err, i;
780 uint32_t ecc1, ecc2, ecc3, ecc4;
781
782 num_err = (readl(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF;
783
784 /* no bit flipping */
785 if (likely(num_err == 0))
786 return 0;
787
788 /* too many errors */
789 if (unlikely(num_err > 8)) {
790 /*
791 * This is a temporary erase check. A newly erased page read
792 * would result in an ecc error because the oob data is also
 793 		 * erased to FF and the calculated ecc for all-FF data is not
 794 		 * FF..FF.
 795 		 * This is a workaround to skip performing correction when the
 796 		 * data is FF..FF.
 797 		 *
 798 		 * Logic:
 799 		 * For every page, each bit written as 0 is counted until the
 800 		 * number of such bits exceeds 8 (the maximum correction
 801 		 * capability of the FSMC for each 512 + 13 bytes)
802 */
803
804 int bits_ecc = count_written_bits(read_ecc, chip->ecc.bytes, 8);
805 int bits_data = count_written_bits(dat, chip->ecc.size, 8);
806
807 if ((bits_ecc + bits_data) <= 8) {
808 if (bits_data)
809 memset(dat, 0xff, chip->ecc.size);
810 return bits_data;
811 }
483 812
484 /* The calculated ecc is actually the correction index in data */ 813 return -EBADMSG;
485 memcpy(ecc_data, calc_ecc, 13); 814 }
486 815
487 /* 816 /*
488 * ------------------- calc_ecc[] bit wise -----------|--13 bits--| 817 * ------------------- calc_ecc[] bit wise -----------|--13 bits--|
@@ -493,27 +822,26 @@ static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat,
493 * uint64_t array and error offset indexes are populated in err_idx 822 * uint64_t array and error offset indexes are populated in err_idx
494 * array 823 * array
495 */ 824 */
496 for (i = 0; i < 8; i++) { 825 ecc1 = readl(FSMC_NAND_REG(regs, bank, ECC1));
497 if (i == 4) { 826 ecc2 = readl(FSMC_NAND_REG(regs, bank, ECC2));
498 err_idx[4] = ((ecc_data[1] & 0x1) << 12) | ecc_data[0]; 827 ecc3 = readl(FSMC_NAND_REG(regs, bank, ECC3));
499 ecc_data[1] >>= 1; 828 ecc4 = readl(FSMC_NAND_REG(regs, bank, STS));
500 continue; 829
501 } 830 err_idx[0] = (ecc1 >> 0) & 0x1FFF;
502 err_idx[i] = (ecc_data[i/4] & 0x1FFF); 831 err_idx[1] = (ecc1 >> 13) & 0x1FFF;
503 ecc_data[i/4] >>= 13; 832 err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F);
504 } 833 err_idx[3] = (ecc2 >> 7) & 0x1FFF;
505 834 err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF);
506 num_err = (readl(&regs->bank_regs[bank].sts) >> 10) & 0xF; 835 err_idx[5] = (ecc3 >> 1) & 0x1FFF;
507 836 err_idx[6] = (ecc3 >> 14) & 0x1FFF;
508 if (num_err == 0xF) 837 err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F);
509 return -EBADMSG;
510 838
511 i = 0; 839 i = 0;
512 while (num_err--) { 840 while (num_err--) {
513 change_bit(0, (unsigned long *)&err_idx[i]); 841 change_bit(0, (unsigned long *)&err_idx[i]);
514 change_bit(1, (unsigned long *)&err_idx[i]); 842 change_bit(1, (unsigned long *)&err_idx[i]);
515 843
516 if (err_idx[i] <= 512 * 8) { 844 if (err_idx[i] < chip->ecc.size * 8) {
517 change_bit(err_idx[i], (unsigned long *)dat); 845 change_bit(err_idx[i], (unsigned long *)dat);
518 i++; 846 i++;
519 } 847 }
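
The shifts and masks above unpack eight 13-bit error indexes that the controller packs LSB-first across ECC1, ECC2, ECC3 and bits 23:16 of STS, which is also how fsmc_read_hwecc_ecc4() earlier in this file fills the 13-byte calc_ecc buffer. The same unpacking written generically, purely as an illustration (this helper does not exist in the driver):

/* Illustrative only: extract the k-th 13-bit error index from the syndrome. */
static u32 fsmc_sketch_err_idx(const u8 *calc_ecc, int k)
{
	u32 idx = 0;
	int bit, pos = 13 * k;		/* bit position within the 104-bit field */

	for (bit = 0; bit < 13; bit++, pos++)
		idx |= ((u32)((calc_ecc[pos / 8] >> (pos % 8)) & 1)) << bit;

	return idx;
}
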
@@ -521,6 +849,44 @@ static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat,
521 return i; 849 return i;
522} 850}
523 851
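
The num_err > 8 branch of fsmc_bch8_correct_data() above is the erased-page workaround its comment describes. Pulled out on its own it would look roughly like the helper below, a minimal sketch under the same 8-bits-per-(512+13)-bytes limit; the function name and signature are invented for illustration:

/* Illustrative only: treat an almost-all-0xFF sector as an erased page. */
static int fsmc_sketch_erased_check(u8 *dat, int data_len,
				    const u8 *read_ecc, int ecc_len)
{
	int bits_data = 0, bits_ecc = 0, i;

	for (i = 0; i < data_len; i++)
		bits_data += hweight8(~dat[i]);		/* bits written as 0 in data */
	for (i = 0; i < ecc_len; i++)
		bits_ecc += hweight8(~read_ecc[i]);	/* bits written as 0 in ECC */

	if (bits_data + bits_ecc > 8)
		return -EBADMSG;			/* genuinely uncorrectable */

	if (bits_data)
		memset(dat, 0xff, data_len);		/* hand back an erased sector */

	return bits_data;				/* counted as corrected bitflips */
}
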
852static bool filter(struct dma_chan *chan, void *slave)
853{
854 chan->private = slave;
855 return true;
856}
857
858#ifdef CONFIG_OF
859static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev,
860 struct device_node *np)
861{
862 struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
863 u32 val;
864
865 /* Set default NAND width to 8 bits */
866 pdata->width = 8;
867 if (!of_property_read_u32(np, "bank-width", &val)) {
868 if (val == 2) {
869 pdata->width = 16;
870 } else if (val != 1) {
871 dev_err(&pdev->dev, "invalid bank-width %u\n", val);
872 return -EINVAL;
873 }
874 }
875 of_property_read_u32(np, "st,ale-off", &pdata->ale_off);
876 of_property_read_u32(np, "st,cle-off", &pdata->cle_off);
877 if (of_get_property(np, "nand-skip-bbtscan", NULL))
878 pdata->options = NAND_SKIP_BBTSCAN;
879
880 return 0;
881}
882#else
883static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev,
884 struct device_node *np)
885{
886 return -ENOSYS;
887}
888#endif
889
524/* 890/*
525 * fsmc_nand_probe - Probe function 891 * fsmc_nand_probe - Probe function
526 * @pdev: platform device structure 892 * @pdev: platform device structure
@@ -528,102 +894,109 @@ static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat,
528static int __init fsmc_nand_probe(struct platform_device *pdev) 894static int __init fsmc_nand_probe(struct platform_device *pdev)
529{ 895{
530 struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev); 896 struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
897 struct device_node __maybe_unused *np = pdev->dev.of_node;
898 struct mtd_part_parser_data ppdata = {};
531 struct fsmc_nand_data *host; 899 struct fsmc_nand_data *host;
532 struct mtd_info *mtd; 900 struct mtd_info *mtd;
533 struct nand_chip *nand; 901 struct nand_chip *nand;
534 struct fsmc_regs *regs;
535 struct resource *res; 902 struct resource *res;
903 dma_cap_mask_t mask;
536 int ret = 0; 904 int ret = 0;
537 u32 pid; 905 u32 pid;
538 int i; 906 int i;
539 907
908 if (np) {
909 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
910 pdev->dev.platform_data = pdata;
911 ret = fsmc_nand_probe_config_dt(pdev, np);
912 if (ret) {
913 dev_err(&pdev->dev, "no platform data\n");
914 return -ENODEV;
915 }
916 }
917
540 if (!pdata) { 918 if (!pdata) {
541 dev_err(&pdev->dev, "platform data is NULL\n"); 919 dev_err(&pdev->dev, "platform data is NULL\n");
542 return -EINVAL; 920 return -EINVAL;
543 } 921 }
544 922
545 /* Allocate memory for the device structure (and zero it) */ 923 /* Allocate memory for the device structure (and zero it) */
546 host = kzalloc(sizeof(*host), GFP_KERNEL); 924 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
547 if (!host) { 925 if (!host) {
548 dev_err(&pdev->dev, "failed to allocate device structure\n"); 926 dev_err(&pdev->dev, "failed to allocate device structure\n");
549 return -ENOMEM; 927 return -ENOMEM;
550 } 928 }
551 929
552 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data"); 930 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
553 if (!res) { 931 if (!res)
554 ret = -EIO; 932 return -EINVAL;
555 goto err_probe1;
556 }
557 933
558 host->resdata = request_mem_region(res->start, resource_size(res), 934 if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
559 pdev->name); 935 pdev->name)) {
 560 	if (!host->resdata) { 936 		dev_err(&pdev->dev, "Failed to get memory data resource\n");
561 ret = -EIO; 937 return -ENOENT;
562 goto err_probe1;
563 } 938 }
564 939
565 host->data_va = ioremap(res->start, resource_size(res)); 940 host->data_pa = (dma_addr_t)res->start;
941 host->data_va = devm_ioremap(&pdev->dev, res->start,
942 resource_size(res));
566 if (!host->data_va) { 943 if (!host->data_va) {
567 ret = -EIO; 944 dev_err(&pdev->dev, "data ioremap failed\n");
568 goto err_probe1; 945 return -ENOMEM;
569 } 946 }
570 947
571 host->resaddr = request_mem_region(res->start + PLAT_NAND_ALE, 948 if (!devm_request_mem_region(&pdev->dev, res->start + pdata->ale_off,
572 resource_size(res), pdev->name); 949 resource_size(res), pdev->name)) {
 573 	if (!host->resaddr) { 950 		dev_err(&pdev->dev, "Failed to get memory ale resource\n");
574 ret = -EIO; 951 return -ENOENT;
575 goto err_probe1;
576 } 952 }
577 953
578 host->addr_va = ioremap(res->start + PLAT_NAND_ALE, resource_size(res)); 954 host->addr_va = devm_ioremap(&pdev->dev, res->start + pdata->ale_off,
955 resource_size(res));
579 if (!host->addr_va) { 956 if (!host->addr_va) {
580 ret = -EIO; 957 dev_err(&pdev->dev, "ale ioremap failed\n");
581 goto err_probe1; 958 return -ENOMEM;
582 } 959 }
583 960
584 host->rescmd = request_mem_region(res->start + PLAT_NAND_CLE, 961 if (!devm_request_mem_region(&pdev->dev, res->start + pdata->cle_off,
585 resource_size(res), pdev->name); 962 resource_size(res), pdev->name)) {
 586 	if (!host->rescmd) { 963 		dev_err(&pdev->dev, "Failed to get memory cle resource\n");
587 ret = -EIO; 964 return -ENOENT;
588 goto err_probe1;
589 } 965 }
590 966
591 host->cmd_va = ioremap(res->start + PLAT_NAND_CLE, resource_size(res)); 967 host->cmd_va = devm_ioremap(&pdev->dev, res->start + pdata->cle_off,
968 resource_size(res));
592 if (!host->cmd_va) { 969 if (!host->cmd_va) {
 593 		ret = -EIO; 970 		dev_err(&pdev->dev, "cle ioremap failed\n");
594 goto err_probe1; 971 return -ENOMEM;
595 } 972 }
596 973
597 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs"); 974 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
598 if (!res) { 975 if (!res)
599 ret = -EIO; 976 return -EINVAL;
600 goto err_probe1;
601 }
602 977
603 host->resregs = request_mem_region(res->start, resource_size(res), 978 if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
604 pdev->name); 979 pdev->name)) {
 605 	if (!host->resregs) { 980 		dev_err(&pdev->dev, "Failed to get memory regs resource\n");
606 ret = -EIO; 981 return -ENOENT;
607 goto err_probe1;
608 } 982 }
609 983
610 host->regs_va = ioremap(res->start, resource_size(res)); 984 host->regs_va = devm_ioremap(&pdev->dev, res->start,
985 resource_size(res));
611 if (!host->regs_va) { 986 if (!host->regs_va) {
612 ret = -EIO; 987 dev_err(&pdev->dev, "regs ioremap failed\n");
613 goto err_probe1; 988 return -ENOMEM;
614 } 989 }
615 990
616 host->clk = clk_get(&pdev->dev, NULL); 991 host->clk = clk_get(&pdev->dev, NULL);
617 if (IS_ERR(host->clk)) { 992 if (IS_ERR(host->clk)) {
618 dev_err(&pdev->dev, "failed to fetch block clock\n"); 993 dev_err(&pdev->dev, "failed to fetch block clock\n");
619 ret = PTR_ERR(host->clk); 994 return PTR_ERR(host->clk);
620 host->clk = NULL;
621 goto err_probe1;
622 } 995 }
623 996
624 ret = clk_enable(host->clk); 997 ret = clk_enable(host->clk);
625 if (ret) 998 if (ret)
626 goto err_probe1; 999 goto err_clk_enable;
627 1000
628 /* 1001 /*
629 * This device ID is actually a common AMBA ID as used on the 1002 * This device ID is actually a common AMBA ID as used on the
@@ -639,7 +1012,14 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
639 1012
640 host->bank = pdata->bank; 1013 host->bank = pdata->bank;
641 host->select_chip = pdata->select_bank; 1014 host->select_chip = pdata->select_bank;
642 regs = host->regs_va; 1015 host->partitions = pdata->partitions;
1016 host->nr_partitions = pdata->nr_partitions;
1017 host->dev = &pdev->dev;
1018 host->dev_timings = pdata->nand_timings;
1019 host->mode = pdata->mode;
1020
1021 if (host->mode == USE_DMA_ACCESS)
1022 init_completion(&host->dma_access_complete);
643 1023
644 /* Link all private pointers */ 1024 /* Link all private pointers */
645 mtd = &host->mtd; 1025 mtd = &host->mtd;
@@ -658,21 +1038,53 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
658 nand->ecc.size = 512; 1038 nand->ecc.size = 512;
659 nand->options = pdata->options; 1039 nand->options = pdata->options;
660 nand->select_chip = fsmc_select_chip; 1040 nand->select_chip = fsmc_select_chip;
1041 nand->badblockbits = 7;
661 1042
662 if (pdata->width == FSMC_NAND_BW16) 1043 if (pdata->width == FSMC_NAND_BW16)
663 nand->options |= NAND_BUSWIDTH_16; 1044 nand->options |= NAND_BUSWIDTH_16;
664 1045
665 fsmc_nand_setup(regs, host->bank, nand->options & NAND_BUSWIDTH_16); 1046 switch (host->mode) {
1047 case USE_DMA_ACCESS:
1048 dma_cap_zero(mask);
1049 dma_cap_set(DMA_MEMCPY, mask);
1050 host->read_dma_chan = dma_request_channel(mask, filter,
1051 pdata->read_dma_priv);
1052 if (!host->read_dma_chan) {
1053 dev_err(&pdev->dev, "Unable to get read dma channel\n");
1054 goto err_req_read_chnl;
1055 }
1056 host->write_dma_chan = dma_request_channel(mask, filter,
1057 pdata->write_dma_priv);
1058 if (!host->write_dma_chan) {
1059 dev_err(&pdev->dev, "Unable to get write dma channel\n");
1060 goto err_req_write_chnl;
1061 }
1062 nand->read_buf = fsmc_read_buf_dma;
1063 nand->write_buf = fsmc_write_buf_dma;
1064 break;
1065
1066 default:
1067 case USE_WORD_ACCESS:
1068 nand->read_buf = fsmc_read_buf;
1069 nand->write_buf = fsmc_write_buf;
1070 break;
1071 }
1072
1073 fsmc_nand_setup(host->regs_va, host->bank,
1074 nand->options & NAND_BUSWIDTH_16,
1075 host->dev_timings);
666 1076
667 if (AMBA_REV_BITS(host->pid) >= 8) { 1077 if (AMBA_REV_BITS(host->pid) >= 8) {
668 nand->ecc.read_page = fsmc_read_page_hwecc; 1078 nand->ecc.read_page = fsmc_read_page_hwecc;
669 nand->ecc.calculate = fsmc_read_hwecc_ecc4; 1079 nand->ecc.calculate = fsmc_read_hwecc_ecc4;
670 nand->ecc.correct = fsmc_correct_data; 1080 nand->ecc.correct = fsmc_bch8_correct_data;
671 nand->ecc.bytes = 13; 1081 nand->ecc.bytes = 13;
1082 nand->ecc.strength = 8;
672 } else { 1083 } else {
673 nand->ecc.calculate = fsmc_read_hwecc_ecc1; 1084 nand->ecc.calculate = fsmc_read_hwecc_ecc1;
674 nand->ecc.correct = nand_correct_data; 1085 nand->ecc.correct = nand_correct_data;
675 nand->ecc.bytes = 3; 1086 nand->ecc.bytes = 3;
1087 nand->ecc.strength = 1;
676 } 1088 }
677 1089
678 /* 1090 /*
@@ -681,19 +1093,52 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
681 if (nand_scan_ident(&host->mtd, 1, NULL)) { 1093 if (nand_scan_ident(&host->mtd, 1, NULL)) {
682 ret = -ENXIO; 1094 ret = -ENXIO;
683 dev_err(&pdev->dev, "No NAND Device found!\n"); 1095 dev_err(&pdev->dev, "No NAND Device found!\n");
684 goto err_probe; 1096 goto err_scan_ident;
685 } 1097 }
686 1098
687 if (AMBA_REV_BITS(host->pid) >= 8) { 1099 if (AMBA_REV_BITS(host->pid) >= 8) {
688 if (host->mtd.writesize == 512) { 1100 switch (host->mtd.oobsize) {
689 nand->ecc.layout = &fsmc_ecc4_sp_layout; 1101 case 16:
1102 nand->ecc.layout = &fsmc_ecc4_16_layout;
690 host->ecc_place = &fsmc_ecc4_sp_place; 1103 host->ecc_place = &fsmc_ecc4_sp_place;
691 } else { 1104 break;
692 nand->ecc.layout = &fsmc_ecc4_lp_layout; 1105 case 64:
1106 nand->ecc.layout = &fsmc_ecc4_64_layout;
1107 host->ecc_place = &fsmc_ecc4_lp_place;
1108 break;
1109 case 128:
1110 nand->ecc.layout = &fsmc_ecc4_128_layout;
1111 host->ecc_place = &fsmc_ecc4_lp_place;
1112 break;
1113 case 224:
1114 nand->ecc.layout = &fsmc_ecc4_224_layout;
693 host->ecc_place = &fsmc_ecc4_lp_place; 1115 host->ecc_place = &fsmc_ecc4_lp_place;
1116 break;
1117 case 256:
1118 nand->ecc.layout = &fsmc_ecc4_256_layout;
1119 host->ecc_place = &fsmc_ecc4_lp_place;
1120 break;
1121 default:
1122 printk(KERN_WARNING "No oob scheme defined for "
1123 "oobsize %d\n", mtd->oobsize);
1124 BUG();
694 } 1125 }
695 } else { 1126 } else {
696 nand->ecc.layout = &fsmc_ecc1_layout; 1127 switch (host->mtd.oobsize) {
1128 case 16:
1129 nand->ecc.layout = &fsmc_ecc1_16_layout;
1130 break;
1131 case 64:
1132 nand->ecc.layout = &fsmc_ecc1_64_layout;
1133 break;
1134 case 128:
1135 nand->ecc.layout = &fsmc_ecc1_128_layout;
1136 break;
1137 default:
1138 printk(KERN_WARNING "No oob scheme defined for "
1139 "oobsize %d\n", mtd->oobsize);
1140 BUG();
1141 }
697 } 1142 }
698 1143
699 /* Second stage of scan to fill MTD data-structures */ 1144 /* Second stage of scan to fill MTD data-structures */
@@ -713,13 +1158,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
713 * Check for partition info passed 1158 * Check for partition info passed
714 */ 1159 */
715 host->mtd.name = "nand"; 1160 host->mtd.name = "nand";
716 ret = mtd_device_parse_register(&host->mtd, NULL, 0, 1161 ppdata.of_node = np;
717 host->mtd.size <= 0x04000000 ? 1162 ret = mtd_device_parse_register(&host->mtd, NULL, &ppdata,
718 partition_info_16KB_blk : 1163 host->partitions, host->nr_partitions);
719 partition_info_128KB_blk,
720 host->mtd.size <= 0x04000000 ?
721 ARRAY_SIZE(partition_info_16KB_blk) :
722 ARRAY_SIZE(partition_info_128KB_blk));
723 if (ret) 1164 if (ret)
724 goto err_probe; 1165 goto err_probe;
725 1166
@@ -728,32 +1169,16 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
728 return 0; 1169 return 0;
729 1170
730err_probe: 1171err_probe:
1172err_scan_ident:
1173 if (host->mode == USE_DMA_ACCESS)
1174 dma_release_channel(host->write_dma_chan);
1175err_req_write_chnl:
1176 if (host->mode == USE_DMA_ACCESS)
1177 dma_release_channel(host->read_dma_chan);
1178err_req_read_chnl:
731 clk_disable(host->clk); 1179 clk_disable(host->clk);
732err_probe1: 1180err_clk_enable:
733 if (host->clk) 1181 clk_put(host->clk);
734 clk_put(host->clk);
735 if (host->regs_va)
736 iounmap(host->regs_va);
737 if (host->resregs)
738 release_mem_region(host->resregs->start,
739 resource_size(host->resregs));
740 if (host->cmd_va)
741 iounmap(host->cmd_va);
742 if (host->rescmd)
743 release_mem_region(host->rescmd->start,
744 resource_size(host->rescmd));
745 if (host->addr_va)
746 iounmap(host->addr_va);
747 if (host->resaddr)
748 release_mem_region(host->resaddr->start,
749 resource_size(host->resaddr));
750 if (host->data_va)
751 iounmap(host->data_va);
752 if (host->resdata)
753 release_mem_region(host->resdata->start,
754 resource_size(host->resdata));
755
756 kfree(host);
757 return ret; 1182 return ret;
758} 1183}
759 1184
@@ -768,24 +1193,15 @@ static int fsmc_nand_remove(struct platform_device *pdev)
768 1193
769 if (host) { 1194 if (host) {
770 nand_release(&host->mtd); 1195 nand_release(&host->mtd);
1196
1197 if (host->mode == USE_DMA_ACCESS) {
1198 dma_release_channel(host->write_dma_chan);
1199 dma_release_channel(host->read_dma_chan);
1200 }
771 clk_disable(host->clk); 1201 clk_disable(host->clk);
772 clk_put(host->clk); 1202 clk_put(host->clk);
773
774 iounmap(host->regs_va);
775 release_mem_region(host->resregs->start,
776 resource_size(host->resregs));
777 iounmap(host->cmd_va);
778 release_mem_region(host->rescmd->start,
779 resource_size(host->rescmd));
780 iounmap(host->addr_va);
781 release_mem_region(host->resaddr->start,
782 resource_size(host->resaddr));
783 iounmap(host->data_va);
784 release_mem_region(host->resdata->start,
785 resource_size(host->resdata));
786
787 kfree(host);
788 } 1203 }
1204
789 return 0; 1205 return 0;
790} 1206}
791 1207
@@ -801,15 +1217,24 @@ static int fsmc_nand_suspend(struct device *dev)
801static int fsmc_nand_resume(struct device *dev) 1217static int fsmc_nand_resume(struct device *dev)
802{ 1218{
803 struct fsmc_nand_data *host = dev_get_drvdata(dev); 1219 struct fsmc_nand_data *host = dev_get_drvdata(dev);
804 if (host) 1220 if (host) {
805 clk_enable(host->clk); 1221 clk_enable(host->clk);
1222 fsmc_nand_setup(host->regs_va, host->bank,
1223 host->nand.options & NAND_BUSWIDTH_16,
1224 host->dev_timings);
1225 }
806 return 0; 1226 return 0;
807} 1227}
808 1228
809static const struct dev_pm_ops fsmc_nand_pm_ops = { 1229static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
810 .suspend = fsmc_nand_suspend, 1230#endif
811 .resume = fsmc_nand_resume, 1231
1232#ifdef CONFIG_OF
1233static const struct of_device_id fsmc_nand_id_table[] = {
1234 { .compatible = "st,spear600-fsmc-nand" },
1235 {}
812}; 1236};
1237MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
813#endif 1238#endif
814 1239
815static struct platform_driver fsmc_nand_driver = { 1240static struct platform_driver fsmc_nand_driver = {
@@ -817,6 +1242,7 @@ static struct platform_driver fsmc_nand_driver = {
817 .driver = { 1242 .driver = {
818 .owner = THIS_MODULE, 1243 .owner = THIS_MODULE,
819 .name = "fsmc-nand", 1244 .name = "fsmc-nand",
1245 .of_match_table = of_match_ptr(fsmc_nand_id_table),
820#ifdef CONFIG_PM 1246#ifdef CONFIG_PM
821 .pm = &fsmc_nand_pm_ops, 1247 .pm = &fsmc_nand_pm_ops,
822#endif 1248#endif
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 590dd5cceed6..e8ea7107932e 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -848,7 +848,10 @@ int gpmi_send_command(struct gpmi_nand_data *this)
848 848
849 sg_init_one(sgl, this->cmd_buffer, this->command_length); 849 sg_init_one(sgl, this->cmd_buffer, this->command_length);
850 dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE); 850 dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
851 desc = dmaengine_prep_slave_sg(channel, sgl, 1, DMA_MEM_TO_DEV, 1); 851 desc = dmaengine_prep_slave_sg(channel,
852 sgl, 1, DMA_MEM_TO_DEV,
853 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
854
852 if (!desc) { 855 if (!desc) {
853 pr_err("step 2 error\n"); 856 pr_err("step 2 error\n");
854 return -1; 857 return -1;
@@ -889,7 +892,8 @@ int gpmi_send_data(struct gpmi_nand_data *this)
889 /* [2] send DMA request */ 892 /* [2] send DMA request */
890 prepare_data_dma(this, DMA_TO_DEVICE); 893 prepare_data_dma(this, DMA_TO_DEVICE);
891 desc = dmaengine_prep_slave_sg(channel, &this->data_sgl, 894 desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
892 1, DMA_MEM_TO_DEV, 1); 895 1, DMA_MEM_TO_DEV,
896 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
893 if (!desc) { 897 if (!desc) {
894 pr_err("step 2 error\n"); 898 pr_err("step 2 error\n");
895 return -1; 899 return -1;
@@ -925,7 +929,8 @@ int gpmi_read_data(struct gpmi_nand_data *this)
925 /* [2] : send DMA request */ 929 /* [2] : send DMA request */
926 prepare_data_dma(this, DMA_FROM_DEVICE); 930 prepare_data_dma(this, DMA_FROM_DEVICE);
927 desc = dmaengine_prep_slave_sg(channel, &this->data_sgl, 931 desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
928 1, DMA_DEV_TO_MEM, 1); 932 1, DMA_DEV_TO_MEM,
933 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
929 if (!desc) { 934 if (!desc) {
930 pr_err("step 2 error\n"); 935 pr_err("step 2 error\n");
931 return -1; 936 return -1;
@@ -970,8 +975,10 @@ int gpmi_send_page(struct gpmi_nand_data *this,
970 pio[4] = payload; 975 pio[4] = payload;
971 pio[5] = auxiliary; 976 pio[5] = auxiliary;
972 977
973 desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio, 978 desc = dmaengine_prep_slave_sg(channel,
974 ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); 979 (struct scatterlist *)pio,
980 ARRAY_SIZE(pio), DMA_TRANS_NONE,
981 DMA_CTRL_ACK);
975 if (!desc) { 982 if (!desc) {
976 pr_err("step 2 error\n"); 983 pr_err("step 2 error\n");
977 return -1; 984 return -1;
@@ -1035,7 +1042,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
1035 pio[5] = auxiliary; 1042 pio[5] = auxiliary;
1036 desc = dmaengine_prep_slave_sg(channel, 1043 desc = dmaengine_prep_slave_sg(channel,
1037 (struct scatterlist *)pio, 1044 (struct scatterlist *)pio,
1038 ARRAY_SIZE(pio), DMA_TRANS_NONE, 1); 1045 ARRAY_SIZE(pio), DMA_TRANS_NONE,
1046 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1039 if (!desc) { 1047 if (!desc) {
1040 pr_err("step 2 error\n"); 1048 pr_err("step 2 error\n");
1041 return -1; 1049 return -1;
@@ -1052,9 +1060,11 @@ int gpmi_read_page(struct gpmi_nand_data *this,
1052 | BF_GPMI_CTRL0_ADDRESS(address) 1060 | BF_GPMI_CTRL0_ADDRESS(address)
1053 | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size); 1061 | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
1054 pio[1] = 0; 1062 pio[1] = 0;
1063 pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */
1055 desc = dmaengine_prep_slave_sg(channel, 1064 desc = dmaengine_prep_slave_sg(channel,
1056 (struct scatterlist *)pio, 2, 1065 (struct scatterlist *)pio, 3,
1057 DMA_TRANS_NONE, 1); 1066 DMA_TRANS_NONE,
1067 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1058 if (!desc) { 1068 if (!desc) {
1059 pr_err("step 3 error\n"); 1069 pr_err("step 3 error\n");
1060 return -1; 1070 return -1;
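
These gpmi-lib.c hunks spell out the dmaengine descriptor flags instead of passing a bare 1; DMA_PREP_INTERRUPT is (1 << 0) and DMA_CTRL_ACK is (1 << 1) in enum dma_ctrl_flags, so the old literal covered only the interrupt flag. A generic sketch of the prepare/submit pattern being touched, not taken from this driver and assuming only <linux/dmaengine.h>:

/* Illustrative only: prepare and kick one dmaengine slave transfer. */
static int sketch_submit_slave_sg(struct dma_chan *chan,
				  struct scatterlist *sgl, unsigned int sg_len,
				  enum dma_transfer_direction dir)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	dmaengine_submit(desc);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* start the channel */
	return 0;
}
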
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 493ec2fcf97f..75b1dde16358 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1124,7 +1124,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
1124 chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); 1124 chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
1125 1125
1126 /* Do we have a flash based bad block table ? */ 1126 /* Do we have a flash based bad block table ? */
1127 if (chip->options & NAND_BBT_USE_FLASH) 1127 if (chip->bbt_options & NAND_BBT_USE_FLASH)
1128 ret = nand_update_bbt(mtd, ofs); 1128 ret = nand_update_bbt(mtd, ofs);
1129 else { 1129 else {
1130 chipnr = (int)(ofs >> chip->chip_shift); 1130 chipnr = (int)(ofs >> chip->chip_shift);
@@ -1155,7 +1155,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
1155 return ret; 1155 return ret;
1156} 1156}
1157 1157
1158static int __devinit nand_boot_set_geometry(struct gpmi_nand_data *this) 1158static int nand_boot_set_geometry(struct gpmi_nand_data *this)
1159{ 1159{
1160 struct boot_rom_geometry *geometry = &this->rom_geometry; 1160 struct boot_rom_geometry *geometry = &this->rom_geometry;
1161 1161
@@ -1182,7 +1182,7 @@ static int __devinit nand_boot_set_geometry(struct gpmi_nand_data *this)
1182} 1182}
1183 1183
1184static const char *fingerprint = "STMP"; 1184static const char *fingerprint = "STMP";
1185static int __devinit mx23_check_transcription_stamp(struct gpmi_nand_data *this) 1185static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1186{ 1186{
1187 struct boot_rom_geometry *rom_geo = &this->rom_geometry; 1187 struct boot_rom_geometry *rom_geo = &this->rom_geometry;
1188 struct device *dev = this->dev; 1188 struct device *dev = this->dev;
@@ -1239,7 +1239,7 @@ static int __devinit mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1239} 1239}
1240 1240
1241/* Writes a transcription stamp. */ 1241/* Writes a transcription stamp. */
1242static int __devinit mx23_write_transcription_stamp(struct gpmi_nand_data *this) 1242static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1243{ 1243{
1244 struct device *dev = this->dev; 1244 struct device *dev = this->dev;
1245 struct boot_rom_geometry *rom_geo = &this->rom_geometry; 1245 struct boot_rom_geometry *rom_geo = &this->rom_geometry;
@@ -1322,7 +1322,7 @@ static int __devinit mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1322 return 0; 1322 return 0;
1323} 1323}
1324 1324
1325static int __devinit mx23_boot_init(struct gpmi_nand_data *this) 1325static int mx23_boot_init(struct gpmi_nand_data *this)
1326{ 1326{
1327 struct device *dev = this->dev; 1327 struct device *dev = this->dev;
1328 struct nand_chip *chip = &this->nand; 1328 struct nand_chip *chip = &this->nand;
@@ -1391,7 +1391,7 @@ static int __devinit mx23_boot_init(struct gpmi_nand_data *this)
1391 return 0; 1391 return 0;
1392} 1392}
1393 1393
1394static int __devinit nand_boot_init(struct gpmi_nand_data *this) 1394static int nand_boot_init(struct gpmi_nand_data *this)
1395{ 1395{
1396 nand_boot_set_geometry(this); 1396 nand_boot_set_geometry(this);
1397 1397
@@ -1401,7 +1401,7 @@ static int __devinit nand_boot_init(struct gpmi_nand_data *this)
1401 return 0; 1401 return 0;
1402} 1402}
1403 1403
1404static int __devinit gpmi_set_geometry(struct gpmi_nand_data *this) 1404static int gpmi_set_geometry(struct gpmi_nand_data *this)
1405{ 1405{
1406 int ret; 1406 int ret;
1407 1407
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index e023bccb7781..ec6180d4ff8f 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -20,7 +20,7 @@
20#include <linux/mtd/nand.h> 20#include <linux/mtd/nand.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
23#include <mach/dma.h> 23#include <linux/fsl/mxs-dma.h>
24 24
25struct resources { 25struct resources {
26 void *gpmi_regs; 26 void *gpmi_regs;
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index 5dc6f0d92f1a..11e487813428 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -135,8 +135,8 @@ static int __init h1910_init(void)
135 } 135 }
136 136
137 /* Register the partitions */ 137 /* Register the partitions */
138 mtd_device_parse_register(h1910_nand_mtd, NULL, 0, 138 mtd_device_parse_register(h1910_nand_mtd, NULL, NULL, partition_info,
139 partition_info, NUM_PARTITIONS); 139 NUM_PARTITIONS);
140 140
141 /* Return happy */ 141 /* Return happy */
142 return 0; 142 return 0;
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index ac3b9f255e00..e4147e8acb7c 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -332,6 +332,11 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
332 chip->ecc.mode = NAND_ECC_HW_OOB_FIRST; 332 chip->ecc.mode = NAND_ECC_HW_OOB_FIRST;
333 chip->ecc.size = 512; 333 chip->ecc.size = 512;
334 chip->ecc.bytes = 9; 334 chip->ecc.bytes = 9;
335 chip->ecc.strength = 2;
336 /*
337 * FIXME: ecc_strength value of 2 bits per 512 bytes of data is a
338 * conservative guess, given 9 ecc bytes and reed-solomon alg.
339 */
335 340
336 if (pdata) 341 if (pdata)
337 chip->ecc.layout = pdata->ecc_layout; 342 chip->ecc.layout = pdata->ecc_layout;
@@ -367,9 +372,9 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
367 goto err_gpio_free; 372 goto err_gpio_free;
368 } 373 }
369 374
370 ret = mtd_device_parse_register(mtd, NULL, 0, 375 ret = mtd_device_parse_register(mtd, NULL, NULL,
371 pdata ? pdata->partitions : NULL, 376 pdata ? pdata->partitions : NULL,
372 pdata ? pdata->num_partitions : 0); 377 pdata ? pdata->num_partitions : 0);
373 378
374 if (ret) { 379 if (ret) {
375 dev_err(&pdev->dev, "Failed to add mtd device\n"); 380 dev_err(&pdev->dev, "Failed to add mtd device\n");
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 74a43b818d0e..cc0678a967c1 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -1225,9 +1225,16 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1225 goto escan; 1225 goto escan;
1226 } 1226 }
1227 1227
1228 if (this->ecc.mode == NAND_ECC_HW) {
1229 if (nfc_is_v1())
1230 this->ecc.strength = 1;
1231 else
1232 this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
1233 }
1234
1228 /* Register the partitions */ 1235 /* Register the partitions */
1229 mtd_device_parse_register(mtd, part_probes, 0, 1236 mtd_device_parse_register(mtd, part_probes, NULL, pdata->parts,
1230 pdata->parts, pdata->nr_parts); 1237 pdata->nr_parts);
1231 1238
1232 platform_set_drvdata(pdev, host); 1239 platform_set_drvdata(pdev, host);
1233 1240
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 8a393f9e6027..47b19c0bb070 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -123,12 +123,6 @@ static int check_offs_len(struct mtd_info *mtd,
 		ret = -EINVAL;
 	}
 
-	/* Do not allow past end of device */
-	if (ofs + len > mtd->size) {
-		pr_debug("%s: past end of device\n", __func__);
-		ret = -EINVAL;
-	}
-
 	return ret;
 }
 
@@ -338,7 +332,7 @@ static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
  */
 static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
 {
-	int page, chipnr, res = 0;
+	int page, chipnr, res = 0, i = 0;
 	struct nand_chip *chip = mtd->priv;
 	u16 bad;
 
@@ -356,23 +350,29 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
 		chip->select_chip(mtd, chipnr);
 	}
 
-	if (chip->options & NAND_BUSWIDTH_16) {
-		chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos & 0xFE,
-			      page);
-		bad = cpu_to_le16(chip->read_word(mtd));
-		if (chip->badblockpos & 0x1)
-			bad >>= 8;
-		else
-			bad &= 0xFF;
-	} else {
-		chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page);
-		bad = chip->read_byte(mtd);
-	}
-
-	if (likely(chip->badblockbits == 8))
-		res = bad != 0xFF;
-	else
-		res = hweight8(bad) < chip->badblockbits;
+	do {
+		if (chip->options & NAND_BUSWIDTH_16) {
+			chip->cmdfunc(mtd, NAND_CMD_READOOB,
+					chip->badblockpos & 0xFE, page);
+			bad = cpu_to_le16(chip->read_word(mtd));
+			if (chip->badblockpos & 0x1)
+				bad >>= 8;
+			else
+				bad &= 0xFF;
+		} else {
+			chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
+					page);
+			bad = chip->read_byte(mtd);
+		}
+
+		if (likely(chip->badblockbits == 8))
+			res = bad != 0xFF;
+		else
+			res = hweight8(bad) < chip->badblockbits;
+		ofs += mtd->writesize;
+		page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+		i++;
+	} while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
 
 	if (getchip)
 		nand_release_device(mtd);
@@ -386,51 +386,79 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
  * @ofs: offset from device start
  *
  * This is the default implementation, which can be overridden by a hardware
- * specific driver.
+ * specific driver. We try operations in the following order, according to our
+ * bbt_options (NAND_BBT_NO_OOB_BBM and NAND_BBT_USE_FLASH):
+ * (1) erase the affected block, to allow OOB marker to be written cleanly
+ * (2) update in-memory BBT
+ * (3) write bad block marker to OOB area of affected block
+ * (4) update flash-based BBT
+ * Note that we retain the first error encountered in (3) or (4), finish the
+ * procedures, and dump the error in the end.
 */
 static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
 {
 	struct nand_chip *chip = mtd->priv;
 	uint8_t buf[2] = { 0, 0 };
-	int block, ret, i = 0;
+	int block, res, ret = 0, i = 0;
+	int write_oob = !(chip->bbt_options & NAND_BBT_NO_OOB_BBM);
 
-	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
-		ofs += mtd->erasesize - mtd->writesize;
+	if (write_oob) {
+		struct erase_info einfo;
+
+		/* Attempt erase before marking OOB */
+		memset(&einfo, 0, sizeof(einfo));
+		einfo.mtd = mtd;
+		einfo.addr = ofs;
+		einfo.len = 1 << chip->phys_erase_shift;
+		nand_erase_nand(mtd, &einfo, 0);
+	}
 
 	/* Get block number */
 	block = (int)(ofs >> chip->bbt_erase_shift);
+	/* Mark block bad in memory-based BBT */
 	if (chip->bbt)
 		chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
 
-	/* Do we have a flash based bad block table? */
-	if (chip->bbt_options & NAND_BBT_USE_FLASH)
-		ret = nand_update_bbt(mtd, ofs);
-	else {
+	/* Write bad block marker to OOB */
+	if (write_oob) {
 		struct mtd_oob_ops ops;
+		loff_t wr_ofs = ofs;
 
 		nand_get_device(chip, mtd, FL_WRITING);
 
-		/*
-		 * Write to first two pages if necessary. If we write to more
-		 * than one location, the first error encountered quits the
-		 * procedure. We write two bytes per location, so we dont have
-		 * to mess with 16 bit access.
-		 */
-		ops.len = ops.ooblen = 2;
 		ops.datbuf = NULL;
 		ops.oobbuf = buf;
-		ops.ooboffs = chip->badblockpos & ~0x01;
+		ops.ooboffs = chip->badblockpos;
+		if (chip->options & NAND_BUSWIDTH_16) {
+			ops.ooboffs &= ~0x01;
+			ops.len = ops.ooblen = 2;
+		} else {
+			ops.len = ops.ooblen = 1;
+		}
 		ops.mode = MTD_OPS_PLACE_OOB;
+
+		/* Write to first/last page(s) if necessary */
+		if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
+			wr_ofs += mtd->erasesize - mtd->writesize;
 		do {
-			ret = nand_do_write_oob(mtd, ofs, &ops);
+			res = nand_do_write_oob(mtd, wr_ofs, &ops);
+			if (!ret)
+				ret = res;
 
 			i++;
-			ofs += mtd->writesize;
-		} while (!ret && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE) &&
-				i < 2);
+			wr_ofs += mtd->writesize;
+		} while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
 
 		nand_release_device(mtd);
 	}
+
+	/* Update flash-based bad block table */
+	if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+		res = nand_update_bbt(mtd, ofs);
+		if (!ret)
+			ret = res;
+	}
+
 	if (!ret)
 		mtd->ecc_stats.badblocks++;
 
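Condensed to pseudocode, the reworked nand_default_block_markbad() now always runs its steps in this fixed order (a sketch with hypothetical helper names, not the literal kernel code):

	if (write_oob)
		erase_block(mtd, ofs);			/* (1) best-effort erase of the block */
	mark_bad_in_memory_bbt(chip, block);		/* (2) in-memory BBT */
	if (write_oob)
		ret = write_oob_marker(mtd, ofs);	/* (3) OOB marker, first error kept */
	if (chip->bbt_options & NAND_BBT_USE_FLASH) {
		res = nand_update_bbt(mtd, ofs);	/* (4) flash-based BBT */
		if (!ret)
			ret = res;
	}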
@@ -1586,25 +1614,14 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
 	struct mtd_oob_ops ops;
 	int ret;
 
-	/* Do not allow reads past end of device */
-	if ((from + len) > mtd->size)
-		return -EINVAL;
-	if (!len)
-		return 0;
-
 	nand_get_device(chip, mtd, FL_READING);
-
 	ops.len = len;
 	ops.datbuf = buf;
 	ops.oobbuf = NULL;
 	ops.mode = 0;
-
 	ret = nand_do_read_ops(mtd, from, &ops);
-
 	*retlen = ops.retlen;
-
 	nand_release_device(mtd);
-
 	return ret;
 }
 
@@ -2293,12 +2310,6 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
 	struct mtd_oob_ops ops;
 	int ret;
 
-	/* Do not allow reads past end of device */
-	if ((to + len) > mtd->size)
-		return -EINVAL;
-	if (!len)
-		return 0;
-
 	/* Wait for the device to get ready */
 	panic_nand_wait(mtd, chip, 400);
 
@@ -2333,25 +2344,14 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
 	struct mtd_oob_ops ops;
 	int ret;
 
-	/* Do not allow reads past end of device */
-	if ((to + len) > mtd->size)
-		return -EINVAL;
-	if (!len)
-		return 0;
-
 	nand_get_device(chip, mtd, FL_WRITING);
-
 	ops.len = len;
 	ops.datbuf = (uint8_t *)buf;
 	ops.oobbuf = NULL;
 	ops.mode = 0;
-
 	ret = nand_do_write_ops(mtd, to, &ops);
-
 	*retlen = ops.retlen;
-
 	nand_release_device(mtd);
-
 	return ret;
 }
 
@@ -2550,8 +2550,6 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
 	if (check_offs_len(mtd, instr->addr, instr->len))
 		return -EINVAL;
 
-	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
-
 	/* Grab the lock and see if the device is available */
 	nand_get_device(chip, mtd, FL_ERASING);
 
@@ -2715,10 +2713,6 @@ static void nand_sync(struct mtd_info *mtd)
  */
 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
 {
-	/* Check for invalid offset */
-	if (offs > mtd->size)
-		return -EINVAL;
-
 	return nand_block_checkbad(mtd, offs, 1, 0);
 }
 
@@ -2857,7 +2851,6 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
 	    chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
 		return 0;
 
-	pr_info("ONFI flash detected\n");
 	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
 	for (i = 0; i < 3; i++) {
 		chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
@@ -2898,7 +2891,8 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
 	mtd->writesize = le32_to_cpu(p->byte_per_page);
 	mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
 	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
-	chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
+	chip->chipsize = le32_to_cpu(p->blocks_per_lun);
+	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
 	*busw = 0;
 	if (le16_to_cpu(p->features) & 1)
 		*busw = NAND_BUSWIDTH_16;
@@ -2907,6 +2901,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
 	chip->options |= (NAND_NO_READRDY |
 			NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK;
 
+	pr_info("ONFI flash detected\n");
 	return 1;
 }
 
@@ -3238,6 +3233,10 @@ int nand_scan_tail(struct mtd_info *mtd)
 	int i;
 	struct nand_chip *chip = mtd->priv;
 
+	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
+	BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
+			!(chip->bbt_options & NAND_BBT_USE_FLASH));
+
 	if (!(chip->options & NAND_OWN_BUFFERS))
 		chip->buffers = kmalloc(sizeof(*chip->buffers), GFP_KERNEL);
 	if (!chip->buffers)
@@ -3350,6 +3349,7 @@ int nand_scan_tail(struct mtd_info *mtd)
 		if (!chip->ecc.size)
 			chip->ecc.size = 256;
 		chip->ecc.bytes = 3;
+		chip->ecc.strength = 1;
 		break;
 
 	case NAND_ECC_SOFT_BCH:
@@ -3384,6 +3384,8 @@ int nand_scan_tail(struct mtd_info *mtd)
 			pr_warn("BCH ECC initialization failed!\n");
 			BUG();
 		}
+		chip->ecc.strength =
+			chip->ecc.bytes*8 / fls(8*chip->ecc.size);
 		break;
 
 	case NAND_ECC_NONE:
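As a worked example of the new soft-BCH strength computation: with the common layout of ecc.size = 512 and ecc.bytes = 7, the formula gives chip->ecc.strength = 7*8 / fls(8*512) = 56 / fls(4096) = 56 / 13 = 4 correctable bits per 512-byte ECC step (fls(4096) is 13, since 4096 is 2^12 and fls() returns the position of the highest set bit).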
@@ -3397,6 +3399,7 @@ int nand_scan_tail(struct mtd_info *mtd)
 		chip->ecc.write_oob = nand_write_oob_std;
 		chip->ecc.size = mtd->writesize;
 		chip->ecc.bytes = 0;
+		chip->ecc.strength = 0;
 		break;
 
 	default:
@@ -3461,25 +3464,26 @@ int nand_scan_tail(struct mtd_info *mtd)
 	mtd->type = MTD_NANDFLASH;
 	mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
 						MTD_CAP_NANDFLASH;
-	mtd->erase = nand_erase;
-	mtd->point = NULL;
-	mtd->unpoint = NULL;
-	mtd->read = nand_read;
-	mtd->write = nand_write;
-	mtd->panic_write = panic_nand_write;
-	mtd->read_oob = nand_read_oob;
-	mtd->write_oob = nand_write_oob;
-	mtd->sync = nand_sync;
-	mtd->lock = NULL;
-	mtd->unlock = NULL;
-	mtd->suspend = nand_suspend;
-	mtd->resume = nand_resume;
-	mtd->block_isbad = nand_block_isbad;
-	mtd->block_markbad = nand_block_markbad;
+	mtd->_erase = nand_erase;
+	mtd->_point = NULL;
+	mtd->_unpoint = NULL;
+	mtd->_read = nand_read;
+	mtd->_write = nand_write;
+	mtd->_panic_write = panic_nand_write;
+	mtd->_read_oob = nand_read_oob;
+	mtd->_write_oob = nand_write_oob;
+	mtd->_sync = nand_sync;
+	mtd->_lock = NULL;
+	mtd->_unlock = NULL;
+	mtd->_suspend = nand_suspend;
+	mtd->_resume = nand_resume;
+	mtd->_block_isbad = nand_block_isbad;
+	mtd->_block_markbad = nand_block_markbad;
 	mtd->writebufsize = mtd->writesize;
 
-	/* propagate ecc.layout to mtd_info */
+	/* propagate ecc info to mtd_info */
 	mtd->ecclayout = chip->ecc.layout;
+	mtd->ecc_strength = chip->ecc.strength * chip->ecc.steps;
 
 	/* Check, if we should skip the bad block table scan */
 	if (chip->options & NAND_SKIP_BBTSCAN)
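The underscore-prefixed members (mtd->_erase, mtd->_read, ...) are the same driver hooks as before; they are renamed because generic wrappers in mtdcore now sit in front of them and take over the boundary and zero-length checks that the hunks in this and the other drivers delete. Such a wrapper has roughly this shape (a sketch of the assumed mtdcore behaviour, not the verbatim code):

	int mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char *buf)
	{
		*retlen = 0;
		/* Common bounds and zero-length handling, done once for all drivers */
		if (from < 0 || from + len > mtd->size)
			return -EINVAL;
		if (!len)
			return 0;
		return mtd->_read(mtd, from, len, retlen, buf);
	}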
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index ec688548c880..2b6f632cf274 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -179,6 +179,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
 	chip->ecc.mode = NAND_ECC_HW;
 	chip->ecc.size = 256;
 	chip->ecc.bytes = 3;
+	chip->ecc.strength = 1;
 	chip->priv = ndfc;
 
 	ndfc->mtd.priv = chip;
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index b3a883e2a22f..c2b0bba9d8b3 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1058,6 +1058,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 	    (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
 		info->nand.ecc.bytes = 3;
 		info->nand.ecc.size = 512;
+		info->nand.ecc.strength = 1;
 		info->nand.ecc.calculate = omap_calculate_ecc;
 		info->nand.ecc.hwctl = omap_enable_hwecc;
 		info->nand.ecc.correct = omap_correct_data;
@@ -1101,8 +1102,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 		goto out_release_mem_region;
 	}
 
-	mtd_device_parse_register(&info->mtd, NULL, 0,
-			pdata->parts, pdata->nr_parts);
+	mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts,
+				  pdata->nr_parts);
 
 	platform_set_drvdata(pdev, &info->mtd);
 
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 29f505adaf84..1d3bfb26080c 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -129,8 +129,8 @@ static int __init orion_nand_probe(struct platform_device *pdev)
 	}
 
 	mtd->name = "orion_nand";
-	ret = mtd_device_parse_register(mtd, NULL, 0,
-			board->parts, board->nr_parts);
+	ret = mtd_device_parse_register(mtd, NULL, NULL, board->parts,
+					board->nr_parts);
 	if (ret) {
 		nand_release(mtd);
 		goto no_dev;
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 7f2da6953357..6404e6e81b10 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -99,8 +99,9 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
 	}
 
 	err = mtd_device_parse_register(&data->mtd,
-			pdata->chip.part_probe_types, 0,
-			pdata->chip.partitions, pdata->chip.nr_partitions);
+			pdata->chip.part_probe_types, NULL,
+			pdata->chip.partitions,
+			pdata->chip.nr_partitions);
 
 	if (!err)
 		return err;
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
index 7e52af51a198..0ddd90e5788f 100644
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -275,11 +275,10 @@ static int __init ppchameleonevb_init(void)
 	ppchameleon_mtd->name = "ppchameleon-nand";
 
 	/* Register the partitions */
-	mtd_device_parse_register(ppchameleon_mtd, NULL, 0,
-			ppchameleon_mtd->size == NAND_SMALL_SIZE ?
-			partition_info_me :
-			partition_info_hi,
-			NUM_PARTITIONS);
+	mtd_device_parse_register(ppchameleon_mtd, NULL, NULL,
+			ppchameleon_mtd->size == NAND_SMALL_SIZE ?
+			partition_info_me : partition_info_hi,
+			NUM_PARTITIONS);
 
  nand_evb_init:
 /****************************
@@ -365,11 +364,10 @@ static int __init ppchameleonevb_init(void)
 	ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
 
 	/* Register the partitions */
-	mtd_device_parse_register(ppchameleonevb_mtd, NULL, 0,
-			ppchameleon_mtd->size == NAND_SMALL_SIZE ?
-			partition_info_me :
-			partition_info_hi,
-			NUM_PARTITIONS);
+	mtd_device_parse_register(ppchameleonevb_mtd, NULL, NULL,
+			ppchameleon_mtd->size == NAND_SMALL_SIZE ?
+			partition_info_me : partition_info_hi,
+			NUM_PARTITIONS);
 
 	/* Return happy */
 	return 0;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 5c3d719c37e6..def50caa6f84 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1002,6 +1002,7 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
 KEEP_CONFIG:
 	chip->ecc.mode = NAND_ECC_HW;
 	chip->ecc.size = host->page_size;
+	chip->ecc.strength = 1;
 
 	chip->options = NAND_NO_AUTOINCR;
 	chip->options |= NAND_NO_READRDY;
@@ -1228,8 +1229,9 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
 			continue;
 		}
 
-		ret = mtd_device_parse_register(info->host[cs]->mtd, NULL, 0,
-				pdata->parts[cs], pdata->nr_parts[cs]);
+		ret = mtd_device_parse_register(info->host[cs]->mtd, NULL,
+						NULL, pdata->parts[cs],
+						pdata->nr_parts[cs]);
 		if (!ret)
 			probe_success = 1;
 	}
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index 769a4e096b3c..c2040187c813 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -891,6 +891,7 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
 	chip->ecc.size = R852_DMA_LEN;
 	chip->ecc.bytes = SM_OOB_SIZE;
+	chip->ecc.strength = 2;
 	chip->ecc.hwctl = r852_ecc_hwctl;
 	chip->ecc.calculate = r852_ecc_calculate;
 	chip->ecc.correct = r852_ecc_correct;
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
index f309addc2fa0..e55b5cfbe145 100644
--- a/drivers/mtd/nand/rtc_from4.c
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -527,6 +527,7 @@ static int __init rtc_from4_init(void)
 	this->ecc.mode = NAND_ECC_HW_SYNDROME;
 	this->ecc.size = 512;
 	this->ecc.bytes = 8;
+	this->ecc.strength = 3;
 	/* return the status of extra status and ECC checks */
 	this->errstat = rtc_from4_errstat;
 	/* set the nand_oobinfo to support FPGA H/W error detection */
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 868685db6712..91121f33f743 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -751,8 +751,8 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
 	if (set)
 		mtd->mtd.name = set->name;
 
-	return mtd_device_parse_register(&mtd->mtd, NULL, 0,
-			set->partitions, set->nr_partitions);
+	return mtd_device_parse_register(&mtd->mtd, NULL, NULL,
+					 set->partitions, set->nr_partitions);
 }
 
 /**
@@ -823,6 +823,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
 	chip->ecc.calculate = s3c2410_nand_calculate_ecc;
 	chip->ecc.correct = s3c2410_nand_correct_data;
 	chip->ecc.mode = NAND_ECC_HW;
+	chip->ecc.strength = 1;
 
 	switch (info->cpu_type) {
 	case TYPE_S3C2410:
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 93b1f74321c2..e9b2b260de3a 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -26,6 +26,7 @@
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
 #include <linux/mtd/mtd.h>
@@ -283,7 +284,7 @@ static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
 static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
 {
 	struct sh_flctl *flctl = mtd_to_flctl(mtd);
-	uint32_t flcmncr_val = readl(FLCMNCR(flctl)) & ~SEL_16BIT;
+	uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
 	uint32_t flcmdcr_val, addr_len_bytes = 0;
 
 	/* Set SNAND bit if page size is 2048byte */
@@ -303,6 +304,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
 		break;
 	case NAND_CMD_READ0:
 	case NAND_CMD_READOOB:
+	case NAND_CMD_RNDOUT:
 		addr_len_bytes = flctl->rw_ADRCNT;
 		flcmdcr_val |= CDSRC_E;
 		if (flctl->chip.options & NAND_BUSWIDTH_16)
@@ -320,6 +322,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
 		break;
 	case NAND_CMD_READID:
 		flcmncr_val &= ~SNAND_E;
+		flcmdcr_val |= CDSRC_E;
 		addr_len_bytes = ADRCNT_1;
 		break;
 	case NAND_CMD_STATUS:
@@ -513,6 +516,8 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
513 struct sh_flctl *flctl = mtd_to_flctl(mtd); 516 struct sh_flctl *flctl = mtd_to_flctl(mtd);
514 uint32_t read_cmd = 0; 517 uint32_t read_cmd = 0;
515 518
519 pm_runtime_get_sync(&flctl->pdev->dev);
520
516 flctl->read_bytes = 0; 521 flctl->read_bytes = 0;
517 if (command != NAND_CMD_PAGEPROG) 522 if (command != NAND_CMD_PAGEPROG)
518 flctl->index = 0; 523 flctl->index = 0;
@@ -525,7 +530,6 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
525 execmd_read_page_sector(mtd, page_addr); 530 execmd_read_page_sector(mtd, page_addr);
526 break; 531 break;
527 } 532 }
528 empty_fifo(flctl);
529 if (flctl->page_size) 533 if (flctl->page_size)
530 set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8) 534 set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
531 | command); 535 | command);
@@ -547,7 +551,6 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
547 break; 551 break;
548 } 552 }
549 553
550 empty_fifo(flctl);
551 if (flctl->page_size) { 554 if (flctl->page_size) {
552 set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8) 555 set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
553 | NAND_CMD_READ0); 556 | NAND_CMD_READ0);
@@ -559,15 +562,35 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
559 flctl->read_bytes = mtd->oobsize; 562 flctl->read_bytes = mtd->oobsize;
560 goto read_normal_exit; 563 goto read_normal_exit;
561 564
565 case NAND_CMD_RNDOUT:
566 if (flctl->hwecc)
567 break;
568
569 if (flctl->page_size)
570 set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
571 | command);
572 else
573 set_cmd_regs(mtd, command, command);
574
575 set_addr(mtd, column, 0);
576
577 flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
578 goto read_normal_exit;
579
562 case NAND_CMD_READID: 580 case NAND_CMD_READID:
563 empty_fifo(flctl);
564 set_cmd_regs(mtd, command, command); 581 set_cmd_regs(mtd, command, command);
565 set_addr(mtd, 0, 0);
566 582
567 flctl->read_bytes = 4; 583 /* READID is always performed using an 8-bit bus */
584 if (flctl->chip.options & NAND_BUSWIDTH_16)
585 column <<= 1;
586 set_addr(mtd, column, 0);
587
588 flctl->read_bytes = 8;
568 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */ 589 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
590 empty_fifo(flctl);
569 start_translation(flctl); 591 start_translation(flctl);
570 read_datareg(flctl, 0); /* read and end */ 592 read_fiforeg(flctl, flctl->read_bytes, 0);
593 wait_completion(flctl);
571 break; 594 break;
572 595
573 case NAND_CMD_ERASE1: 596 case NAND_CMD_ERASE1:
@@ -650,29 +673,55 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
650 default: 673 default:
651 break; 674 break;
652 } 675 }
653 return; 676 goto runtime_exit;
654 677
655read_normal_exit: 678read_normal_exit:
656 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */ 679 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
680 empty_fifo(flctl);
657 start_translation(flctl); 681 start_translation(flctl);
658 read_fiforeg(flctl, flctl->read_bytes, 0); 682 read_fiforeg(flctl, flctl->read_bytes, 0);
659 wait_completion(flctl); 683 wait_completion(flctl);
684runtime_exit:
685 pm_runtime_put_sync(&flctl->pdev->dev);
660 return; 686 return;
661} 687}
662 688
663static void flctl_select_chip(struct mtd_info *mtd, int chipnr) 689static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
664{ 690{
665 struct sh_flctl *flctl = mtd_to_flctl(mtd); 691 struct sh_flctl *flctl = mtd_to_flctl(mtd);
666 uint32_t flcmncr_val = readl(FLCMNCR(flctl)); 692 int ret;
667 693
668 switch (chipnr) { 694 switch (chipnr) {
669 case -1: 695 case -1:
670 flcmncr_val &= ~CE0_ENABLE; 696 flctl->flcmncr_base &= ~CE0_ENABLE;
671 writel(flcmncr_val, FLCMNCR(flctl)); 697
698 pm_runtime_get_sync(&flctl->pdev->dev);
699 writel(flctl->flcmncr_base, FLCMNCR(flctl));
700
701 if (flctl->qos_request) {
702 dev_pm_qos_remove_request(&flctl->pm_qos);
703 flctl->qos_request = 0;
704 }
705
706 pm_runtime_put_sync(&flctl->pdev->dev);
672 break; 707 break;
673 case 0: 708 case 0:
674 flcmncr_val |= CE0_ENABLE; 709 flctl->flcmncr_base |= CE0_ENABLE;
675 writel(flcmncr_val, FLCMNCR(flctl)); 710
711 if (!flctl->qos_request) {
712 ret = dev_pm_qos_add_request(&flctl->pdev->dev,
713 &flctl->pm_qos, 100);
714 if (ret < 0)
715 dev_err(&flctl->pdev->dev,
716 "PM QoS request failed: %d\n", ret);
717 flctl->qos_request = 1;
718 }
719
720 if (flctl->holden) {
721 pm_runtime_get_sync(&flctl->pdev->dev);
722 writel(HOLDEN, FLHOLDCR(flctl));
723 pm_runtime_put_sync(&flctl->pdev->dev);
724 }
676 break; 725 break;
677 default: 726 default:
678 BUG(); 727 BUG();
@@ -730,11 +779,6 @@ static int flctl_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
 	return 0;
 }
 
-static void flctl_register_init(struct sh_flctl *flctl, unsigned long val)
-{
-	writel(val, FLCMNCR(flctl));
-}
-
 static int flctl_chip_init_tail(struct mtd_info *mtd)
 {
 	struct sh_flctl *flctl = mtd_to_flctl(mtd);
@@ -781,13 +825,13 @@ static int flctl_chip_init_tail(struct mtd_info *mtd)
 
 		chip->ecc.size = 512;
 		chip->ecc.bytes = 10;
+		chip->ecc.strength = 4;
 		chip->ecc.read_page = flctl_read_page_hwecc;
 		chip->ecc.write_page = flctl_write_page_hwecc;
 		chip->ecc.mode = NAND_ECC_HW;
 
 		/* 4 symbols ECC enabled */
-		writel(readl(FLCMNCR(flctl)) | _4ECCEN | ECCPOS2 | ECCPOS_02,
-				FLCMNCR(flctl));
+		flctl->flcmncr_base |= _4ECCEN | ECCPOS2 | ECCPOS_02;
 	} else {
 		chip->ecc.mode = NAND_ECC_SOFT;
 	}
@@ -819,13 +863,13 @@ static int __devinit flctl_probe(struct platform_device *pdev)
819 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 863 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
820 if (!res) { 864 if (!res) {
821 dev_err(&pdev->dev, "failed to get I/O memory\n"); 865 dev_err(&pdev->dev, "failed to get I/O memory\n");
822 goto err; 866 goto err_iomap;
823 } 867 }
824 868
825 flctl->reg = ioremap(res->start, resource_size(res)); 869 flctl->reg = ioremap(res->start, resource_size(res));
826 if (flctl->reg == NULL) { 870 if (flctl->reg == NULL) {
827 dev_err(&pdev->dev, "failed to remap I/O memory\n"); 871 dev_err(&pdev->dev, "failed to remap I/O memory\n");
828 goto err; 872 goto err_iomap;
829 } 873 }
830 874
831 platform_set_drvdata(pdev, flctl); 875 platform_set_drvdata(pdev, flctl);
@@ -833,9 +877,9 @@ static int __devinit flctl_probe(struct platform_device *pdev)
833 nand = &flctl->chip; 877 nand = &flctl->chip;
834 flctl_mtd->priv = nand; 878 flctl_mtd->priv = nand;
835 flctl->pdev = pdev; 879 flctl->pdev = pdev;
880 flctl->flcmncr_base = pdata->flcmncr_val;
836 flctl->hwecc = pdata->has_hwecc; 881 flctl->hwecc = pdata->has_hwecc;
837 882 flctl->holden = pdata->use_holden;
838 flctl_register_init(flctl, pdata->flcmncr_val);
839 883
840 nand->options = NAND_NO_AUTOINCR; 884 nand->options = NAND_NO_AUTOINCR;
841 885
@@ -855,23 +899,28 @@ static int __devinit flctl_probe(struct platform_device *pdev)
855 nand->read_word = flctl_read_word; 899 nand->read_word = flctl_read_word;
856 } 900 }
857 901
902 pm_runtime_enable(&pdev->dev);
903 pm_runtime_resume(&pdev->dev);
904
858 ret = nand_scan_ident(flctl_mtd, 1, NULL); 905 ret = nand_scan_ident(flctl_mtd, 1, NULL);
859 if (ret) 906 if (ret)
860 goto err; 907 goto err_chip;
861 908
862 ret = flctl_chip_init_tail(flctl_mtd); 909 ret = flctl_chip_init_tail(flctl_mtd);
863 if (ret) 910 if (ret)
864 goto err; 911 goto err_chip;
865 912
866 ret = nand_scan_tail(flctl_mtd); 913 ret = nand_scan_tail(flctl_mtd);
867 if (ret) 914 if (ret)
868 goto err; 915 goto err_chip;
869 916
870 mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts); 917 mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
871 918
872 return 0; 919 return 0;
873 920
874err: 921err_chip:
922 pm_runtime_disable(&pdev->dev);
923err_iomap:
875 kfree(flctl); 924 kfree(flctl);
876 return ret; 925 return ret;
877} 926}
@@ -881,6 +930,7 @@ static int __devexit flctl_remove(struct platform_device *pdev)
881 struct sh_flctl *flctl = platform_get_drvdata(pdev); 930 struct sh_flctl *flctl = platform_get_drvdata(pdev);
882 931
883 nand_release(&flctl->mtd); 932 nand_release(&flctl->mtd);
933 pm_runtime_disable(&pdev->dev);
884 kfree(flctl); 934 kfree(flctl);
885 935
886 return 0; 936 return 0;
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index b175c0fd8b93..3421e3762a5a 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -167,6 +167,7 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
 	this->ecc.mode = NAND_ECC_HW;
 	this->ecc.size = 256;
 	this->ecc.bytes = 3;
+	this->ecc.strength = 1;
 	this->badblock_pattern = data->badblock_pattern;
 	this->ecc.layout = data->ecc_layout;
 	this->ecc.hwctl = sharpsl_nand_enable_hwecc;
@@ -181,8 +182,8 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
 	/* Register the partitions */
 	sharpsl->mtd.name = "sharpsl-nand";
 
-	err = mtd_device_parse_register(&sharpsl->mtd, NULL, 0,
-			data->partitions, data->nr_partitions);
+	err = mtd_device_parse_register(&sharpsl->mtd, NULL, NULL,
+					data->partitions, data->nr_partitions);
 	if (err)
 		goto err_add;
 
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 6caa0cd9d6a7..5aa518081c51 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -430,6 +430,7 @@ static int tmio_probe(struct platform_device *dev)
 	nand_chip->ecc.mode = NAND_ECC_HW;
 	nand_chip->ecc.size = 512;
 	nand_chip->ecc.bytes = 6;
+	nand_chip->ecc.strength = 2;
 	nand_chip->ecc.hwctl = tmio_nand_enable_hwecc;
 	nand_chip->ecc.calculate = tmio_nand_calculate_ecc;
 	nand_chip->ecc.correct = tmio_nand_correct_data;
@@ -456,9 +457,9 @@ static int tmio_probe(struct platform_device *dev)
 		goto err_scan;
 	}
 	/* Register the partitions */
-	retval = mtd_device_parse_register(mtd, NULL, 0,
-			data ? data->partition : NULL,
-			data ? data->num_partitions : 0);
+	retval = mtd_device_parse_register(mtd, NULL, NULL,
+					   data ? data->partition : NULL,
+					   data ? data->num_partitions : 0);
 	if (!retval)
 		return retval;
 
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index c7c4f1d11c77..26398dcf21cf 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -356,6 +356,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
 	/* txx9ndfmc_nand_scan will overwrite ecc.size and ecc.bytes */
 	chip->ecc.size = 256;
 	chip->ecc.bytes = 3;
+	chip->ecc.strength = 1;
 	chip->chip_delay = 100;
 	chip->controller = &drvdata->hw_control;
 
@@ -386,7 +387,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
 		}
 		mtd->name = txx9_priv->mtdname;
 
-		mtd_device_parse_register(mtd, NULL, 0, NULL, 0);
+		mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
 		drvdata->mtds[i] = mtd;
 	}
 
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index a75382aff5f6..c5f4ebf4b384 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -56,13 +56,6 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
 	if (memcmp(mtd->name, "DiskOnChip", 10))
 		return;
 
-	if (!mtd_can_have_bb(mtd)) {
-		printk(KERN_ERR
-"NFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
-"Please use the new diskonchip driver under the NAND subsystem.\n");
-		return;
-	}
-
 	pr_debug("NFTL: add_mtd for %s\n", mtd->name);
 
 	nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL);
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 0ccd5bff2544..1c4f97c63e62 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -70,9 +70,9 @@ static int __devinit generic_onenand_probe(struct platform_device *pdev)
 		goto out_iounmap;
 	}
 
-	err = mtd_device_parse_register(&info->mtd, NULL, 0,
-			pdata ? pdata->parts : NULL,
-			pdata ? pdata->nr_parts : 0);
+	err = mtd_device_parse_register(&info->mtd, NULL, NULL,
+					pdata ? pdata->parts : NULL,
+					pdata ? pdata->nr_parts : 0);
 
 	platform_set_drvdata(pdev, info);
 
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 7e9ea6852b67..398a82783848 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -751,9 +751,9 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
 	if ((r = onenand_scan(&c->mtd, 1)) < 0)
 		goto err_release_regulator;
 
-	r = mtd_device_parse_register(&c->mtd, NULL, 0,
-			pdata ? pdata->parts : NULL,
-			pdata ? pdata->nr_parts : 0);
+	r = mtd_device_parse_register(&c->mtd, NULL, NULL,
+				      pdata ? pdata->parts : NULL,
+				      pdata ? pdata->nr_parts : 0);
 	if (r)
 		goto err_release_onenand;
 
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index a061bc163da2..b3ce12ef359e 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1753,16 +1753,6 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1753 pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to, 1753 pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
1754 (int)len); 1754 (int)len);
1755 1755
1756 /* Initialize retlen, in case of early exit */
1757 *retlen = 0;
1758
1759 /* Do not allow writes past end of device */
1760 if (unlikely((to + len) > mtd->size)) {
1761 printk(KERN_ERR "%s: Attempt write to past end of device\n",
1762 __func__);
1763 return -EINVAL;
1764 }
1765
1766 /* Reject writes, which are not page aligned */ 1756 /* Reject writes, which are not page aligned */
1767 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) { 1757 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
1768 printk(KERN_ERR "%s: Attempt to write not page aligned data\n", 1758 printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
@@ -1890,13 +1880,6 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1890 ops->retlen = 0; 1880 ops->retlen = 0;
1891 ops->oobretlen = 0; 1881 ops->oobretlen = 0;
1892 1882
1893 /* Do not allow writes past end of device */
1894 if (unlikely((to + len) > mtd->size)) {
1895 printk(KERN_ERR "%s: Attempt write to past end of device\n",
1896 __func__);
1897 return -EINVAL;
1898 }
1899
1900 /* Reject writes, which are not page aligned */ 1883 /* Reject writes, which are not page aligned */
1901 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) { 1884 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
1902 printk(KERN_ERR "%s: Attempt to write not page aligned data\n", 1885 printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
@@ -2493,12 +2476,6 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
2493 (unsigned long long)instr->addr, 2476 (unsigned long long)instr->addr,
2494 (unsigned long long)instr->len); 2477 (unsigned long long)instr->len);
2495 2478
2496 /* Do not allow erase past end of device */
2497 if (unlikely((len + addr) > mtd->size)) {
2498 printk(KERN_ERR "%s: Erase past end of device\n", __func__);
2499 return -EINVAL;
2500 }
2501
2502 if (FLEXONENAND(this)) { 2479 if (FLEXONENAND(this)) {
2503 /* Find the eraseregion of this address */ 2480 /* Find the eraseregion of this address */
2504 int i = flexonenand_region(mtd, addr); 2481 int i = flexonenand_region(mtd, addr);
@@ -2525,8 +2502,6 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
2525 return -EINVAL; 2502 return -EINVAL;
2526 } 2503 }
2527 2504
2528 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
2529
2530 /* Grab the lock and see if the device is available */ 2505 /* Grab the lock and see if the device is available */
2531 onenand_get_device(mtd, FL_ERASING); 2506 onenand_get_device(mtd, FL_ERASING);
2532 2507
@@ -4103,33 +4078,34 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
4103 mtd->oobavail = this->ecclayout->oobavail; 4078 mtd->oobavail = this->ecclayout->oobavail;
4104 4079
4105 mtd->ecclayout = this->ecclayout; 4080 mtd->ecclayout = this->ecclayout;
4081 mtd->ecc_strength = 1;
4106 4082
4107 /* Fill in remaining MTD driver data */ 4083 /* Fill in remaining MTD driver data */
4108 mtd->type = ONENAND_IS_MLC(this) ? MTD_MLCNANDFLASH : MTD_NANDFLASH; 4084 mtd->type = ONENAND_IS_MLC(this) ? MTD_MLCNANDFLASH : MTD_NANDFLASH;
4109 mtd->flags = MTD_CAP_NANDFLASH; 4085 mtd->flags = MTD_CAP_NANDFLASH;
4110 mtd->erase = onenand_erase; 4086 mtd->_erase = onenand_erase;
4111 mtd->point = NULL; 4087 mtd->_point = NULL;
4112 mtd->unpoint = NULL; 4088 mtd->_unpoint = NULL;
4113 mtd->read = onenand_read; 4089 mtd->_read = onenand_read;
4114 mtd->write = onenand_write; 4090 mtd->_write = onenand_write;
4115 mtd->read_oob = onenand_read_oob; 4091 mtd->_read_oob = onenand_read_oob;
4116 mtd->write_oob = onenand_write_oob; 4092 mtd->_write_oob = onenand_write_oob;
4117 mtd->panic_write = onenand_panic_write; 4093 mtd->_panic_write = onenand_panic_write;
4118#ifdef CONFIG_MTD_ONENAND_OTP 4094#ifdef CONFIG_MTD_ONENAND_OTP
4119 mtd->get_fact_prot_info = onenand_get_fact_prot_info; 4095 mtd->_get_fact_prot_info = onenand_get_fact_prot_info;
4120 mtd->read_fact_prot_reg = onenand_read_fact_prot_reg; 4096 mtd->_read_fact_prot_reg = onenand_read_fact_prot_reg;
4121 mtd->get_user_prot_info = onenand_get_user_prot_info; 4097 mtd->_get_user_prot_info = onenand_get_user_prot_info;
4122 mtd->read_user_prot_reg = onenand_read_user_prot_reg; 4098 mtd->_read_user_prot_reg = onenand_read_user_prot_reg;
4123 mtd->write_user_prot_reg = onenand_write_user_prot_reg; 4099 mtd->_write_user_prot_reg = onenand_write_user_prot_reg;
4124 mtd->lock_user_prot_reg = onenand_lock_user_prot_reg; 4100 mtd->_lock_user_prot_reg = onenand_lock_user_prot_reg;
4125#endif 4101#endif
4126 mtd->sync = onenand_sync; 4102 mtd->_sync = onenand_sync;
4127 mtd->lock = onenand_lock; 4103 mtd->_lock = onenand_lock;
4128 mtd->unlock = onenand_unlock; 4104 mtd->_unlock = onenand_unlock;
4129 mtd->suspend = onenand_suspend; 4105 mtd->_suspend = onenand_suspend;
4130 mtd->resume = onenand_resume; 4106 mtd->_resume = onenand_resume;
4131 mtd->block_isbad = onenand_block_isbad; 4107 mtd->_block_isbad = onenand_block_isbad;
4132 mtd->block_markbad = onenand_block_markbad; 4108 mtd->_block_markbad = onenand_block_markbad;
4133 mtd->owner = THIS_MODULE; 4109 mtd->owner = THIS_MODULE;
4134 mtd->writebufsize = mtd->writesize; 4110 mtd->writebufsize = mtd->writesize;
4135 4111
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index fa1ee43f735b..8e4b3f2742ba 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -923,7 +923,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	if (!r) {
 		dev_err(&pdev->dev, "no buffer memory resource defined\n");
-		return -ENOENT;
+		err = -ENOENT;
 		goto ahb_resource_failed;
 	}
 
@@ -964,7 +964,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	if (!r) {
 		dev_err(&pdev->dev, "no dma memory resource defined\n");
-		return -ENOENT;
+		err = -ENOENT;
 		goto dma_resource_failed;
 	}
 
@@ -1014,7 +1014,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
 	if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
 		dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
 
-	err = mtd_device_parse_register(mtd, NULL, 0,
+	err = mtd_device_parse_register(mtd, NULL, NULL,
 					pdata ? pdata->parts : NULL,
 					pdata ? pdata->nr_parts : 0);
 
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index 48970c14beff..580035c803d6 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -78,8 +78,7 @@ static int parse_redboot_partitions(struct mtd_info *master,
 
 	if ( directory < 0 ) {
 		offset = master->size + directory * master->erasesize;
-		while (mtd_can_have_bb(master) &&
-		       mtd_block_isbad(master, offset)) {
+		while (mtd_block_isbad(master, offset)) {
 			if (!offset) {
 			nogood:
 				printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n");
@@ -89,8 +88,7 @@ static int parse_redboot_partitions(struct mtd_info *master,
 		}
 	} else {
 		offset = directory * master->erasesize;
-		while (mtd_can_have_bb(master) &&
-		       mtd_block_isbad(master, offset)) {
+		while (mtd_block_isbad(master, offset)) {
 			offset += master->erasesize;
 			if (offset == master->size)
 				goto nogood;
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 072ed5970e2f..9e2dfd517aa5 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1256,7 +1256,7 @@ static void sm_remove_dev(struct mtd_blktrans_dev *dev)
 
 static struct mtd_blktrans_ops sm_ftl_ops = {
 	.name		= "smblk",
-	.major		= -1,
+	.major		= 0,
 	.part_bits	= SM_FTL_PARTN_BITS,
 	.blksize	= SM_SECTOR_SIZE,
 	.getgeo		= sm_getgeo,
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 941bc3c05d6e..90b98822d9a4 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -174,11 +174,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
174 int err = 0, lnum, offs, total_read; 174 int err = 0, lnum, offs, total_read;
175 struct gluebi_device *gluebi; 175 struct gluebi_device *gluebi;
176 176
177 if (len < 0 || from < 0 || from + len > mtd->size)
178 return -EINVAL;
179
180 gluebi = container_of(mtd, struct gluebi_device, mtd); 177 gluebi = container_of(mtd, struct gluebi_device, mtd);
181
182 lnum = div_u64_rem(from, mtd->erasesize, &offs); 178 lnum = div_u64_rem(from, mtd->erasesize, &offs);
183 total_read = len; 179 total_read = len;
184 while (total_read) { 180 while (total_read) {
@@ -218,14 +214,7 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
218 int err = 0, lnum, offs, total_written; 214 int err = 0, lnum, offs, total_written;
219 struct gluebi_device *gluebi; 215 struct gluebi_device *gluebi;
220 216
221 if (len < 0 || to < 0 || len + to > mtd->size)
222 return -EINVAL;
223
224 gluebi = container_of(mtd, struct gluebi_device, mtd); 217 gluebi = container_of(mtd, struct gluebi_device, mtd);
225
226 if (!(mtd->flags & MTD_WRITEABLE))
227 return -EROFS;
228
229 lnum = div_u64_rem(to, mtd->erasesize, &offs); 218 lnum = div_u64_rem(to, mtd->erasesize, &offs);
230 219
231 if (len % mtd->writesize || offs % mtd->writesize) 220 if (len % mtd->writesize || offs % mtd->writesize)
@@ -265,21 +254,13 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
265 int err, i, lnum, count; 254 int err, i, lnum, count;
266 struct gluebi_device *gluebi; 255 struct gluebi_device *gluebi;
267 256
268 if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize)
269 return -EINVAL;
270 if (instr->len < 0 || instr->addr + instr->len > mtd->size)
271 return -EINVAL;
272 if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd)) 257 if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd))
273 return -EINVAL; 258 return -EINVAL;
274 259
275 lnum = mtd_div_by_eb(instr->addr, mtd); 260 lnum = mtd_div_by_eb(instr->addr, mtd);
276 count = mtd_div_by_eb(instr->len, mtd); 261 count = mtd_div_by_eb(instr->len, mtd);
277
278 gluebi = container_of(mtd, struct gluebi_device, mtd); 262 gluebi = container_of(mtd, struct gluebi_device, mtd);
279 263
280 if (!(mtd->flags & MTD_WRITEABLE))
281 return -EROFS;
282
283 for (i = 0; i < count - 1; i++) { 264 for (i = 0; i < count - 1; i++) {
284 err = ubi_leb_unmap(gluebi->desc, lnum + i); 265 err = ubi_leb_unmap(gluebi->desc, lnum + i);
285 if (err) 266 if (err)
@@ -340,11 +321,11 @@ static int gluebi_create(struct ubi_device_info *di,
340 mtd->owner = THIS_MODULE; 321 mtd->owner = THIS_MODULE;
341 mtd->writesize = di->min_io_size; 322 mtd->writesize = di->min_io_size;
342 mtd->erasesize = vi->usable_leb_size; 323 mtd->erasesize = vi->usable_leb_size;
343 mtd->read = gluebi_read; 324 mtd->_read = gluebi_read;
344 mtd->write = gluebi_write; 325 mtd->_write = gluebi_write;
345 mtd->erase = gluebi_erase; 326 mtd->_erase = gluebi_erase;
346 mtd->get_device = gluebi_get_device; 327 mtd->_get_device = gluebi_get_device;
347 mtd->put_device = gluebi_put_device; 328 mtd->_put_device = gluebi_put_device;
348 329
349 /* 330 /*
350 * In case of dynamic a volume, MTD device size is just volume size. In 331 * In case of dynamic a volume, MTD device size is just volume size. In