author     Linus Torvalds <torvalds@linux-foundation.org>  2010-10-30 11:31:35 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-10-30 11:31:35 -0400
commit     79346507ad48895f41b438fa562b1965721f36b9 (patch)
tree       5c115ce87f1fbc0b530f30db56cecf824e9f6e05 /drivers/mtd
parent     706d4b12f8d7edd28d7e879a77235472da393edb (diff)
parent     40847437f15221b5822ba70550e8b9fcccfb9bb3 (diff)
Merge git://git.infradead.org/mtd-2.6
* git://git.infradead.org/mtd-2.6: (82 commits)
  mtd: fix build error in m25p80.c
  mtd: Remove redundant mutex from mtd_blkdevs.c
  MTD: Fix wrong check register_blkdev return value
  Revert "mtd: cleanup Kconfig dependencies"
  mtd: cfi_cmdset_0002: make sector erase command variable
  mtd: cfi_cmdset_0002: add CFI detection for SST 38VF640x chips
  mtd: cfi_util: add support for switching SST 39VF640xB chips into QRY mode
  mtd: cfi_cmdset_0001: use defined value of P_ID_INTEL_PERFORMANCE instead of hardcoded one
  block2mtd: dubious assignment
  P4080/mtd: Fix the freescale lbc issue with 36bit mode
  P4080/eLBC: Make Freescale elbc interrupt common to elbc devices
  mtd: phram: use KBUILD_MODNAME
  mtd: OneNAND: S5PC110: Fix double call suspend & resume function
  mtd: nand: fix MTD_MODE_RAW writes
  jffs2: use kmemdup
  mtd: sm_ftl: cosmetic, use bool when possible
  mtd: r852: remove useless pci powerup/down from suspend/resume routines
  mtd: blktrans: fix a race vs kthread_stop
  mtd: blktrans: kill BKL
  mtd: allow to unload the mtdtrans module if its block devices aren't open
  ...

Fix up trivial whitespace-introduced conflict in drivers/mtd/mtdchar.c
Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c |   4
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c |  83
-rw-r--r--  drivers/mtd/chips/cfi_probe.c       |   2
-rw-r--r--  drivers/mtd/chips/cfi_util.c        |   7
-rw-r--r--  drivers/mtd/devices/block2mtd.c     |   1
-rw-r--r--  drivers/mtd/devices/m25p80.c        |   6
-rw-r--r--  drivers/mtd/devices/phram.c         |   2
-rw-r--r--  drivers/mtd/maps/Kconfig            |   9
-rw-r--r--  drivers/mtd/maps/Makefile           |   1
-rw-r--r--  drivers/mtd/maps/bcm963xx-flash.c   | 271
-rw-r--r--  drivers/mtd/maps/gpio-addr-flash.c  |  10
-rw-r--r--  drivers/mtd/maps/pcmciamtd.c        |   4
-rw-r--r--  drivers/mtd/maps/physmap_of.c       |  20
-rw-r--r--  drivers/mtd/mtd_blkdevs.c           |  68
-rw-r--r--  drivers/mtd/mtdchar.c               | 111
-rw-r--r--  drivers/mtd/mtdpart.c               | 154
-rw-r--r--  drivers/mtd/nand/Kconfig            |  15
-rw-r--r--  drivers/mtd/nand/Makefile           |   1
-rw-r--r--  drivers/mtd/nand/bf5xx_nand.c       |  14
-rw-r--r--  drivers/mtd/nand/davinci_nand.c     |  11
-rw-r--r--  drivers/mtd/nand/denali.c           |   1
-rw-r--r--  drivers/mtd/nand/fsl_elbc_nand.c    | 484
-rw-r--r--  drivers/mtd/nand/fsl_upm.c          |  10
-rw-r--r--  drivers/mtd/nand/fsmc_nand.c        | 866
-rw-r--r--  drivers/mtd/nand/mpc5121_nfc.c      |  11
-rw-r--r--  drivers/mtd/nand/nand_base.c        | 338
-rw-r--r--  drivers/mtd/nand/nand_bbt.c         | 250
-rw-r--r--  drivers/mtd/nand/nand_ids.c         |  31
-rw-r--r--  drivers/mtd/nand/nandsim.c          |  19
-rw-r--r--  drivers/mtd/nand/ndfc.c             |   8
-rw-r--r--  drivers/mtd/nand/omap2.c            |   4
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c      | 348
-rw-r--r--  drivers/mtd/nand/r852.c             |  30
-rw-r--r--  drivers/mtd/nand/r852.h             |   2
-rw-r--r--  drivers/mtd/ofpart.c                |   2
-rw-r--r--  drivers/mtd/onenand/Kconfig         |   7
-rw-r--r--  drivers/mtd/onenand/onenand_base.c  |   7
-rw-r--r--  drivers/mtd/onenand/samsung.c       | 133
-rw-r--r--  drivers/mtd/sm_ftl.h                |   8
39 files changed, 2468 insertions, 885 deletions
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 9e2b7e9e0ad9..ad9268b44416 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -1496,7 +1496,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 
 	switch (mode) {
 	case FL_WRITING:
-		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
+		write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
 		break;
 	case FL_OTP_WRITE:
 		write_cmd = CMD(0xc0);
@@ -1661,7 +1661,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 	cmd_adr = adr & ~(wbufsize-1);
 
 	/* Let's determine this according to the interleave only once */
-	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
+	write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
 
 	mutex_lock(&chip->mutex);
 	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index ba29d2f0ffd7..3b8e32d87977 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -291,6 +291,23 @@ static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
 
 	cfi->addr_unlock1 = 0x555;
 	cfi->addr_unlock2 = 0x2AA;
+
+	cfi->sector_erase_cmd = CMD(0x50);
+}
+
+static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd, void *param)
+{
+	struct map_info *map = mtd->priv;
+	struct cfi_private *cfi = map->fldrv_priv;
+
+	fixup_sst39vf_rev_b(mtd, param);
+
+	/*
+	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
+	 * it should report a size of 8KBytes (0x0020*256).
+	 */
+	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
+	pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
 }
 
 static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
@@ -317,14 +334,14 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
 
 /* Used to fix CFI-Tables of chips without Extended Query Tables */
 static struct cfi_fixup cfi_nopri_fixup_table[] = {
-	{ CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, // SST39VF1602
-	{ CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, // SST39VF1601
-	{ CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, // SST39VF3202
-	{ CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, // SST39VF3201
-	{ CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, // SST39VF3202B
-	{ CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, // SST39VF3201B
-	{ CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, // SST39VF6402B
-	{ CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, // SST39VF6401B
+	{ CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, /* SST39VF1602 */
+	{ CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, /* SST39VF1601 */
+	{ CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, /* SST39VF3202 */
+	{ CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, /* SST39VF3201 */
+	{ CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, /* SST39VF3202B */
+	{ CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, /* SST39VF3201B */
+	{ CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, /* SST39VF6402B */
+	{ CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, /* SST39VF6401B */
 	{ 0, 0, NULL, NULL }
 };
 
@@ -344,6 +361,10 @@ static struct cfi_fixup cfi_fixup_table[] = {
 	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
 	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
 	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
+	{ CFI_MFR_SST, 0x536A, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6402 */
+	{ CFI_MFR_SST, 0x536B, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6401 */
+	{ CFI_MFR_SST, 0x536C, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6404 */
+	{ CFI_MFR_SST, 0x536D, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6403 */
 #if !FORCE_WORD_WRITE
 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
 #endif
@@ -374,6 +395,13 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi,
 	if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
 	    extp->MajorVersion == '0')
 		extp->MajorVersion = '1';
+	/*
+	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
+	 */
+	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
+		extp->MajorVersion = '1';
+		extp->MinorVersion = '0';
+	}
 }
 
 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
@@ -545,15 +573,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
 		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
 		goto setup_err;
 	}
-#if 0
-	// debug
-	for (i=0; i<mtd->numeraseregions;i++){
-		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
-		       i,mtd->eraseregions[i].offset,
-		       mtd->eraseregions[i].erasesize,
-		       mtd->eraseregions[i].numblocks);
-	}
-#endif
 
 	__module_get(THIS_MODULE);
 	register_reboot_notifier(&mtd->reboot_notifier);
@@ -674,7 +693,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 			 * there was an error (so leave the erase
 			 * routine to recover from it) or we trying to
 			 * use the erase-in-progress sector. */
-			map_write(map, CMD(0x30), chip->in_progress_block_addr);
+			map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
 			chip->state = FL_ERASING;
 			chip->oldstate = FL_READY;
 			printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
@@ -727,7 +746,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
 	switch(chip->oldstate) {
 	case FL_ERASING:
 		chip->state = chip->oldstate;
-		map_write(map, CMD(0x30), chip->in_progress_block_addr);
+		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
 		chip->oldstate = FL_READY;
 		chip->state = FL_ERASING;
 		break;
@@ -870,7 +889,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
 		local_irq_disable();
 
 		/* Resume the write or erase operation */
-		map_write(map, CMD(0x30), adr);
+		map_write(map, cfi->sector_erase_cmd, adr);
 		chip->state = oldstate;
 		start = xip_currtime();
 	} else if (usec >= 1000000/HZ) {
@@ -1025,9 +1044,6 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
 	mutex_lock(&chip->mutex);
 
 	if (chip->state != FL_READY){
-#if 0
-		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
-#endif
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		add_wait_queue(&chip->wq, &wait);
 
@@ -1035,10 +1051,6 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
 
 		schedule();
 		remove_wait_queue(&chip->wq, &wait);
-#if 0
-		if(signal_pending(current))
-			return -EINTR;
-#endif
 		timeo = jiffies + HZ;
 
 		goto retry;
@@ -1246,9 +1258,6 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 		mutex_lock(&cfi->chips[chipnum].mutex);
 
 		if (cfi->chips[chipnum].state != FL_READY) {
-#if 0
-			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
-#endif
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
 
@@ -1256,10 +1265,6 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 
 			schedule();
 			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
-#if 0
-			if(signal_pending(current))
-				return -EINTR;
-#endif
 			goto retry;
 		}
 
@@ -1324,9 +1329,6 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 		mutex_lock(&cfi->chips[chipnum].mutex);
 
 		if (cfi->chips[chipnum].state != FL_READY) {
-#if 0
-			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
-#endif
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
 
@@ -1334,10 +1336,6 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 
 			schedule();
 			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
-#if 0
-			if(signal_pending(current))
-				return -EINTR;
-#endif
 			goto retry1;
 		}
 
@@ -1396,7 +1394,6 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 
 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
-	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 
 	/* Write Buffer Load */
 	map_write(map, CMD(0x25), cmd_adr);
@@ -1675,7 +1672,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
-	map_write(map, CMD(0x30), adr);
+	map_write(map, cfi->sector_erase_cmd, adr);
 
 	chip->state = FL_ERASING;
 	chip->erase_suspended = 0;
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index 8f5b96aa87a0..d25535279404 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -177,6 +177,8 @@ static int __xipram cfi_chip_setup(struct map_info *map,
 
 	cfi->cfi_mode = CFI_MODE_CFI;
 
+	cfi->sector_erase_cmd = CMD(0x30);
+
 	/* Read the CFI info structure */
 	xip_disable_qry(base, map, cfi);
 	for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++)
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index e503b2ca894d..360525c637d2 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -77,6 +77,13 @@ int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
 	cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
 	if (cfi_qry_present(map, base, cfi))
 		return 1;
+	/* SST 39VF640xB */
+	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+	cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL);
+	cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL);
+	cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
+	if (cfi_qry_present(map, base, cfi))
+		return 1;
 	/* QRY not found */
 	return 0;
 }
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 93651865ddbe..2cf0cc6a4189 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -91,7 +91,6 @@ static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
 	} else
 		instr->state = MTD_ERASE_DONE;
 
-	instr->state = MTD_ERASE_DONE;
 	mtd_erase_callback(instr);
 	return err;
 }
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index ea22520c0406..bf5a002209bd 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -661,11 +661,14 @@ static const struct spi_device_id m25p_ids[] = {
661 { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) }, 661 { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
662 { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) }, 662 { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
663 { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) }, 663 { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
664 { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SECT_4K) },
664 { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) }, 665 { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
665 { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) }, 666 { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
666 { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, 667 { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
667 { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) }, 668 { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
668 { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) }, 669 { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
670 { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
671 { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
669 672
670 /* SST -- large erase sizes are "overlays", "sectors" are 4K */ 673 /* SST -- large erase sizes are "overlays", "sectors" are 4K */
671 { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K) }, 674 { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K) },
@@ -714,6 +717,7 @@ static const struct spi_device_id m25p_ids[] = {
714 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, 717 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
715 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) }, 718 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
716 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, 719 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
720 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
717 721
718 /* Catalyst / On Semiconductor -- non-JEDEC */ 722 /* Catalyst / On Semiconductor -- non-JEDEC */
719 { "cat25c11", CAT25_INFO( 16, 8, 16, 1) }, 723 { "cat25c11", CAT25_INFO( 16, 8, 16, 1) },
@@ -924,7 +928,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
 		nr_parts = data->nr_parts;
 	}
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_MTD_OF_PARTS
 	if (nr_parts <= 0 && spi->dev.of_node) {
 		nr_parts = of_mtd_parse_partitions(&spi->dev,
 				spi->dev.of_node, &parts);
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 1696bbecaa7e..52393282eaf1 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -15,7 +15,7 @@
  * phram=swap,64Mi,128Mi phram=test,900Mi,1Mi
  */
 
-#define pr_fmt(fmt) "phram: " fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <asm/io.h>
 #include <linux/init.h>
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 962212628f6e..a0dd7bba9481 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -251,6 +251,15 @@ config MTD_NETtel
 	help
 	  Support for flash chips on NETtel/SecureEdge/SnapGear boards.
 
+config MTD_BCM963XX
+	tristate "Map driver for Broadcom BCM963xx boards"
+	depends on BCM63XX
+	select MTD_MAP_BANK_WIDTH_2
+	select MTD_CFI_I1
+	help
+	  Support for parsing CFE image tag and creating MTD partitions on
+	  Broadcom BCM63xx boards.
+
 config MTD_DILNETPC
 	tristate "CFI Flash device mapped on DIL/Net PC"
 	depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index f216bb573713..c7869c7a6b18 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -58,3 +58,4 @@ obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o
 obj-$(CONFIG_MTD_RBTX4939)	+= rbtx4939-flash.o
 obj-$(CONFIG_MTD_VMU)		+= vmu-flash.o
 obj-$(CONFIG_MTD_GPIO_ADDR)	+= gpio-addr-flash.o
+obj-$(CONFIG_MTD_BCM963XX)	+= bcm963xx-flash.o
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
new file mode 100644
index 000000000000..d175c120ee84
--- /dev/null
+++ b/drivers/mtd/maps/bcm963xx-flash.c
@@ -0,0 +1,271 @@
1/*
2 * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
3 * Mike Albon <malbon@openwrt.org>
4 * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/slab.h>
24#include <linux/mtd/map.h>
25#include <linux/mtd/mtd.h>
26#include <linux/mtd/partitions.h>
27#include <linux/vmalloc.h>
28#include <linux/platform_device.h>
29#include <linux/io.h>
30
31#include <asm/mach-bcm63xx/bcm963xx_tag.h>
32
33#define BCM63XX_BUSWIDTH 2 /* Buswidth */
34#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
35
36#define PFX KBUILD_MODNAME ": "
37
38static struct mtd_partition *parsed_parts;
39
40static struct mtd_info *bcm963xx_mtd_info;
41
42static struct map_info bcm963xx_map = {
43 .name = "bcm963xx",
44 .bankwidth = BCM63XX_BUSWIDTH,
45};
46
47static int parse_cfe_partitions(struct mtd_info *master,
48 struct mtd_partition **pparts)
49{
50 /* CFE, NVRAM and global Linux are always present */
51 int nrparts = 3, curpart = 0;
52 struct bcm_tag *buf;
53 struct mtd_partition *parts;
54 int ret;
55 size_t retlen;
56 unsigned int rootfsaddr, kerneladdr, spareaddr;
57 unsigned int rootfslen, kernellen, sparelen, totallen;
58 int namelen = 0;
59 int i;
60 char *boardid;
61 char *tagversion;
62
63 /* Allocate memory for buffer */
64 buf = vmalloc(sizeof(struct bcm_tag));
65 if (!buf)
66 return -ENOMEM;
67
68 /* Get the tag */
69 ret = master->read(master, master->erasesize, sizeof(struct bcm_tag),
70 &retlen, (void *)buf);
71 if (retlen != sizeof(struct bcm_tag)) {
72 vfree(buf);
73 return -EIO;
74 }
75
76 sscanf(buf->kernel_address, "%u", &kerneladdr);
77 sscanf(buf->kernel_length, "%u", &kernellen);
78 sscanf(buf->total_length, "%u", &totallen);
79 tagversion = &(buf->tag_version[0]);
80 boardid = &(buf->board_id[0]);
81
82 printk(KERN_INFO PFX "CFE boot tag found with version %s "
83 "and board type %s\n", tagversion, boardid);
84
85 kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
86 rootfsaddr = kerneladdr + kernellen;
87 spareaddr = roundup(totallen, master->erasesize) + master->erasesize;
88 sparelen = master->size - spareaddr - master->erasesize;
89 rootfslen = spareaddr - rootfsaddr;
90
91 /* Determine number of partitions */
92 namelen = 8;
93 if (rootfslen > 0) {
94 nrparts++;
95 namelen += 6;
96 };
97 if (kernellen > 0) {
98 nrparts++;
99 namelen += 6;
100 };
101
102 /* Ask kernel for more memory */
103 parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
104 if (!parts) {
105 vfree(buf);
106 return -ENOMEM;
107 };
108
109 /* Start building partition list */
110 parts[curpart].name = "CFE";
111 parts[curpart].offset = 0;
112 parts[curpart].size = master->erasesize;
113 curpart++;
114
115 if (kernellen > 0) {
116 parts[curpart].name = "kernel";
117 parts[curpart].offset = kerneladdr;
118 parts[curpart].size = kernellen;
119 curpart++;
120 };
121
122 if (rootfslen > 0) {
123 parts[curpart].name = "rootfs";
124 parts[curpart].offset = rootfsaddr;
125 parts[curpart].size = rootfslen;
126 if (sparelen > 0)
127 parts[curpart].size += sparelen;
128 curpart++;
129 };
130
131 parts[curpart].name = "nvram";
132 parts[curpart].offset = master->size - master->erasesize;
133 parts[curpart].size = master->erasesize;
134
135 /* Global partition "linux" to make easy firmware upgrade */
136 curpart++;
137 parts[curpart].name = "linux";
138 parts[curpart].offset = parts[0].size;
139 parts[curpart].size = master->size - parts[0].size - parts[3].size;
140
141 for (i = 0; i < nrparts; i++)
142 printk(KERN_INFO PFX "Partition %d is %s offset %lx and "
143 "length %lx\n", i, parts[i].name,
144 (long unsigned int)(parts[i].offset),
145 (long unsigned int)(parts[i].size));
146
147 printk(KERN_INFO PFX "Spare partition is %x offset and length %x\n",
148 spareaddr, sparelen);
149 *pparts = parts;
150 vfree(buf);
151
152 return nrparts;
153};
154
155static int bcm963xx_detect_cfe(struct mtd_info *master)
156{
157 int idoffset = 0x4e0;
158 static char idstring[8] = "CFE1CFE1";
159 char buf[9];
160 int ret;
161 size_t retlen;
162
163 ret = master->read(master, idoffset, 8, &retlen, (void *)buf);
164 buf[retlen] = 0;
165 printk(KERN_INFO PFX "Read Signature value of %s\n", buf);
166
167 return strncmp(idstring, buf, 8);
168}
169
170static int bcm963xx_probe(struct platform_device *pdev)
171{
172 int err = 0;
173 int parsed_nr_parts = 0;
174 char *part_type;
175 struct resource *r;
176
177 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
178 if (!r) {
179 dev_err(&pdev->dev, "no resource supplied\n");
180 return -ENODEV;
181 }
182
183 bcm963xx_map.phys = r->start;
184 bcm963xx_map.size = resource_size(r);
185 bcm963xx_map.virt = ioremap(r->start, resource_size(r));
186 if (!bcm963xx_map.virt) {
187 dev_err(&pdev->dev, "failed to ioremap\n");
188 return -EIO;
189 }
190
191 dev_info(&pdev->dev, "0x%08lx at 0x%08x\n",
192 bcm963xx_map.size, bcm963xx_map.phys);
193
194 simple_map_init(&bcm963xx_map);
195
196 bcm963xx_mtd_info = do_map_probe("cfi_probe", &bcm963xx_map);
197 if (!bcm963xx_mtd_info) {
198 dev_err(&pdev->dev, "failed to probe using CFI\n");
199 err = -EIO;
200 goto err_probe;
201 }
202
203 bcm963xx_mtd_info->owner = THIS_MODULE;
204
205 /* This is mutually exclusive */
206 if (bcm963xx_detect_cfe(bcm963xx_mtd_info) == 0) {
207 dev_info(&pdev->dev, "CFE bootloader detected\n");
208 if (parsed_nr_parts == 0) {
209 int ret = parse_cfe_partitions(bcm963xx_mtd_info,
210 &parsed_parts);
211 if (ret > 0) {
212 part_type = "CFE";
213 parsed_nr_parts = ret;
214 }
215 }
216 } else {
217 dev_info(&pdev->dev, "unsupported bootloader\n");
218 err = -ENODEV;
219 goto err_probe;
220 }
221
222 return add_mtd_partitions(bcm963xx_mtd_info, parsed_parts,
223 parsed_nr_parts);
224
225err_probe:
226 iounmap(bcm963xx_map.virt);
227 return err;
228}
229
230static int bcm963xx_remove(struct platform_device *pdev)
231{
232 if (bcm963xx_mtd_info) {
233 del_mtd_partitions(bcm963xx_mtd_info);
234 map_destroy(bcm963xx_mtd_info);
235 }
236
237 if (bcm963xx_map.virt) {
238 iounmap(bcm963xx_map.virt);
239 bcm963xx_map.virt = 0;
240 }
241
242 return 0;
243}
244
245static struct platform_driver bcm63xx_mtd_dev = {
246 .probe = bcm963xx_probe,
247 .remove = bcm963xx_remove,
248 .driver = {
249 .name = "bcm963xx-flash",
250 .owner = THIS_MODULE,
251 },
252};
253
254static int __init bcm963xx_mtd_init(void)
255{
256 return platform_driver_register(&bcm63xx_mtd_dev);
257}
258
259static void __exit bcm963xx_mtd_exit(void)
260{
261 platform_driver_unregister(&bcm63xx_mtd_dev);
262}
263
264module_init(bcm963xx_mtd_init);
265module_exit(bcm963xx_mtd_exit);
266
267MODULE_LICENSE("GPL");
268MODULE_DESCRIPTION("Broadcom BCM63xx MTD driver for CFE and RedBoot");
269MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
270MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
271MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 32e89d773b4e..af5707a80205 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -208,10 +208,14 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev)
 	if (!state)
 		return -ENOMEM;
 
+	/*
+	 * We cast start/end to known types in the boards file, so cast
+	 * away their pointer types here to the known types (gpios->xxx).
+	 */
 	state->gpio_count  = gpios->end;
-	state->gpio_addrs  = (void *)gpios->start;
+	state->gpio_addrs  = (void *)(unsigned long)gpios->start;
 	state->gpio_values = (void *)(state + 1);
-	state->win_size    = memory->end - memory->start + 1;
+	state->win_size    = resource_size(memory);
 	memset(state->gpio_values, 0xff, arr_size);
 
 	state->map.name    = DRIVER_NAME;
@@ -221,7 +225,7 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev)
 	state->map.copy_to    = gf_copy_to;
 	state->map.bankwidth  = pdata->width;
 	state->map.size       = state->win_size * (1 << state->gpio_count);
-	state->map.virt       = (void __iomem *)memory->start;
+	state->map.virt       = ioremap_nocache(memory->start, state->map.size);
 	state->map.phys       = NO_XIP;
 	state->map.map_priv_1 = (unsigned long)state;
 
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 57a1acfe22c4..917022948399 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -640,10 +640,6 @@ static int pcmciamtd_config(struct pcmcia_device *link)
 	}
 	dev_info(&dev->p_dev->dev, "mtd%d: %s\n", mtd->index, mtd->name);
 	return 0;
-
-	dev_err(&dev->p_dev->dev, "CS Error, exiting\n");
-	pcmciamtd_release(link);
-	return -ENODEV;
 }
 
 
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index ec3edf6e68b4..9861814aa027 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -50,7 +50,7 @@ static int parse_obsolete_partitions(struct platform_device *dev,
 {
 	int i, plen, nr_parts;
 	const struct {
-		u32 offset, len;
+		__be32 offset, len;
 	} *part;
 	const char *names;
 
@@ -69,9 +69,9 @@ static int parse_obsolete_partitions(struct platform_device *dev,
 	names = of_get_property(dp, "partition-names", &plen);
 
 	for (i = 0; i < nr_parts; i++) {
-		info->parts[i].offset = part->offset;
-		info->parts[i].size = part->len & ~1;
-		if (part->len & 1) /* bit 0 set signifies read only partition */
+		info->parts[i].offset = be32_to_cpu(part->offset);
+		info->parts[i].size = be32_to_cpu(part->len) & ~1;
+		if (be32_to_cpu(part->len) & 1) /* bit 0 set signifies read only partition */
 			info->parts[i].mask_flags = MTD_WRITEABLE;
 
 		if (names && (plen > 0)) {
@@ -226,11 +226,11 @@ static int __devinit of_flash_probe(struct platform_device *dev,
 	struct resource res;
 	struct of_flash *info;
 	const char *probe_type = match->data;
-	const u32 *width;
+	const __be32 *width;
 	int err;
 	int i;
 	int count;
-	const u32 *p;
+	const __be32 *p;
 	int reg_tuple_size;
 	struct mtd_info **mtd_list = NULL;
 	resource_size_t res_size;
@@ -267,9 +267,11 @@ static int __devinit of_flash_probe(struct platform_device *dev,
 	for (i = 0; i < count; i++) {
 		err = -ENXIO;
 		if (of_address_to_resource(dp, i, &res)) {
-			dev_err(&dev->dev, "Can't get IO address from device"
-				" tree\n");
-			goto err_out;
+			/*
+			 * Continue with next register tuple if this
+			 * one is not mappable
+			 */
+			continue;
 		}
 
 		dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n",
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 50ab431b24eb..cb20c67995d8 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -37,7 +37,6 @@
37 37
38#include "mtdcore.h" 38#include "mtdcore.h"
39 39
40static DEFINE_MUTEX(mtd_blkdevs_mutex);
41static LIST_HEAD(blktrans_majors); 40static LIST_HEAD(blktrans_majors);
42static DEFINE_MUTEX(blktrans_ref_mutex); 41static DEFINE_MUTEX(blktrans_ref_mutex);
43 42
@@ -133,6 +132,10 @@ static int mtd_blktrans_thread(void *arg)
133 132
134 if (!req && !(req = blk_fetch_request(rq))) { 133 if (!req && !(req = blk_fetch_request(rq))) {
135 set_current_state(TASK_INTERRUPTIBLE); 134 set_current_state(TASK_INTERRUPTIBLE);
135
136 if (kthread_should_stop())
137 set_current_state(TASK_RUNNING);
138
136 spin_unlock_irq(rq->queue_lock); 139 spin_unlock_irq(rq->queue_lock);
137 schedule(); 140 schedule();
138 spin_lock_irq(rq->queue_lock); 141 spin_lock_irq(rq->queue_lock);
@@ -176,54 +179,53 @@ static void mtd_blktrans_request(struct request_queue *rq)
176static int blktrans_open(struct block_device *bdev, fmode_t mode) 179static int blktrans_open(struct block_device *bdev, fmode_t mode)
177{ 180{
178 struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk); 181 struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
179 int ret; 182 int ret = 0;
180 183
181 if (!dev) 184 if (!dev)
182 return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/ 185 return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/
183 186
184 mutex_lock(&mtd_blkdevs_mutex);
185 mutex_lock(&dev->lock); 187 mutex_lock(&dev->lock);
186 188
187 if (!dev->mtd) { 189 if (dev->open++)
188 ret = -ENXIO;
189 goto unlock; 190 goto unlock;
190 }
191 191
192 ret = !dev->open++ && dev->tr->open ? dev->tr->open(dev) : 0; 192 kref_get(&dev->ref);
193 __module_get(dev->tr->owner);
194
195 if (dev->mtd) {
196 ret = dev->tr->open ? dev->tr->open(dev) : 0;
197 __get_mtd_device(dev->mtd);
198 }
193 199
194 /* Take another reference on the device so it won't go away till
195 last release */
196 if (!ret)
197 kref_get(&dev->ref);
198unlock: 200unlock:
199 mutex_unlock(&dev->lock); 201 mutex_unlock(&dev->lock);
200 blktrans_dev_put(dev); 202 blktrans_dev_put(dev);
201 mutex_unlock(&mtd_blkdevs_mutex);
202 return ret; 203 return ret;
203} 204}
204 205
205static int blktrans_release(struct gendisk *disk, fmode_t mode) 206static int blktrans_release(struct gendisk *disk, fmode_t mode)
206{ 207{
207 struct mtd_blktrans_dev *dev = blktrans_dev_get(disk); 208 struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
208 int ret = -ENXIO; 209 int ret = 0;
209 210
210 if (!dev) 211 if (!dev)
211 return ret; 212 return ret;
212 213
213 mutex_lock(&mtd_blkdevs_mutex);
214 mutex_lock(&dev->lock); 214 mutex_lock(&dev->lock);
215 215
216 /* Release one reference, we sure its not the last one here*/ 216 if (--dev->open)
217 kref_put(&dev->ref, blktrans_dev_release);
218
219 if (!dev->mtd)
220 goto unlock; 217 goto unlock;
221 218
222 ret = !--dev->open && dev->tr->release ? dev->tr->release(dev) : 0; 219 kref_put(&dev->ref, blktrans_dev_release);
220 module_put(dev->tr->owner);
221
222 if (dev->mtd) {
223 ret = dev->tr->release ? dev->tr->release(dev) : 0;
224 __put_mtd_device(dev->mtd);
225 }
223unlock: 226unlock:
224 mutex_unlock(&dev->lock); 227 mutex_unlock(&dev->lock);
225 blktrans_dev_put(dev); 228 blktrans_dev_put(dev);
226 mutex_unlock(&mtd_blkdevs_mutex);
227 return ret; 229 return ret;
228} 230}
229 231
@@ -256,7 +258,6 @@ static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
256 if (!dev) 258 if (!dev)
257 return ret; 259 return ret;
258 260
259 mutex_lock(&mtd_blkdevs_mutex);
260 mutex_lock(&dev->lock); 261 mutex_lock(&dev->lock);
261 262
262 if (!dev->mtd) 263 if (!dev->mtd)
@@ -271,7 +272,6 @@ static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
271 } 272 }
272unlock: 273unlock:
273 mutex_unlock(&dev->lock); 274 mutex_unlock(&dev->lock);
274 mutex_unlock(&mtd_blkdevs_mutex);
275 blktrans_dev_put(dev); 275 blktrans_dev_put(dev);
276 return ret; 276 return ret;
277} 277}
@@ -385,9 +385,6 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
385 385
386 gd->queue = new->rq; 386 gd->queue = new->rq;
387 387
388 __get_mtd_device(new->mtd);
389 __module_get(tr->owner);
390
391 /* Create processing thread */ 388 /* Create processing thread */
392 /* TODO: workqueue ? */ 389 /* TODO: workqueue ? */
393 new->thread = kthread_run(mtd_blktrans_thread, new, 390 new->thread = kthread_run(mtd_blktrans_thread, new,
@@ -410,8 +407,6 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
410 } 407 }
411 return 0; 408 return 0;
412error4: 409error4:
413 module_put(tr->owner);
414 __put_mtd_device(new->mtd);
415 blk_cleanup_queue(new->rq); 410 blk_cleanup_queue(new->rq);
416error3: 411error3:
417 put_disk(new->disk); 412 put_disk(new->disk);
@@ -448,17 +443,15 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
448 blk_start_queue(old->rq); 443 blk_start_queue(old->rq);
449 spin_unlock_irqrestore(&old->queue_lock, flags); 444 spin_unlock_irqrestore(&old->queue_lock, flags);
450 445
451 /* Ask trans driver for release to the mtd device */ 446 /* If the device is currently open, tell trans driver to close it,
447 then put mtd device, and don't touch it again */
452 mutex_lock(&old->lock); 448 mutex_lock(&old->lock);
453 if (old->open && old->tr->release) { 449 if (old->open) {
454 old->tr->release(old); 450 if (old->tr->release)
455 old->open = 0; 451 old->tr->release(old);
452 __put_mtd_device(old->mtd);
456 } 453 }
457 454
458 __put_mtd_device(old->mtd);
459 module_put(old->tr->owner);
460
461 /* At that point, we don't touch the mtd anymore */
462 old->mtd = NULL; 455 old->mtd = NULL;
463 456
464 mutex_unlock(&old->lock); 457 mutex_unlock(&old->lock);
@@ -508,13 +501,16 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
508 mutex_lock(&mtd_table_mutex); 501 mutex_lock(&mtd_table_mutex);
509 502
510 ret = register_blkdev(tr->major, tr->name); 503 ret = register_blkdev(tr->major, tr->name);
511 if (ret) { 504 if (ret < 0) {
512 printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n", 505 printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
513 tr->name, tr->major, ret); 506 tr->name, tr->major, ret);
514 mutex_unlock(&mtd_table_mutex); 507 mutex_unlock(&mtd_table_mutex);
515 return ret; 508 return ret;
516 } 509 }
517 510
511 if (ret)
512 tr->major = ret;
513
518 tr->blkshift = ffs(tr->blksize) - 1; 514 tr->blkshift = ffs(tr->blksize) - 1;
519 515
520 INIT_LIST_HEAD(&tr->devs); 516 INIT_LIST_HEAD(&tr->devs);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index a34a0fe14884..4759d827e8c7 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -30,8 +30,9 @@
30#include <linux/backing-dev.h> 30#include <linux/backing-dev.h>
31#include <linux/compat.h> 31#include <linux/compat.h>
32#include <linux/mount.h> 32#include <linux/mount.h>
33 33#include <linux/blkpg.h>
34#include <linux/mtd/mtd.h> 34#include <linux/mtd/mtd.h>
35#include <linux/mtd/partitions.h>
35#include <linux/mtd/map.h> 36#include <linux/mtd/map.h>
36 37
37#include <asm/uaccess.h> 38#include <asm/uaccess.h>
@@ -478,6 +479,78 @@ static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
478 return ret; 479 return ret;
479} 480}
480 481
482/*
483 * Copies (and truncates, if necessary) data from the larger struct,
484 * nand_ecclayout, to the smaller, deprecated layout struct,
485 * nand_ecclayout_user. This is necessary only to suppport the deprecated
486 * API ioctl ECCGETLAYOUT while allowing all new functionality to use
487 * nand_ecclayout flexibly (i.e. the struct may change size in new
488 * releases without requiring major rewrites).
489 */
490static int shrink_ecclayout(const struct nand_ecclayout *from,
491 struct nand_ecclayout_user *to)
492{
493 int i;
494
495 if (!from || !to)
496 return -EINVAL;
497
498 memset(to, 0, sizeof(*to));
499
500 to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES);
501 for (i = 0; i < to->eccbytes; i++)
502 to->eccpos[i] = from->eccpos[i];
503
504 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
505 if (from->oobfree[i].length == 0 &&
506 from->oobfree[i].offset == 0)
507 break;
508 to->oobavail += from->oobfree[i].length;
509 to->oobfree[i] = from->oobfree[i];
510 }
511
512 return 0;
513}
514
515#ifdef CONFIG_MTD_PARTITIONS
516static int mtd_blkpg_ioctl(struct mtd_info *mtd,
517 struct blkpg_ioctl_arg __user *arg)
518{
519 struct blkpg_ioctl_arg a;
520 struct blkpg_partition p;
521
522 if (!capable(CAP_SYS_ADMIN))
523 return -EPERM;
524
525 /* Only master mtd device must be used to control partitions */
526 if (!mtd_is_master(mtd))
527 return -EINVAL;
528
529 if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
530 return -EFAULT;
531
532 if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
533 return -EFAULT;
534
535 switch (a.op) {
536 case BLKPG_ADD_PARTITION:
537
538 return mtd_add_partition(mtd, p.devname, p.start, p.length);
539
540 case BLKPG_DEL_PARTITION:
541
542 if (p.pno < 0)
543 return -EINVAL;
544
545 return mtd_del_partition(mtd, p.pno);
546
547 default:
548 return -EINVAL;
549 }
550}
551#endif
552
553
481static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) 554static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
482{ 555{
483 struct mtd_file_info *mfi = file->private_data; 556 struct mtd_file_info *mfi = file->private_data;
@@ -514,6 +587,9 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
514 if (get_user(ur_idx, &(ur->regionindex))) 587 if (get_user(ur_idx, &(ur->regionindex)))
515 return -EFAULT; 588 return -EFAULT;
516 589
590 if (ur_idx >= mtd->numeraseregions)
591 return -EINVAL;
592
517 kr = &(mtd->eraseregions[ur_idx]); 593 kr = &(mtd->eraseregions[ur_idx]);
518 594
519 if (put_user(kr->offset, &(ur->offset)) 595 if (put_user(kr->offset, &(ur->offset))
@@ -813,14 +889,23 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
813 } 889 }
814#endif 890#endif
815 891
892 /* This ioctl is being deprecated - it truncates the ecc layout */
816 case ECCGETLAYOUT: 893 case ECCGETLAYOUT:
817 { 894 {
895 struct nand_ecclayout_user *usrlay;
896
818 if (!mtd->ecclayout) 897 if (!mtd->ecclayout)
819 return -EOPNOTSUPP; 898 return -EOPNOTSUPP;
820 899
821 if (copy_to_user(argp, mtd->ecclayout, 900 usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
822 sizeof(struct nand_ecclayout))) 901 if (!usrlay)
823 return -EFAULT; 902 return -ENOMEM;
903
904 shrink_ecclayout(mtd->ecclayout, usrlay);
905
906 if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
907 ret = -EFAULT;
908 kfree(usrlay);
824 break; 909 break;
825 } 910 }
826 911
@@ -856,6 +941,22 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
856 break; 941 break;
857 } 942 }
858 943
944#ifdef CONFIG_MTD_PARTITIONS
945 case BLKPG:
946 {
947 ret = mtd_blkpg_ioctl(mtd,
948 (struct blkpg_ioctl_arg __user *)arg);
949 break;
950 }
951
952 case BLKRRPART:
953 {
954 /* No reread partition feature. Just return ok */
955 ret = 0;
956 break;
957 }
958#endif
959
859 default: 960 default:
860 ret = -ENOTTY; 961 ret = -ENOTTY;
861 } 962 }
@@ -1033,7 +1134,7 @@ static const struct file_operations mtd_fops = {
1033static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type, 1134static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
1034 int flags, const char *dev_name, void *data) 1135 int flags, const char *dev_name, void *data)
1035{ 1136{
1036 return mount_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC); 1137 return mount_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC);
1037} 1138}
1038 1139
1039static struct file_system_type mtd_inodefs_type = { 1140static struct file_system_type mtd_inodefs_type = {
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index dc6558568876..79e3689f1e16 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -29,9 +29,11 @@
29#include <linux/kmod.h> 29#include <linux/kmod.h>
30#include <linux/mtd/mtd.h> 30#include <linux/mtd/mtd.h>
31#include <linux/mtd/partitions.h> 31#include <linux/mtd/partitions.h>
32#include <linux/err.h>
32 33
33/* Our partition linked list */ 34/* Our partition linked list */
34static LIST_HEAD(mtd_partitions); 35static LIST_HEAD(mtd_partitions);
36static DEFINE_MUTEX(mtd_partitions_mutex);
35 37
36/* Our partition node structure */ 38/* Our partition node structure */
37struct mtd_part { 39struct mtd_part {
@@ -326,6 +328,12 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
326 return res; 328 return res;
327} 329}
328 330
331static inline void free_partition(struct mtd_part *p)
332{
333 kfree(p->mtd.name);
334 kfree(p);
335}
336
329/* 337/*
330 * This function unregisters and destroy all slave MTD objects which are 338 * This function unregisters and destroy all slave MTD objects which are
331 * attached to the given master MTD object. 339 * attached to the given master MTD object.
@@ -334,33 +342,42 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
334int del_mtd_partitions(struct mtd_info *master) 342int del_mtd_partitions(struct mtd_info *master)
335{ 343{
336 struct mtd_part *slave, *next; 344 struct mtd_part *slave, *next;
345 int ret, err = 0;
337 346
347 mutex_lock(&mtd_partitions_mutex);
338 list_for_each_entry_safe(slave, next, &mtd_partitions, list) 348 list_for_each_entry_safe(slave, next, &mtd_partitions, list)
339 if (slave->master == master) { 349 if (slave->master == master) {
350 ret = del_mtd_device(&slave->mtd);
351 if (ret < 0) {
352 err = ret;
353 continue;
354 }
340 list_del(&slave->list); 355 list_del(&slave->list);
341 del_mtd_device(&slave->mtd); 356 free_partition(slave);
342 kfree(slave);
343 } 357 }
358 mutex_unlock(&mtd_partitions_mutex);
344 359
345 return 0; 360 return err;
346} 361}
347EXPORT_SYMBOL(del_mtd_partitions); 362EXPORT_SYMBOL(del_mtd_partitions);
348 363
349static struct mtd_part *add_one_partition(struct mtd_info *master, 364static struct mtd_part *allocate_partition(struct mtd_info *master,
350 const struct mtd_partition *part, int partno, 365 const struct mtd_partition *part, int partno,
351 uint64_t cur_offset) 366 uint64_t cur_offset)
352{ 367{
353 struct mtd_part *slave; 368 struct mtd_part *slave;
369 char *name;
354 370
355 /* allocate the partition structure */ 371 /* allocate the partition structure */
356 slave = kzalloc(sizeof(*slave), GFP_KERNEL); 372 slave = kzalloc(sizeof(*slave), GFP_KERNEL);
357 if (!slave) { 373 name = kstrdup(part->name, GFP_KERNEL);
374 if (!name || !slave) {
358 printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n", 375 printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
359 master->name); 376 master->name);
360 del_mtd_partitions(master); 377 kfree(name);
361 return NULL; 378 kfree(slave);
379 return ERR_PTR(-ENOMEM);
362 } 380 }
363 list_add(&slave->list, &mtd_partitions);
364 381
365 /* set up the MTD object for this partition */ 382 /* set up the MTD object for this partition */
366 slave->mtd.type = master->type; 383 slave->mtd.type = master->type;
@@ -371,7 +388,7 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
371 slave->mtd.oobavail = master->oobavail; 388 slave->mtd.oobavail = master->oobavail;
372 slave->mtd.subpage_sft = master->subpage_sft; 389 slave->mtd.subpage_sft = master->subpage_sft;
373 390
374 slave->mtd.name = part->name; 391 slave->mtd.name = name;
375 slave->mtd.owner = master->owner; 392 slave->mtd.owner = master->owner;
376 slave->mtd.backing_dev_info = master->backing_dev_info; 393 slave->mtd.backing_dev_info = master->backing_dev_info;
377 394
@@ -518,12 +535,89 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
518 } 535 }
519 536
520out_register: 537out_register:
521 /* register our partition */
522 add_mtd_device(&slave->mtd);
523
524 return slave; 538 return slave;
525} 539}
526 540
541int mtd_add_partition(struct mtd_info *master, char *name,
542 long long offset, long long length)
543{
544 struct mtd_partition part;
545 struct mtd_part *p, *new;
546 uint64_t start, end;
547 int ret = 0;
548
549 /* the direct offset is expected */
550 if (offset == MTDPART_OFS_APPEND ||
551 offset == MTDPART_OFS_NXTBLK)
552 return -EINVAL;
553
554 if (length == MTDPART_SIZ_FULL)
555 length = master->size - offset;
556
557 if (length <= 0)
558 return -EINVAL;
559
560 part.name = name;
561 part.size = length;
562 part.offset = offset;
563 part.mask_flags = 0;
564 part.ecclayout = NULL;
565
566 new = allocate_partition(master, &part, -1, offset);
567 if (IS_ERR(new))
568 return PTR_ERR(new);
569
570 start = offset;
571 end = offset + length;
572
573 mutex_lock(&mtd_partitions_mutex);
574 list_for_each_entry(p, &mtd_partitions, list)
575 if (p->master == master) {
576 if ((start >= p->offset) &&
577 (start < (p->offset + p->mtd.size)))
578 goto err_inv;
579
580 if ((end >= p->offset) &&
581 (end < (p->offset + p->mtd.size)))
582 goto err_inv;
583 }
584
585 list_add(&new->list, &mtd_partitions);
586 mutex_unlock(&mtd_partitions_mutex);
587
588 add_mtd_device(&new->mtd);
589
590 return ret;
591err_inv:
592 mutex_unlock(&mtd_partitions_mutex);
593 free_partition(new);
594 return -EINVAL;
595}
596EXPORT_SYMBOL_GPL(mtd_add_partition);
597
598int mtd_del_partition(struct mtd_info *master, int partno)
599{
600 struct mtd_part *slave, *next;
601 int ret = -EINVAL;
602
603 mutex_lock(&mtd_partitions_mutex);
604 list_for_each_entry_safe(slave, next, &mtd_partitions, list)
605 if ((slave->master == master) &&
606 (slave->mtd.index == partno)) {
607 ret = del_mtd_device(&slave->mtd);
608 if (ret < 0)
609 break;
610
611 list_del(&slave->list);
612 free_partition(slave);
613 break;
614 }
615 mutex_unlock(&mtd_partitions_mutex);
616
617 return ret;
618}
619EXPORT_SYMBOL_GPL(mtd_del_partition);
620
527/* 621/*
528 * This function, given a master MTD object and a partition table, creates 622 * This function, given a master MTD object and a partition table, creates
529 * and registers slave MTD objects which are bound to the master according to 623 * and registers slave MTD objects which are bound to the master according to
@@ -544,9 +638,16 @@ int add_mtd_partitions(struct mtd_info *master,
544 printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name); 638 printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
545 639
546 for (i = 0; i < nbparts; i++) { 640 for (i = 0; i < nbparts; i++) {
547 slave = add_one_partition(master, parts + i, i, cur_offset); 641 slave = allocate_partition(master, parts + i, i, cur_offset);
548 if (!slave) 642 if (IS_ERR(slave))
549 return -ENOMEM; 643 return PTR_ERR(slave);
644
645 mutex_lock(&mtd_partitions_mutex);
646 list_add(&slave->list, &mtd_partitions);
647 mutex_unlock(&mtd_partitions_mutex);
648
649 add_mtd_device(&slave->mtd);
650
550 cur_offset = slave->offset + slave->mtd.size; 651 cur_offset = slave->offset + slave->mtd.size;
551 } 652 }
552 653
@@ -618,3 +719,20 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
618 return ret; 719 return ret;
619} 720}
620EXPORT_SYMBOL_GPL(parse_mtd_partitions); 721EXPORT_SYMBOL_GPL(parse_mtd_partitions);
722
723int mtd_is_master(struct mtd_info *mtd)
724{
725 struct mtd_part *part;
726 int nopart = 0;
727
728 mutex_lock(&mtd_partitions_mutex);
729 list_for_each_entry(part, &mtd_partitions, list)
730 if (&part->mtd == mtd) {
731 nopart = 1;
732 break;
733 }
734 mutex_unlock(&mtd_partitions_mutex);
735
736 return nopart;
737}
738EXPORT_SYMBOL_GPL(mtd_is_master);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 8b4b67c8a391..8229802b4346 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -400,13 +400,6 @@ config MTD_NAND_PXA3xx
 	  This enables the driver for the NAND flash device found on
 	  PXA3xx processors
 
-config MTD_NAND_PXA3xx_BUILTIN
-	bool "Use builtin definitions for some NAND chips (deprecated)"
-	depends on MTD_NAND_PXA3xx
-	help
-	  This enables builtin definitions for some NAND chips. This
-	  is deprecated in favor of platform specific data.
-
 config MTD_NAND_CM_X270
 	tristate "Support for NAND Flash on CM-X270 modules"
 	depends on MACH_ARMCORE
@@ -458,6 +451,7 @@ config MTD_NAND_ORION
 config MTD_NAND_FSL_ELBC
 	tristate "NAND support for Freescale eLBC controllers"
 	depends on PPC_OF
+	select FSL_LBC
 	help
 	  Various Freescale chips, including the 8313, include a NAND Flash
 	  Controller Module with built-in hardware ECC capabilities.
@@ -531,4 +525,11 @@ config MTD_NAND_JZ4740
531 help 525 help
532 Enables support for NAND Flash on JZ4740 SoC based boards. 526 Enables support for NAND Flash on JZ4740 SoC based boards.
533 527
528config MTD_NAND_FSMC
529 tristate "Support for NAND on ST Micros FSMC"
530 depends on PLAT_SPEAR || PLAT_NOMADIK || MACH_U300
531 help
532 Enables support for NAND Flash chips on the ST Microelectronics
533 Flexible Static Memory Controller (FSMC)
534
534endif # MTD_NAND 535endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index ac83dcdac5d6..8ad6faec72cb 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
19obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o 19obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
20obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o 20obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o
21obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o 21obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
22obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o
22obj-$(CONFIG_MTD_NAND_H1900) += h1910.o 23obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
23obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o 24obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
24obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o 25obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 6fbeefa3a766..79947bea4d57 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -110,15 +110,6 @@ static const unsigned short bfin_nfc_pin_req[] =
110 0}; 110 0};
111 111
112#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC 112#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
113static uint8_t bbt_pattern[] = { 0xff };
114
115static struct nand_bbt_descr bootrom_bbt = {
116 .options = 0,
117 .offs = 63,
118 .len = 1,
119 .pattern = bbt_pattern,
120};
121
122static struct nand_ecclayout bootrom_ecclayout = { 113static struct nand_ecclayout bootrom_ecclayout = {
123 .eccbytes = 24, 114 .eccbytes = 24,
124 .eccpos = { 115 .eccpos = {
@@ -809,7 +800,6 @@ static int __devinit bf5xx_nand_probe(struct platform_device *pdev)
809 /* setup hardware ECC data struct */ 800 /* setup hardware ECC data struct */
810 if (hardware_ecc) { 801 if (hardware_ecc) {
811#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC 802#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
812 chip->badblock_pattern = &bootrom_bbt;
813 chip->ecc.layout = &bootrom_ecclayout; 803 chip->ecc.layout = &bootrom_ecclayout;
814#endif 804#endif
815 chip->read_buf = bf5xx_nand_dma_read_buf; 805 chip->read_buf = bf5xx_nand_dma_read_buf;
@@ -830,6 +820,10 @@ static int __devinit bf5xx_nand_probe(struct platform_device *pdev)
830 goto out_err_nand_scan; 820 goto out_err_nand_scan;
831 } 821 }
832 822
823#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
824 chip->badblockpos = 63;
825#endif
826
833 /* add NAND partition */ 827 /* add NAND partition */
834 bf5xx_nand_add_partition(info); 828 bf5xx_nand_add_partition(info);
835 829
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 8beb0d0233b5..a90fde3ede28 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -316,7 +316,7 @@ static int nand_davinci_correct_4bit(struct mtd_info *mtd,
316 u32 syndrome[4]; 316 u32 syndrome[4];
317 u32 ecc_state; 317 u32 ecc_state;
318 unsigned num_errors, corrected; 318 unsigned num_errors, corrected;
319 unsigned long timeo = jiffies + msecs_to_jiffies(100); 319 unsigned long timeo;
320 320
321 /* All bytes 0xff? It's an erased page; ignore its ECC. */ 321 /* All bytes 0xff? It's an erased page; ignore its ECC. */
322 for (i = 0; i < 10; i++) { 322 for (i = 0; i < 10; i++) {
@@ -372,9 +372,11 @@ compare:
372 * after setting the 4BITECC_ADD_CALC_START bit. So if you immediately 372 * after setting the 4BITECC_ADD_CALC_START bit. So if you immediately
373 * begin trying to poll for the state, you may fall right out of your 373 * begin trying to poll for the state, you may fall right out of your
374 * loop without any of the correction calculations having taken place. 374 * loop without any of the correction calculations having taken place.
375 * The recommendation from the hardware team is to wait till ECC_STATE 375 * The recommendation from the hardware team is to initially delay as
376 * reads less than 4, which means ECC HW has entered correction state. 376 * long as ECC_STATE reads less than 4. After that, ECC HW has entered
377 * correction state.
377 */ 378 */
379 timeo = jiffies + usecs_to_jiffies(100);
378 do { 380 do {
379 ecc_state = (davinci_nand_readl(info, 381 ecc_state = (davinci_nand_readl(info,
380 NANDFSR_OFFSET) >> 8) & 0x0f; 382 NANDFSR_OFFSET) >> 8) & 0x0f;
@@ -733,6 +735,9 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
733 * breaks userspace ioctl interface with mtd-utils. Once we 735 * breaks userspace ioctl interface with mtd-utils. Once we
734 * resolve this issue, NAND_ECC_HW_OOB_FIRST mode can be used 736 * resolve this issue, NAND_ECC_HW_OOB_FIRST mode can be used
735 * for the 4KiB page chips. 737 * for the 4KiB page chips.
738 *
739 * TODO: Note that nand_ecclayout has now been expanded and can
740 * hold plenty of OOB entries.
736 */ 741 */
737 dev_warn(&pdev->dev, "no 4-bit ECC support yet " 742 dev_warn(&pdev->dev, "no 4-bit ECC support yet "
738 "for 4KiB-page NAND\n"); 743 "for 4KiB-page NAND\n");
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 532fe07cf886..8c8d3c86c0e8 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1292,6 +1292,7 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
1292 read_status(denali); 1292 read_status(denali);
1293 break; 1293 break;
1294 case NAND_CMD_READID: 1294 case NAND_CMD_READID:
1295 case NAND_CMD_PARAM:
1295 reset_buf(denali); 1296 reset_buf(denali);
1296 /*sometimes ManufactureId read from register is not right 1297 /*sometimes ManufactureId read from register is not right
1297 * e.g. some of Micron MT29F32G08QAA MLC NAND chips 1298 * e.g. some of Micron MT29F32G08QAA MLC NAND chips
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 80de0bff6c3a..c141b07b25d1 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -1,9 +1,11 @@
1/* Freescale Enhanced Local Bus Controller NAND driver 1/* Freescale Enhanced Local Bus Controller NAND driver
2 * 2 *
3 * Copyright (c) 2006-2007 Freescale Semiconductor 3 * Copyright © 2006-2007, 2010 Freescale Semiconductor
4 * 4 *
5 * Authors: Nick Spence <nick.spence@freescale.com>, 5 * Authors: Nick Spence <nick.spence@freescale.com>,
6 * Scott Wood <scottwood@freescale.com> 6 * Scott Wood <scottwood@freescale.com>
7 * Jack Lan <jack.lan@freescale.com>
8 * Roy Zang <tie-fei.zang@freescale.com>
7 * 9 *
8 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -27,6 +29,7 @@
27#include <linux/string.h> 29#include <linux/string.h>
28#include <linux/ioport.h> 30#include <linux/ioport.h>
29#include <linux/of_platform.h> 31#include <linux/of_platform.h>
32#include <linux/platform_device.h>
30#include <linux/slab.h> 33#include <linux/slab.h>
31#include <linux/interrupt.h> 34#include <linux/interrupt.h>
32 35
@@ -42,14 +45,12 @@
42#define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */ 45#define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */
43#define FCM_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait for FCM */ 46#define FCM_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait for FCM */
44 47
45struct fsl_elbc_ctrl;
46
47/* mtd information per set */ 48/* mtd information per set */
48 49
49struct fsl_elbc_mtd { 50struct fsl_elbc_mtd {
50 struct mtd_info mtd; 51 struct mtd_info mtd;
51 struct nand_chip chip; 52 struct nand_chip chip;
52 struct fsl_elbc_ctrl *ctrl; 53 struct fsl_lbc_ctrl *ctrl;
53 54
54 struct device *dev; 55 struct device *dev;
55 int bank; /* Chip select bank number */ 56 int bank; /* Chip select bank number */
@@ -58,18 +59,12 @@ struct fsl_elbc_mtd {
58 unsigned int fmr; /* FCM Flash Mode Register value */ 59 unsigned int fmr; /* FCM Flash Mode Register value */
59}; 60};
60 61
 61/* overview of the fsl elbc controller */ 62 /* Freescale eLBC FCM controller information */
62 63
63struct fsl_elbc_ctrl { 64struct fsl_elbc_fcm_ctrl {
64 struct nand_hw_control controller; 65 struct nand_hw_control controller;
65 struct fsl_elbc_mtd *chips[MAX_BANKS]; 66 struct fsl_elbc_mtd *chips[MAX_BANKS];
66 67
67 /* device info */
68 struct device *dev;
69 struct fsl_lbc_regs __iomem *regs;
70 int irq;
71 wait_queue_head_t irq_wait;
72 unsigned int irq_status; /* status read from LTESR by irq handler */
73 u8 __iomem *addr; /* Address of assigned FCM buffer */ 68 u8 __iomem *addr; /* Address of assigned FCM buffer */
74 unsigned int page; /* Last page written to / read from */ 69 unsigned int page; /* Last page written to / read from */
75 unsigned int read_bytes; /* Number of bytes read during command */ 70 unsigned int read_bytes; /* Number of bytes read during command */
@@ -79,6 +74,7 @@ struct fsl_elbc_ctrl {
79 unsigned int mdr; /* UPM/FCM Data Register value */ 74 unsigned int mdr; /* UPM/FCM Data Register value */
80 unsigned int use_mdr; /* Non zero if the MDR is to be set */ 75 unsigned int use_mdr; /* Non zero if the MDR is to be set */
81 unsigned int oob; /* Non zero if operating on OOB data */ 76 unsigned int oob; /* Non zero if operating on OOB data */
77 unsigned int counter; /* counter for the initializations */
82 char *oob_poi; /* Place to write ECC after read back */ 78 char *oob_poi; /* Place to write ECC after read back */
83}; 79};
84 80
@@ -164,11 +160,12 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
164{ 160{
165 struct nand_chip *chip = mtd->priv; 161 struct nand_chip *chip = mtd->priv;
166 struct fsl_elbc_mtd *priv = chip->priv; 162 struct fsl_elbc_mtd *priv = chip->priv;
167 struct fsl_elbc_ctrl *ctrl = priv->ctrl; 163 struct fsl_lbc_ctrl *ctrl = priv->ctrl;
168 struct fsl_lbc_regs __iomem *lbc = ctrl->regs; 164 struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
165 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
169 int buf_num; 166 int buf_num;
170 167
171 ctrl->page = page_addr; 168 elbc_fcm_ctrl->page = page_addr;
172 169
173 out_be32(&lbc->fbar, 170 out_be32(&lbc->fbar,
174 page_addr >> (chip->phys_erase_shift - chip->page_shift)); 171 page_addr >> (chip->phys_erase_shift - chip->page_shift));
@@ -185,16 +182,18 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
185 buf_num = page_addr & 7; 182 buf_num = page_addr & 7;
186 } 183 }
187 184
188 ctrl->addr = priv->vbase + buf_num * 1024; 185 elbc_fcm_ctrl->addr = priv->vbase + buf_num * 1024;
189 ctrl->index = column; 186 elbc_fcm_ctrl->index = column;
190 187
191 /* for OOB data point to the second half of the buffer */ 188 /* for OOB data point to the second half of the buffer */
192 if (oob) 189 if (oob)
193 ctrl->index += priv->page_size ? 2048 : 512; 190 elbc_fcm_ctrl->index += priv->page_size ? 2048 : 512;
194 191
195 dev_vdbg(ctrl->dev, "set_addr: bank=%d, ctrl->addr=0x%p (0x%p), " 192 dev_vdbg(priv->dev, "set_addr: bank=%d, "
193 "elbc_fcm_ctrl->addr=0x%p (0x%p), "
196 "index %x, pes %d ps %d\n", 194 "index %x, pes %d ps %d\n",
197 buf_num, ctrl->addr, priv->vbase, ctrl->index, 195 buf_num, elbc_fcm_ctrl->addr, priv->vbase,
196 elbc_fcm_ctrl->index,
198 chip->phys_erase_shift, chip->page_shift); 197 chip->phys_erase_shift, chip->page_shift);
199} 198}
200 199
@@ -205,18 +204,19 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
205{ 204{
206 struct nand_chip *chip = mtd->priv; 205 struct nand_chip *chip = mtd->priv;
207 struct fsl_elbc_mtd *priv = chip->priv; 206 struct fsl_elbc_mtd *priv = chip->priv;
208 struct fsl_elbc_ctrl *ctrl = priv->ctrl; 207 struct fsl_lbc_ctrl *ctrl = priv->ctrl;
208 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
209 struct fsl_lbc_regs __iomem *lbc = ctrl->regs; 209 struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
210 210
211 /* Setup the FMR[OP] to execute without write protection */ 211 /* Setup the FMR[OP] to execute without write protection */
212 out_be32(&lbc->fmr, priv->fmr | 3); 212 out_be32(&lbc->fmr, priv->fmr | 3);
213 if (ctrl->use_mdr) 213 if (elbc_fcm_ctrl->use_mdr)
214 out_be32(&lbc->mdr, ctrl->mdr); 214 out_be32(&lbc->mdr, elbc_fcm_ctrl->mdr);
215 215
216 dev_vdbg(ctrl->dev, 216 dev_vdbg(priv->dev,
217 "fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n", 217 "fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n",
218 in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr)); 218 in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr));
219 dev_vdbg(ctrl->dev, 219 dev_vdbg(priv->dev,
220 "fsl_elbc_run_command: fbar=%08x fpar=%08x " 220 "fsl_elbc_run_command: fbar=%08x fpar=%08x "
221 "fbcr=%08x bank=%d\n", 221 "fbcr=%08x bank=%d\n",
222 in_be32(&lbc->fbar), in_be32(&lbc->fpar), 222 in_be32(&lbc->fbar), in_be32(&lbc->fpar),
@@ -229,19 +229,18 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
229 /* wait for FCM complete flag or timeout */ 229 /* wait for FCM complete flag or timeout */
230 wait_event_timeout(ctrl->irq_wait, ctrl->irq_status, 230 wait_event_timeout(ctrl->irq_wait, ctrl->irq_status,
231 FCM_TIMEOUT_MSECS * HZ/1000); 231 FCM_TIMEOUT_MSECS * HZ/1000);
232 ctrl->status = ctrl->irq_status; 232 elbc_fcm_ctrl->status = ctrl->irq_status;
233
234 /* store mdr value in case it was needed */ 233 /* store mdr value in case it was needed */
235 if (ctrl->use_mdr) 234 if (elbc_fcm_ctrl->use_mdr)
236 ctrl->mdr = in_be32(&lbc->mdr); 235 elbc_fcm_ctrl->mdr = in_be32(&lbc->mdr);
237 236
238 ctrl->use_mdr = 0; 237 elbc_fcm_ctrl->use_mdr = 0;
239 238
240 if (ctrl->status != LTESR_CC) { 239 if (elbc_fcm_ctrl->status != LTESR_CC) {
241 dev_info(ctrl->dev, 240 dev_info(priv->dev,
242 "command failed: fir %x fcr %x status %x mdr %x\n", 241 "command failed: fir %x fcr %x status %x mdr %x\n",
243 in_be32(&lbc->fir), in_be32(&lbc->fcr), 242 in_be32(&lbc->fir), in_be32(&lbc->fcr),
244 ctrl->status, ctrl->mdr); 243 elbc_fcm_ctrl->status, elbc_fcm_ctrl->mdr);
245 return -EIO; 244 return -EIO;
246 } 245 }
247 246
@@ -251,7 +250,7 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
251static void fsl_elbc_do_read(struct nand_chip *chip, int oob) 250static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
252{ 251{
253 struct fsl_elbc_mtd *priv = chip->priv; 252 struct fsl_elbc_mtd *priv = chip->priv;
254 struct fsl_elbc_ctrl *ctrl = priv->ctrl; 253 struct fsl_lbc_ctrl *ctrl = priv->ctrl;
255 struct fsl_lbc_regs __iomem *lbc = ctrl->regs; 254 struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
256 255
257 if (priv->page_size) { 256 if (priv->page_size) {
@@ -284,15 +283,16 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
284{ 283{
285 struct nand_chip *chip = mtd->priv; 284 struct nand_chip *chip = mtd->priv;
286 struct fsl_elbc_mtd *priv = chip->priv; 285 struct fsl_elbc_mtd *priv = chip->priv;
287 struct fsl_elbc_ctrl *ctrl = priv->ctrl; 286 struct fsl_lbc_ctrl *ctrl = priv->ctrl;
287 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
288 struct fsl_lbc_regs __iomem *lbc = ctrl->regs; 288 struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
289 289
290 ctrl->use_mdr = 0; 290 elbc_fcm_ctrl->use_mdr = 0;
291 291
292 /* clear the read buffer */ 292 /* clear the read buffer */
293 ctrl->read_bytes = 0; 293 elbc_fcm_ctrl->read_bytes = 0;
294 if (command != NAND_CMD_PAGEPROG) 294 if (command != NAND_CMD_PAGEPROG)
295 ctrl->index = 0; 295 elbc_fcm_ctrl->index = 0;
296 296
297 switch (command) { 297 switch (command) {
298 /* READ0 and READ1 read the entire buffer to use hardware ECC. */ 298 /* READ0 and READ1 read the entire buffer to use hardware ECC. */
@@ -301,7 +301,7 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
301 301
302 /* fall-through */ 302 /* fall-through */
303 case NAND_CMD_READ0: 303 case NAND_CMD_READ0:
304 dev_dbg(ctrl->dev, 304 dev_dbg(priv->dev,
305 "fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:" 305 "fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:"
306 " 0x%x, column: 0x%x.\n", page_addr, column); 306 " 0x%x, column: 0x%x.\n", page_addr, column);
307 307
@@ -309,8 +309,8 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
309 out_be32(&lbc->fbcr, 0); /* read entire page to enable ECC */ 309 out_be32(&lbc->fbcr, 0); /* read entire page to enable ECC */
310 set_addr(mtd, 0, page_addr, 0); 310 set_addr(mtd, 0, page_addr, 0);
311 311
312 ctrl->read_bytes = mtd->writesize + mtd->oobsize; 312 elbc_fcm_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
313 ctrl->index += column; 313 elbc_fcm_ctrl->index += column;
314 314
315 fsl_elbc_do_read(chip, 0); 315 fsl_elbc_do_read(chip, 0);
316 fsl_elbc_run_command(mtd); 316 fsl_elbc_run_command(mtd);
@@ -318,14 +318,14 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
318 318
319 /* READOOB reads only the OOB because no ECC is performed. */ 319 /* READOOB reads only the OOB because no ECC is performed. */
320 case NAND_CMD_READOOB: 320 case NAND_CMD_READOOB:
321 dev_vdbg(ctrl->dev, 321 dev_vdbg(priv->dev,
322 "fsl_elbc_cmdfunc: NAND_CMD_READOOB, page_addr:" 322 "fsl_elbc_cmdfunc: NAND_CMD_READOOB, page_addr:"
323 " 0x%x, column: 0x%x.\n", page_addr, column); 323 " 0x%x, column: 0x%x.\n", page_addr, column);
324 324
325 out_be32(&lbc->fbcr, mtd->oobsize - column); 325 out_be32(&lbc->fbcr, mtd->oobsize - column);
326 set_addr(mtd, column, page_addr, 1); 326 set_addr(mtd, column, page_addr, 1);
327 327
328 ctrl->read_bytes = mtd->writesize + mtd->oobsize; 328 elbc_fcm_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
329 329
330 fsl_elbc_do_read(chip, 1); 330 fsl_elbc_do_read(chip, 1);
331 fsl_elbc_run_command(mtd); 331 fsl_elbc_run_command(mtd);
@@ -333,7 +333,7 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
333 333
334 /* READID must read all 5 possible bytes while CEB is active */ 334 /* READID must read all 5 possible bytes while CEB is active */
335 case NAND_CMD_READID: 335 case NAND_CMD_READID:
336 dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n"); 336 dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n");
337 337
338 out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) | 338 out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) |
339 (FIR_OP_UA << FIR_OP1_SHIFT) | 339 (FIR_OP_UA << FIR_OP1_SHIFT) |
@@ -341,9 +341,9 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
341 out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT); 341 out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
342 /* 5 bytes for manuf, device and exts */ 342 /* 5 bytes for manuf, device and exts */
343 out_be32(&lbc->fbcr, 5); 343 out_be32(&lbc->fbcr, 5);
344 ctrl->read_bytes = 5; 344 elbc_fcm_ctrl->read_bytes = 5;
345 ctrl->use_mdr = 1; 345 elbc_fcm_ctrl->use_mdr = 1;
346 ctrl->mdr = 0; 346 elbc_fcm_ctrl->mdr = 0;
347 347
348 set_addr(mtd, 0, 0, 0); 348 set_addr(mtd, 0, 0, 0);
349 fsl_elbc_run_command(mtd); 349 fsl_elbc_run_command(mtd);
@@ -351,7 +351,7 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
351 351
352 /* ERASE1 stores the block and page address */ 352 /* ERASE1 stores the block and page address */
353 case NAND_CMD_ERASE1: 353 case NAND_CMD_ERASE1:
354 dev_vdbg(ctrl->dev, 354 dev_vdbg(priv->dev,
355 "fsl_elbc_cmdfunc: NAND_CMD_ERASE1, " 355 "fsl_elbc_cmdfunc: NAND_CMD_ERASE1, "
356 "page_addr: 0x%x.\n", page_addr); 356 "page_addr: 0x%x.\n", page_addr);
357 set_addr(mtd, 0, page_addr, 0); 357 set_addr(mtd, 0, page_addr, 0);
@@ -359,7 +359,7 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
359 359
360 /* ERASE2 uses the block and page address from ERASE1 */ 360 /* ERASE2 uses the block and page address from ERASE1 */
361 case NAND_CMD_ERASE2: 361 case NAND_CMD_ERASE2:
362 dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n"); 362 dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n");
363 363
364 out_be32(&lbc->fir, 364 out_be32(&lbc->fir,
365 (FIR_OP_CM0 << FIR_OP0_SHIFT) | 365 (FIR_OP_CM0 << FIR_OP0_SHIFT) |
@@ -374,8 +374,8 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
374 (NAND_CMD_ERASE2 << FCR_CMD2_SHIFT)); 374 (NAND_CMD_ERASE2 << FCR_CMD2_SHIFT));
375 375
376 out_be32(&lbc->fbcr, 0); 376 out_be32(&lbc->fbcr, 0);
377 ctrl->read_bytes = 0; 377 elbc_fcm_ctrl->read_bytes = 0;
378 ctrl->use_mdr = 1; 378 elbc_fcm_ctrl->use_mdr = 1;
379 379
380 fsl_elbc_run_command(mtd); 380 fsl_elbc_run_command(mtd);
381 return; 381 return;
@@ -383,14 +383,12 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
383 /* SEQIN sets up the addr buffer and all registers except the length */ 383 /* SEQIN sets up the addr buffer and all registers except the length */
384 case NAND_CMD_SEQIN: { 384 case NAND_CMD_SEQIN: {
385 __be32 fcr; 385 __be32 fcr;
386 dev_vdbg(ctrl->dev, 386 dev_vdbg(priv->dev,
387 "fsl_elbc_cmdfunc: NAND_CMD_SEQIN/PAGE_PROG, " 387 "fsl_elbc_cmdfunc: NAND_CMD_SEQIN/PAGE_PROG, "
388 "page_addr: 0x%x, column: 0x%x.\n", 388 "page_addr: 0x%x, column: 0x%x.\n",
389 page_addr, column); 389 page_addr, column);
390 390
391 ctrl->column = column; 391 elbc_fcm_ctrl->use_mdr = 1;
392 ctrl->oob = 0;
393 ctrl->use_mdr = 1;
394 392
395 fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) | 393 fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
396 (NAND_CMD_SEQIN << FCR_CMD2_SHIFT) | 394 (NAND_CMD_SEQIN << FCR_CMD2_SHIFT) |
@@ -420,7 +418,7 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
420 /* OOB area --> READOOB */ 418 /* OOB area --> READOOB */
421 column -= mtd->writesize; 419 column -= mtd->writesize;
422 fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT; 420 fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
423 ctrl->oob = 1; 421 elbc_fcm_ctrl->oob = 1;
424 } else { 422 } else {
425 WARN_ON(column != 0); 423 WARN_ON(column != 0);
426 /* First 256 bytes --> READ0 */ 424 /* First 256 bytes --> READ0 */
@@ -429,24 +427,24 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
429 } 427 }
430 428
431 out_be32(&lbc->fcr, fcr); 429 out_be32(&lbc->fcr, fcr);
432 set_addr(mtd, column, page_addr, ctrl->oob); 430 set_addr(mtd, column, page_addr, elbc_fcm_ctrl->oob);
433 return; 431 return;
434 } 432 }
435 433
436 /* PAGEPROG reuses all of the setup from SEQIN and adds the length */ 434 /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
437 case NAND_CMD_PAGEPROG: { 435 case NAND_CMD_PAGEPROG: {
438 int full_page; 436 int full_page;
439 dev_vdbg(ctrl->dev, 437 dev_vdbg(priv->dev,
440 "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG " 438 "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG "
441 "writing %d bytes.\n", ctrl->index); 439 "writing %d bytes.\n", elbc_fcm_ctrl->index);
442 440
443 /* if the write did not start at 0 or is not a full page 441 /* if the write did not start at 0 or is not a full page
444 * then set the exact length, otherwise use a full page 442 * then set the exact length, otherwise use a full page
445 * write so the HW generates the ECC. 443 * write so the HW generates the ECC.
446 */ 444 */
447 if (ctrl->oob || ctrl->column != 0 || 445 if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 ||
448 ctrl->index != mtd->writesize + mtd->oobsize) { 446 elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize) {
449 out_be32(&lbc->fbcr, ctrl->index); 447 out_be32(&lbc->fbcr, elbc_fcm_ctrl->index);
450 full_page = 0; 448 full_page = 0;
451 } else { 449 } else {
452 out_be32(&lbc->fbcr, 0); 450 out_be32(&lbc->fbcr, 0);
@@ -458,21 +456,21 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
458 /* Read back the page in order to fill in the ECC for the 456 /* Read back the page in order to fill in the ECC for the
459 * caller. Is this really needed? 457 * caller. Is this really needed?
460 */ 458 */
461 if (full_page && ctrl->oob_poi) { 459 if (full_page && elbc_fcm_ctrl->oob_poi) {
462 out_be32(&lbc->fbcr, 3); 460 out_be32(&lbc->fbcr, 3);
463 set_addr(mtd, 6, page_addr, 1); 461 set_addr(mtd, 6, page_addr, 1);
464 462
465 ctrl->read_bytes = mtd->writesize + 9; 463 elbc_fcm_ctrl->read_bytes = mtd->writesize + 9;
466 464
467 fsl_elbc_do_read(chip, 1); 465 fsl_elbc_do_read(chip, 1);
468 fsl_elbc_run_command(mtd); 466 fsl_elbc_run_command(mtd);
469 467
470 memcpy_fromio(ctrl->oob_poi + 6, 468 memcpy_fromio(elbc_fcm_ctrl->oob_poi + 6,
471 &ctrl->addr[ctrl->index], 3); 469 &elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], 3);
472 ctrl->index += 3; 470 elbc_fcm_ctrl->index += 3;
473 } 471 }
474 472
475 ctrl->oob_poi = NULL; 473 elbc_fcm_ctrl->oob_poi = NULL;
476 return; 474 return;
477 } 475 }
478 476
@@ -485,26 +483,26 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
485 out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT); 483 out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
486 out_be32(&lbc->fbcr, 1); 484 out_be32(&lbc->fbcr, 1);
487 set_addr(mtd, 0, 0, 0); 485 set_addr(mtd, 0, 0, 0);
488 ctrl->read_bytes = 1; 486 elbc_fcm_ctrl->read_bytes = 1;
489 487
490 fsl_elbc_run_command(mtd); 488 fsl_elbc_run_command(mtd);
491 489
492 /* The chip always seems to report that it is 490 /* The chip always seems to report that it is
493 * write-protected, even when it is not. 491 * write-protected, even when it is not.
494 */ 492 */
495 setbits8(ctrl->addr, NAND_STATUS_WP); 493 setbits8(elbc_fcm_ctrl->addr, NAND_STATUS_WP);
496 return; 494 return;
497 495
498 /* RESET without waiting for the ready line */ 496 /* RESET without waiting for the ready line */
499 case NAND_CMD_RESET: 497 case NAND_CMD_RESET:
500 dev_dbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_RESET.\n"); 498 dev_dbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_RESET.\n");
501 out_be32(&lbc->fir, FIR_OP_CM0 << FIR_OP0_SHIFT); 499 out_be32(&lbc->fir, FIR_OP_CM0 << FIR_OP0_SHIFT);
502 out_be32(&lbc->fcr, NAND_CMD_RESET << FCR_CMD0_SHIFT); 500 out_be32(&lbc->fcr, NAND_CMD_RESET << FCR_CMD0_SHIFT);
503 fsl_elbc_run_command(mtd); 501 fsl_elbc_run_command(mtd);
504 return; 502 return;
505 503
506 default: 504 default:
507 dev_err(ctrl->dev, 505 dev_err(priv->dev,
508 "fsl_elbc_cmdfunc: error, unsupported command 0x%x.\n", 506 "fsl_elbc_cmdfunc: error, unsupported command 0x%x.\n",
509 command); 507 command);
510 } 508 }
@@ -524,24 +522,24 @@ static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
524{ 522{
525 struct nand_chip *chip = mtd->priv; 523 struct nand_chip *chip = mtd->priv;
526 struct fsl_elbc_mtd *priv = chip->priv; 524 struct fsl_elbc_mtd *priv = chip->priv;
527 struct fsl_elbc_ctrl *ctrl = priv->ctrl; 525 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
528 unsigned int bufsize = mtd->writesize + mtd->oobsize; 526 unsigned int bufsize = mtd->writesize + mtd->oobsize;
529 527
530 if (len <= 0) { 528 if (len <= 0) {
531 dev_err(ctrl->dev, "write_buf of %d bytes", len); 529 dev_err(priv->dev, "write_buf of %d bytes", len);
532 ctrl->status = 0; 530 elbc_fcm_ctrl->status = 0;
533 return; 531 return;
534 } 532 }
535 533
536 if ((unsigned int)len > bufsize - ctrl->index) { 534 if ((unsigned int)len > bufsize - elbc_fcm_ctrl->index) {
537 dev_err(ctrl->dev, 535 dev_err(priv->dev,
538 "write_buf beyond end of buffer " 536 "write_buf beyond end of buffer "
539 "(%d requested, %u available)\n", 537 "(%d requested, %u available)\n",
540 len, bufsize - ctrl->index); 538 len, bufsize - elbc_fcm_ctrl->index);
541 len = bufsize - ctrl->index; 539 len = bufsize - elbc_fcm_ctrl->index;
542 } 540 }
543 541
544 memcpy_toio(&ctrl->addr[ctrl->index], buf, len); 542 memcpy_toio(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], buf, len);
545 /* 543 /*
546 * This is workaround for the weird elbc hangs during nand write, 544 * This is workaround for the weird elbc hangs during nand write,
547 * Scott Wood says: "...perhaps difference in how long it takes a 545 * Scott Wood says: "...perhaps difference in how long it takes a
@@ -549,9 +547,9 @@ static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
549 * is causing problems, and sync isn't helping for some reason." 547 * is causing problems, and sync isn't helping for some reason."
550 * Reading back the last byte helps though. 548 * Reading back the last byte helps though.
551 */ 549 */
552 in_8(&ctrl->addr[ctrl->index] + len - 1); 550 in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index] + len - 1);
553 551
554 ctrl->index += len; 552 elbc_fcm_ctrl->index += len;
555} 553}
556 554
557/* 555/*
@@ -562,13 +560,13 @@ static u8 fsl_elbc_read_byte(struct mtd_info *mtd)
562{ 560{
563 struct nand_chip *chip = mtd->priv; 561 struct nand_chip *chip = mtd->priv;
564 struct fsl_elbc_mtd *priv = chip->priv; 562 struct fsl_elbc_mtd *priv = chip->priv;
565 struct fsl_elbc_ctrl *ctrl = priv->ctrl; 563 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
566 564
567 /* If there are still bytes in the FCM, then use the next byte. */ 565 /* If there are still bytes in the FCM, then use the next byte. */
568 if (ctrl->index < ctrl->read_bytes) 566 if (elbc_fcm_ctrl->index < elbc_fcm_ctrl->read_bytes)
569 return in_8(&ctrl->addr[ctrl->index++]); 567 return in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index++]);
570 568
571 dev_err(ctrl->dev, "read_byte beyond end of buffer\n"); 569 dev_err(priv->dev, "read_byte beyond end of buffer\n");
572 return ERR_BYTE; 570 return ERR_BYTE;
573} 571}
574 572
@@ -579,18 +577,19 @@ static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
579{ 577{
580 struct nand_chip *chip = mtd->priv; 578 struct nand_chip *chip = mtd->priv;
581 struct fsl_elbc_mtd *priv = chip->priv; 579 struct fsl_elbc_mtd *priv = chip->priv;
582 struct fsl_elbc_ctrl *ctrl = priv->ctrl; 580 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
583 int avail; 581 int avail;
584 582
585 if (len < 0) 583 if (len < 0)
586 return; 584 return;
587 585
588 avail = min((unsigned int)len, ctrl->read_bytes - ctrl->index); 586 avail = min((unsigned int)len,
589 memcpy_fromio(buf, &ctrl->addr[ctrl->index], avail); 587 elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index);
590 ctrl->index += avail; 588 memcpy_fromio(buf, &elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], avail);
589 elbc_fcm_ctrl->index += avail;
591 590
592 if (len > avail) 591 if (len > avail)
593 dev_err(ctrl->dev, 592 dev_err(priv->dev,
594 "read_buf beyond end of buffer " 593 "read_buf beyond end of buffer "
595 "(%d requested, %d available)\n", 594 "(%d requested, %d available)\n",
596 len, avail); 595 len, avail);
@@ -603,30 +602,32 @@ static int fsl_elbc_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
603{ 602{
604 struct nand_chip *chip = mtd->priv; 603 struct nand_chip *chip = mtd->priv;
605 struct fsl_elbc_mtd *priv = chip->priv; 604 struct fsl_elbc_mtd *priv = chip->priv;
606 struct fsl_elbc_ctrl *ctrl = priv->ctrl; 605 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
607 int i; 606 int i;
608 607
609 if (len < 0) { 608 if (len < 0) {
610 dev_err(ctrl->dev, "write_buf of %d bytes", len); 609 dev_err(priv->dev, "write_buf of %d bytes", len);
611 return -EINVAL; 610 return -EINVAL;
612 } 611 }
613 612
614 if ((unsigned int)len > ctrl->read_bytes - ctrl->index) { 613 if ((unsigned int)len >
615 dev_err(ctrl->dev, 614 elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index) {
616 "verify_buf beyond end of buffer " 615 dev_err(priv->dev,
617 "(%d requested, %u available)\n", 616 "verify_buf beyond end of buffer "
618 len, ctrl->read_bytes - ctrl->index); 617 "(%d requested, %u available)\n",
618 len, elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index);
619 619
620 ctrl->index = ctrl->read_bytes; 620 elbc_fcm_ctrl->index = elbc_fcm_ctrl->read_bytes;
621 return -EINVAL; 621 return -EINVAL;
622 } 622 }
623 623
624 for (i = 0; i < len; i++) 624 for (i = 0; i < len; i++)
625 if (in_8(&ctrl->addr[ctrl->index + i]) != buf[i]) 625 if (in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index + i])
626 != buf[i])
626 break; 627 break;
627 628
628 ctrl->index += len; 629 elbc_fcm_ctrl->index += len;
629 return i == len && ctrl->status == LTESR_CC ? 0 : -EIO; 630 return i == len && elbc_fcm_ctrl->status == LTESR_CC ? 0 : -EIO;
630} 631}
631 632
632/* This function is called after Program and Erase Operations to 633/* This function is called after Program and Erase Operations to
@@ -635,22 +636,22 @@ static int fsl_elbc_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
635static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip) 636static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
636{ 637{
637 struct fsl_elbc_mtd *priv = chip->priv; 638 struct fsl_elbc_mtd *priv = chip->priv;
638 struct fsl_elbc_ctrl *ctrl = priv->ctrl; 639 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
639 640
640 if (ctrl->status != LTESR_CC) 641 if (elbc_fcm_ctrl->status != LTESR_CC)
641 return NAND_STATUS_FAIL; 642 return NAND_STATUS_FAIL;
642 643
643 /* The chip always seems to report that it is 644 /* The chip always seems to report that it is
644 * write-protected, even when it is not. 645 * write-protected, even when it is not.
645 */ 646 */
646 return (ctrl->mdr & 0xff) | NAND_STATUS_WP; 647 return (elbc_fcm_ctrl->mdr & 0xff) | NAND_STATUS_WP;
647} 648}
648 649
649static int fsl_elbc_chip_init_tail(struct mtd_info *mtd) 650static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
650{ 651{
651 struct nand_chip *chip = mtd->priv; 652 struct nand_chip *chip = mtd->priv;
652 struct fsl_elbc_mtd *priv = chip->priv; 653 struct fsl_elbc_mtd *priv = chip->priv;
653 struct fsl_elbc_ctrl *ctrl = priv->ctrl; 654 struct fsl_lbc_ctrl *ctrl = priv->ctrl;
654 struct fsl_lbc_regs __iomem *lbc = ctrl->regs; 655 struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
655 unsigned int al; 656 unsigned int al;
656 657
@@ -665,41 +666,41 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
665 priv->fmr |= (12 << FMR_CWTO_SHIFT) | /* Timeout > 12 ms */ 666 priv->fmr |= (12 << FMR_CWTO_SHIFT) | /* Timeout > 12 ms */
666 (al << FMR_AL_SHIFT); 667 (al << FMR_AL_SHIFT);
667 668
668 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->numchips = %d\n", 669 dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n",
669 chip->numchips); 670 chip->numchips);
670 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chipsize = %lld\n", 671 dev_dbg(priv->dev, "fsl_elbc_init: nand->chipsize = %lld\n",
671 chip->chipsize); 672 chip->chipsize);
672 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->pagemask = %8x\n", 673 dev_dbg(priv->dev, "fsl_elbc_init: nand->pagemask = %8x\n",
673 chip->pagemask); 674 chip->pagemask);
674 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chip_delay = %d\n", 675 dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_delay = %d\n",
675 chip->chip_delay); 676 chip->chip_delay);
676 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->badblockpos = %d\n", 677 dev_dbg(priv->dev, "fsl_elbc_init: nand->badblockpos = %d\n",
677 chip->badblockpos); 678 chip->badblockpos);
678 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chip_shift = %d\n", 679 dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_shift = %d\n",
679 chip->chip_shift); 680 chip->chip_shift);
680 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->page_shift = %d\n", 681 dev_dbg(priv->dev, "fsl_elbc_init: nand->page_shift = %d\n",
681 chip->page_shift); 682 chip->page_shift);
682 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n", 683 dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
683 chip->phys_erase_shift); 684 chip->phys_erase_shift);
684 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecclayout = %p\n", 685 dev_dbg(priv->dev, "fsl_elbc_init: nand->ecclayout = %p\n",
685 chip->ecclayout); 686 chip->ecclayout);
686 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.mode = %d\n", 687 dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
687 chip->ecc.mode); 688 chip->ecc.mode);
688 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.steps = %d\n", 689 dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
689 chip->ecc.steps); 690 chip->ecc.steps);
690 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n", 691 dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n",
691 chip->ecc.bytes); 692 chip->ecc.bytes);
692 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.total = %d\n", 693 dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.total = %d\n",
693 chip->ecc.total); 694 chip->ecc.total);
694 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.layout = %p\n", 695 dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.layout = %p\n",
695 chip->ecc.layout); 696 chip->ecc.layout);
696 dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags); 697 dev_dbg(priv->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags);
697 dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size); 698 dev_dbg(priv->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size);
698 dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->erasesize = %d\n", 699 dev_dbg(priv->dev, "fsl_elbc_init: mtd->erasesize = %d\n",
699 mtd->erasesize); 700 mtd->erasesize);
700 dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->writesize = %d\n", 701 dev_dbg(priv->dev, "fsl_elbc_init: mtd->writesize = %d\n",
701 mtd->writesize); 702 mtd->writesize);
702 dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->oobsize = %d\n", 703 dev_dbg(priv->dev, "fsl_elbc_init: mtd->oobsize = %d\n",
703 mtd->oobsize); 704 mtd->oobsize);
704 705
705 /* adjust Option Register and ECC to match Flash page size */ 706 /* adjust Option Register and ECC to match Flash page size */
@@ -719,7 +720,7 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
719 chip->badblock_pattern = &largepage_memorybased; 720 chip->badblock_pattern = &largepage_memorybased;
720 } 721 }
721 } else { 722 } else {
722 dev_err(ctrl->dev, 723 dev_err(priv->dev,
723 "fsl_elbc_init: page size %d is not supported\n", 724 "fsl_elbc_init: page size %d is not supported\n",
724 mtd->writesize); 725 mtd->writesize);
725 return -1; 726 return -1;
@@ -750,18 +751,19 @@ static void fsl_elbc_write_page(struct mtd_info *mtd,
750 const uint8_t *buf) 751 const uint8_t *buf)
751{ 752{
752 struct fsl_elbc_mtd *priv = chip->priv; 753 struct fsl_elbc_mtd *priv = chip->priv;
753 struct fsl_elbc_ctrl *ctrl = priv->ctrl; 754 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
754 755
755 fsl_elbc_write_buf(mtd, buf, mtd->writesize); 756 fsl_elbc_write_buf(mtd, buf, mtd->writesize);
756 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); 757 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
757 758
758 ctrl->oob_poi = chip->oob_poi; 759 elbc_fcm_ctrl->oob_poi = chip->oob_poi;
759} 760}
760 761
761static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) 762static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
762{ 763{
763 struct fsl_elbc_ctrl *ctrl = priv->ctrl; 764 struct fsl_lbc_ctrl *ctrl = priv->ctrl;
764 struct fsl_lbc_regs __iomem *lbc = ctrl->regs; 765 struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
766 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
765 struct nand_chip *chip = &priv->chip; 767 struct nand_chip *chip = &priv->chip;
766 768
767 dev_dbg(priv->dev, "eLBC Set Information for bank %d\n", priv->bank); 769 dev_dbg(priv->dev, "eLBC Set Information for bank %d\n", priv->bank);
@@ -790,7 +792,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
790 chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR | 792 chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR |
791 NAND_USE_FLASH_BBT; 793 NAND_USE_FLASH_BBT;
792 794
793 chip->controller = &ctrl->controller; 795 chip->controller = &elbc_fcm_ctrl->controller;
794 chip->priv = priv; 796 chip->priv = priv;
795 797
796 chip->ecc.read_page = fsl_elbc_read_page; 798 chip->ecc.read_page = fsl_elbc_read_page;
@@ -815,8 +817,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
815 817
816static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv) 818static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
817{ 819{
818 struct fsl_elbc_ctrl *ctrl = priv->ctrl; 820 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
819
820 nand_release(&priv->mtd); 821 nand_release(&priv->mtd);
821 822
822 kfree(priv->mtd.name); 823 kfree(priv->mtd.name);
@@ -824,18 +825,21 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
824 if (priv->vbase) 825 if (priv->vbase)
825 iounmap(priv->vbase); 826 iounmap(priv->vbase);
826 827
827 ctrl->chips[priv->bank] = NULL; 828 elbc_fcm_ctrl->chips[priv->bank] = NULL;
828 kfree(priv); 829 kfree(priv);
829 830 kfree(elbc_fcm_ctrl);
830 return 0; 831 return 0;
831} 832}
832 833
833static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl, 834static DEFINE_MUTEX(fsl_elbc_nand_mutex);
834 struct device_node *node) 835
836static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
835{ 837{
836 struct fsl_lbc_regs __iomem *lbc = ctrl->regs; 838 struct fsl_lbc_regs __iomem *lbc;
837 struct fsl_elbc_mtd *priv; 839 struct fsl_elbc_mtd *priv;
838 struct resource res; 840 struct resource res;
841 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl;
842
839#ifdef CONFIG_MTD_PARTITIONS 843#ifdef CONFIG_MTD_PARTITIONS
840 static const char *part_probe_types[] 844 static const char *part_probe_types[]
841 = { "cmdlinepart", "RedBoot", NULL }; 845 = { "cmdlinepart", "RedBoot", NULL };
@@ -843,11 +847,18 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
843#endif 847#endif
844 int ret; 848 int ret;
845 int bank; 849 int bank;
850 struct device *dev;
851 struct device_node *node = pdev->dev.of_node;
852
853 if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
854 return -ENODEV;
855 lbc = fsl_lbc_ctrl_dev->regs;
856 dev = fsl_lbc_ctrl_dev->dev;
846 857
847 /* get, allocate and map the memory resource */ 858 /* get, allocate and map the memory resource */
848 ret = of_address_to_resource(node, 0, &res); 859 ret = of_address_to_resource(node, 0, &res);
849 if (ret) { 860 if (ret) {
850 dev_err(ctrl->dev, "failed to get resource\n"); 861 dev_err(dev, "failed to get resource\n");
851 return ret; 862 return ret;
852 } 863 }
853 864
@@ -857,11 +868,11 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
857 (in_be32(&lbc->bank[bank].br) & BR_MSEL) == BR_MS_FCM && 868 (in_be32(&lbc->bank[bank].br) & BR_MSEL) == BR_MS_FCM &&
858 (in_be32(&lbc->bank[bank].br) & 869 (in_be32(&lbc->bank[bank].br) &
859 in_be32(&lbc->bank[bank].or) & BR_BA) 870 in_be32(&lbc->bank[bank].or) & BR_BA)
860 == res.start) 871 == fsl_lbc_addr(res.start))
861 break; 872 break;
862 873
863 if (bank >= MAX_BANKS) { 874 if (bank >= MAX_BANKS) {
864 dev_err(ctrl->dev, "address did not match any chip selects\n"); 875 dev_err(dev, "address did not match any chip selects\n");
865 return -ENODEV; 876 return -ENODEV;
866 } 877 }
867 878
@@ -869,14 +880,33 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
869 if (!priv) 880 if (!priv)
870 return -ENOMEM; 881 return -ENOMEM;
871 882
872 ctrl->chips[bank] = priv; 883 mutex_lock(&fsl_elbc_nand_mutex);
884 if (!fsl_lbc_ctrl_dev->nand) {
885 elbc_fcm_ctrl = kzalloc(sizeof(*elbc_fcm_ctrl), GFP_KERNEL);
886 if (!elbc_fcm_ctrl) {
887 dev_err(dev, "failed to allocate memory\n");
888 mutex_unlock(&fsl_elbc_nand_mutex);
889 ret = -ENOMEM;
890 goto err;
891 }
892 elbc_fcm_ctrl->counter++;
893
894 spin_lock_init(&elbc_fcm_ctrl->controller.lock);
895 init_waitqueue_head(&elbc_fcm_ctrl->controller.wq);
896 fsl_lbc_ctrl_dev->nand = elbc_fcm_ctrl;
897 } else {
898 elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
899 }
900 mutex_unlock(&fsl_elbc_nand_mutex);
901
902 elbc_fcm_ctrl->chips[bank] = priv;
873 priv->bank = bank; 903 priv->bank = bank;
874 priv->ctrl = ctrl; 904 priv->ctrl = fsl_lbc_ctrl_dev;
875 priv->dev = ctrl->dev; 905 priv->dev = dev;
876 906
877 priv->vbase = ioremap(res.start, resource_size(&res)); 907 priv->vbase = ioremap(res.start, resource_size(&res));
878 if (!priv->vbase) { 908 if (!priv->vbase) {
879 dev_err(ctrl->dev, "failed to map chip region\n"); 909 dev_err(dev, "failed to map chip region\n");
880 ret = -ENOMEM; 910 ret = -ENOMEM;
881 goto err; 911 goto err;
882 } 912 }
@@ -933,171 +963,53 @@ err:
933 return ret; 963 return ret;
934} 964}
935 965
936static int __devinit fsl_elbc_ctrl_init(struct fsl_elbc_ctrl *ctrl) 966static int fsl_elbc_nand_remove(struct platform_device *pdev)
937{ 967{
938 struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
939
940 /*
941 * NAND transactions can tie up the bus for a long time, so set the
942 * bus timeout to max by clearing LBCR[BMT] (highest base counter
943 * value) and setting LBCR[BMTPS] to the highest prescaler value.
944 */
945 clrsetbits_be32(&lbc->lbcr, LBCR_BMT, 15);
946
947 /* clear event registers */
948 setbits32(&lbc->ltesr, LTESR_NAND_MASK);
949 out_be32(&lbc->lteatr, 0);
950
951 /* Enable interrupts for any detected events */
952 out_be32(&lbc->lteir, LTESR_NAND_MASK);
953
954 ctrl->read_bytes = 0;
955 ctrl->index = 0;
956 ctrl->addr = NULL;
957
958 return 0;
959}
960
961static int fsl_elbc_ctrl_remove(struct platform_device *ofdev)
962{
963 struct fsl_elbc_ctrl *ctrl = dev_get_drvdata(&ofdev->dev);
964 int i; 968 int i;
965 969 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
966 for (i = 0; i < MAX_BANKS; i++) 970 for (i = 0; i < MAX_BANKS; i++)
967 if (ctrl->chips[i]) 971 if (elbc_fcm_ctrl->chips[i])
968 fsl_elbc_chip_remove(ctrl->chips[i]); 972 fsl_elbc_chip_remove(elbc_fcm_ctrl->chips[i]);
969 973
970 if (ctrl->irq) 974 mutex_lock(&fsl_elbc_nand_mutex);
971 free_irq(ctrl->irq, ctrl); 975 elbc_fcm_ctrl->counter--;
972 976 if (!elbc_fcm_ctrl->counter) {
973 if (ctrl->regs) 977 fsl_lbc_ctrl_dev->nand = NULL;
974 iounmap(ctrl->regs); 978 kfree(elbc_fcm_ctrl);
975
976 dev_set_drvdata(&ofdev->dev, NULL);
977 kfree(ctrl);
978 return 0;
979}
980
981/* NOTE: This interrupt is also used to report other localbus events,
982 * such as transaction errors on other chipselects. If we want to
983 * capture those, we'll need to move the IRQ code into a shared
984 * LBC driver.
985 */
986
987static irqreturn_t fsl_elbc_ctrl_irq(int irqno, void *data)
988{
989 struct fsl_elbc_ctrl *ctrl = data;
990 struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
991 __be32 status = in_be32(&lbc->ltesr) & LTESR_NAND_MASK;
992
993 if (status) {
994 out_be32(&lbc->ltesr, status);
995 out_be32(&lbc->lteatr, 0);
996
997 ctrl->irq_status = status;
998 smp_wmb();
999 wake_up(&ctrl->irq_wait);
1000
1001 return IRQ_HANDLED;
1002 } 979 }
1003 980 mutex_unlock(&fsl_elbc_nand_mutex);
1004 return IRQ_NONE;
1005}
1006
1007/* fsl_elbc_ctrl_probe
1008 *
1009 * called by device layer when it finds a device matching
1010 * one our driver can handled. This code allocates all of
1011 * the resources needed for the controller only. The
1012 * resources for the NAND banks themselves are allocated
1013 * in the chip probe function.
1014*/
1015
1016static int __devinit fsl_elbc_ctrl_probe(struct platform_device *ofdev,
1017 const struct of_device_id *match)
1018{
1019 struct device_node *child;
1020 struct fsl_elbc_ctrl *ctrl;
1021 int ret;
1022
1023 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
1024 if (!ctrl)
1025 return -ENOMEM;
1026
1027 dev_set_drvdata(&ofdev->dev, ctrl);
1028
1029 spin_lock_init(&ctrl->controller.lock);
1030 init_waitqueue_head(&ctrl->controller.wq);
1031 init_waitqueue_head(&ctrl->irq_wait);
1032
1033 ctrl->regs = of_iomap(ofdev->dev.of_node, 0);
1034 if (!ctrl->regs) {
1035 dev_err(&ofdev->dev, "failed to get memory region\n");
1036 ret = -ENODEV;
1037 goto err;
1038 }
1039
1040 ctrl->irq = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
1041 if (ctrl->irq == NO_IRQ) {
1042 dev_err(&ofdev->dev, "failed to get irq resource\n");
1043 ret = -ENODEV;
1044 goto err;
1045 }
1046
1047 ctrl->dev = &ofdev->dev;
1048
1049 ret = fsl_elbc_ctrl_init(ctrl);
1050 if (ret < 0)
1051 goto err;
1052
1053 ret = request_irq(ctrl->irq, fsl_elbc_ctrl_irq, 0, "fsl-elbc", ctrl);
1054 if (ret != 0) {
1055 dev_err(&ofdev->dev, "failed to install irq (%d)\n",
1056 ctrl->irq);
1057 ret = ctrl->irq;
1058 goto err;
1059 }
1060
1061 for_each_child_of_node(ofdev->dev.of_node, child)
1062 if (of_device_is_compatible(child, "fsl,elbc-fcm-nand"))
1063 fsl_elbc_chip_probe(ctrl, child);
1064 981
1065 return 0; 982 return 0;
1066 983
1067err:
1068 fsl_elbc_ctrl_remove(ofdev);
1069 return ret;
1070} 984}
1071 985
1072static const struct of_device_id fsl_elbc_match[] = { 986static const struct of_device_id fsl_elbc_nand_match[] = {
1073 { 987 { .compatible = "fsl,elbc-fcm-nand", },
1074 .compatible = "fsl,elbc",
1075 },
1076 {} 988 {}
1077}; 989};
1078 990
1079static struct of_platform_driver fsl_elbc_ctrl_driver = { 991static struct platform_driver fsl_elbc_nand_driver = {
1080 .driver = { 992 .driver = {
1081 .name = "fsl-elbc", 993 .name = "fsl,elbc-fcm-nand",
1082 .owner = THIS_MODULE, 994 .owner = THIS_MODULE,
1083 .of_match_table = fsl_elbc_match, 995 .of_match_table = fsl_elbc_nand_match,
1084 }, 996 },
1085 .probe = fsl_elbc_ctrl_probe, 997 .probe = fsl_elbc_nand_probe,
1086 .remove = fsl_elbc_ctrl_remove, 998 .remove = fsl_elbc_nand_remove,
1087}; 999};
1088 1000
1089static int __init fsl_elbc_init(void) 1001static int __init fsl_elbc_nand_init(void)
1090{ 1002{
1091 return of_register_platform_driver(&fsl_elbc_ctrl_driver); 1003 return platform_driver_register(&fsl_elbc_nand_driver);
1092} 1004}
1093 1005
1094static void __exit fsl_elbc_exit(void) 1006static void __exit fsl_elbc_nand_exit(void)
1095{ 1007{
1096 of_unregister_platform_driver(&fsl_elbc_ctrl_driver); 1008 platform_driver_unregister(&fsl_elbc_nand_driver);
1097} 1009}
1098 1010
1099module_init(fsl_elbc_init); 1011module_init(fsl_elbc_nand_init);
1100module_exit(fsl_elbc_exit); 1012module_exit(fsl_elbc_nand_exit);
1101 1013
1102MODULE_LICENSE("GPL"); 1014MODULE_LICENSE("GPL");
1103MODULE_AUTHOR("Freescale"); 1015MODULE_AUTHOR("Freescale");
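With this rework the FCM state (fsl_elbc_fcm_ctrl) is no longer owned by a dedicated eLBC platform driver: it is created lazily on the first chip probe, hung off the shared fsl_lbc_ctrl_dev, and freed when the last bank is removed, all serialised by fsl_elbc_nand_mutex. The pattern is roughly the one sketched below; demo_ctrl, demo_get() and demo_put() are illustrative names, not driver symbols.

#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_ctrl {
	int users;		/* how many banks currently use the state */
	/* ... shared controller state ... */
};

static DEFINE_MUTEX(demo_mutex);
static struct demo_ctrl *demo_shared;

/* Called from each bank's probe: allocate the shared state once. */
static struct demo_ctrl *demo_get(void)
{
	mutex_lock(&demo_mutex);
	if (!demo_shared)
		demo_shared = kzalloc(sizeof(*demo_shared), GFP_KERNEL);
	if (demo_shared)
		demo_shared->users++;
	mutex_unlock(&demo_mutex);
	return demo_shared;
}

/* Called from each bank's remove: free the state with the last user. */
static void demo_put(void)
{
	mutex_lock(&demo_mutex);
	if (demo_shared && --demo_shared->users == 0) {
		kfree(demo_shared);
		demo_shared = NULL;
	}
	mutex_unlock(&demo_mutex);
}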
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 4eff8b25e5af..efdcca94ce55 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -186,7 +186,7 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
186 if (!flash_np) 186 if (!flash_np)
187 return -ENODEV; 187 return -ENODEV;
188 188
189 fun->mtd.name = kasprintf(GFP_KERNEL, "%x.%s", io_res->start, 189 fun->mtd.name = kasprintf(GFP_KERNEL, "0x%llx.%s", (u64)io_res->start,
190 flash_np->name); 190 flash_np->name);
191 if (!fun->mtd.name) { 191 if (!fun->mtd.name) {
192 ret = -ENOMEM; 192 ret = -ENOMEM;
@@ -222,7 +222,7 @@ static int __devinit fun_probe(struct platform_device *ofdev,
222{ 222{
223 struct fsl_upm_nand *fun; 223 struct fsl_upm_nand *fun;
224 struct resource io_res; 224 struct resource io_res;
225 const uint32_t *prop; 225 const __be32 *prop;
226 int rnb_gpio; 226 int rnb_gpio;
227 int ret; 227 int ret;
228 int size; 228 int size;
@@ -270,7 +270,7 @@ static int __devinit fun_probe(struct platform_device *ofdev,
270 goto err1; 270 goto err1;
271 } 271 }
272 for (i = 0; i < fun->mchip_count; i++) 272 for (i = 0; i < fun->mchip_count; i++)
273 fun->mchip_offsets[i] = prop[i]; 273 fun->mchip_offsets[i] = be32_to_cpu(prop[i]);
274 } else { 274 } else {
275 fun->mchip_count = 1; 275 fun->mchip_count = 1;
276 } 276 }
@@ -295,13 +295,13 @@ static int __devinit fun_probe(struct platform_device *ofdev,
295 295
296 prop = of_get_property(ofdev->dev.of_node, "chip-delay", NULL); 296 prop = of_get_property(ofdev->dev.of_node, "chip-delay", NULL);
297 if (prop) 297 if (prop)
298 fun->chip_delay = *prop; 298 fun->chip_delay = be32_to_cpup(prop);
299 else 299 else
300 fun->chip_delay = 50; 300 fun->chip_delay = 50;
301 301
302 prop = of_get_property(ofdev->dev.of_node, "fsl,upm-wait-flags", &size); 302 prop = of_get_property(ofdev->dev.of_node, "fsl,upm-wait-flags", &size);
303 if (prop && size == sizeof(uint32_t)) 303 if (prop && size == sizeof(uint32_t))
304 fun->wait_flags = *prop; 304 fun->wait_flags = be32_to_cpup(prop);
305 else 305 else
306 fun->wait_flags = FSL_UPM_WAIT_RUN_PATTERN | 306 fun->wait_flags = FSL_UPM_WAIT_RUN_PATTERN |
307 FSL_UPM_WAIT_WRITE_BYTE; 307 FSL_UPM_WAIT_WRITE_BYTE;
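The fsl_upm.c fix is purely about endianness: device-tree cell values are stored big-endian, so they have to go through be32_to_cpu()/be32_to_cpup() rather than being dereferenced directly. A small illustration of the corrected access pattern, mirroring the "chip-delay" read in fun_probe(), is shown below (demo_chip_delay() itself is not part of the driver).

#include <linux/of.h>
#include <linux/types.h>

/* Read an optional u32 cell; fall back to 50 as fun_probe() does. */
static u32 demo_chip_delay(struct device_node *np)
{
	const __be32 *prop = of_get_property(np, "chip-delay", NULL);

	return prop ? be32_to_cpup(prop) : 50;
}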
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
new file mode 100644
index 000000000000..02edfba25b0c
--- /dev/null
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -0,0 +1,866 @@
1/*
2 * drivers/mtd/nand/fsmc_nand.c
3 *
4 * ST Microelectronics
5 * Flexible Static Memory Controller (FSMC)
6 * Driver for NAND portions
7 *
8 * Copyright © 2010 ST Microelectronics
9 * Vipin Kumar <vipin.kumar@st.com>
10 * Ashish Priyadarshi
11 *
12 * Based on drivers/mtd/nand/nomadik_nand.c
13 *
14 * This file is licensed under the terms of the GNU General Public
15 * License version 2. This program is licensed "as is" without any
16 * warranty of any kind, whether express or implied.
17 */
18
19#include <linux/clk.h>
20#include <linux/err.h>
21#include <linux/init.h>
22#include <linux/module.h>
23#include <linux/resource.h>
24#include <linux/sched.h>
25#include <linux/types.h>
26#include <linux/mtd/mtd.h>
27#include <linux/mtd/nand.h>
28#include <linux/mtd/nand_ecc.h>
29#include <linux/platform_device.h>
30#include <linux/mtd/partitions.h>
31#include <linux/io.h>
32#include <linux/slab.h>
33#include <linux/mtd/fsmc.h>
34#include <mtd/mtd-abi.h>
35
36static struct nand_ecclayout fsmc_ecc1_layout = {
37 .eccbytes = 24,
38 .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52,
39 66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116},
40 .oobfree = {
41 {.offset = 8, .length = 8},
42 {.offset = 24, .length = 8},
43 {.offset = 40, .length = 8},
44 {.offset = 56, .length = 8},
45 {.offset = 72, .length = 8},
46 {.offset = 88, .length = 8},
47 {.offset = 104, .length = 8},
48 {.offset = 120, .length = 8}
49 }
50};
51
52static struct nand_ecclayout fsmc_ecc4_lp_layout = {
53 .eccbytes = 104,
54 .eccpos = { 2, 3, 4, 5, 6, 7, 8,
55 9, 10, 11, 12, 13, 14,
56 18, 19, 20, 21, 22, 23, 24,
57 25, 26, 27, 28, 29, 30,
58 34, 35, 36, 37, 38, 39, 40,
59 41, 42, 43, 44, 45, 46,
60 50, 51, 52, 53, 54, 55, 56,
61 57, 58, 59, 60, 61, 62,
62 66, 67, 68, 69, 70, 71, 72,
63 73, 74, 75, 76, 77, 78,
64 82, 83, 84, 85, 86, 87, 88,
65 89, 90, 91, 92, 93, 94,
66 98, 99, 100, 101, 102, 103, 104,
67 105, 106, 107, 108, 109, 110,
68 114, 115, 116, 117, 118, 119, 120,
69 121, 122, 123, 124, 125, 126
70 },
71 .oobfree = {
72 {.offset = 15, .length = 3},
73 {.offset = 31, .length = 3},
74 {.offset = 47, .length = 3},
75 {.offset = 63, .length = 3},
76 {.offset = 79, .length = 3},
77 {.offset = 95, .length = 3},
78 {.offset = 111, .length = 3},
79 {.offset = 127, .length = 1}
80 }
81};
82
83/*
84 * ECC placement definitions in oobfree type format.
85 * There are 13 bytes of ecc for every 512 byte block and it has to be read
86 * consecutively and immediately after the 512 byte data block for hardware to
87 * generate the error bit offsets in 512 byte data.
88 * Managing the ecc bytes in the following way makes it easier for software to
89 * read ecc bytes consecutive to data bytes. This way is similar to
90 * oobfree structure maintained already in generic nand driver
91 */
92static struct fsmc_eccplace fsmc_ecc4_lp_place = {
93 .eccplace = {
94 {.offset = 2, .length = 13},
95 {.offset = 18, .length = 13},
96 {.offset = 34, .length = 13},
97 {.offset = 50, .length = 13},
98 {.offset = 66, .length = 13},
99 {.offset = 82, .length = 13},
100 {.offset = 98, .length = 13},
101 {.offset = 114, .length = 13}
102 }
103};
104
105static struct nand_ecclayout fsmc_ecc4_sp_layout = {
106 .eccbytes = 13,
107 .eccpos = { 0, 1, 2, 3, 6, 7, 8,
108 9, 10, 11, 12, 13, 14
109 },
110 .oobfree = {
111 {.offset = 15, .length = 1},
112 }
113};
114
115static struct fsmc_eccplace fsmc_ecc4_sp_place = {
116 .eccplace = {
117 {.offset = 0, .length = 4},
118 {.offset = 6, .length = 9}
119 }
120};
121
122/*
123 * Default partition tables to be used if the partition information not
124 * provided through platform data
125 */
126#define PARTITION(n, off, sz) {.name = n, .offset = off, .size = sz}
127
128/*
129 * Default partition layout for small page(= 512 bytes) devices
130 * Size for "Root file system" is updated in driver based on actual device size
131 */
132static struct mtd_partition partition_info_16KB_blk[] = {
133 PARTITION("X-loader", 0, 4 * 0x4000),
134 PARTITION("U-Boot", 0x10000, 20 * 0x4000),
135 PARTITION("Kernel", 0x60000, 256 * 0x4000),
136 PARTITION("Root File System", 0x460000, 0),
137};
138
139/*
140 * Default partition layout for large page(> 512 bytes) devices
141 * Size for "Root file system" is updated in driver based on actual device size
142 */
143static struct mtd_partition partition_info_128KB_blk[] = {
144 PARTITION("X-loader", 0, 4 * 0x20000),
145 PARTITION("U-Boot", 0x80000, 12 * 0x20000),
146 PARTITION("Kernel", 0x200000, 48 * 0x20000),
147 PARTITION("Root File System", 0x800000, 0),
148};
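
/*
 * Boards normally override these defaults through platform data. A minimal,
 * hypothetical sketch (the field names match the pdata usage in the probe
 * routine below; partition names and sizes are illustrative only):
 *
 *	static struct mtd_partition board_nand_parts[] = {
 *		{ .name = "boot",   .offset = 0,        .size = 0x100000 },
 *		{ .name = "rootfs", .offset = 0x100000, .size = 0x700000 },
 *	};
 *
 *	static struct fsmc_nand_platform_data board_nand_pdata = {
 *		.partitions	= board_nand_parts,
 *		.nr_partitions	= ARRAY_SIZE(board_nand_parts),
 *		.width		= FSMC_NAND_BW16,	[* 16 bit bus *]
 *		.bank		= 0,
 *	};
 *
 * passed to the "fsmc-nand" platform device via dev.platform_data.
 */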
149
150#ifdef CONFIG_MTD_CMDLINE_PARTS
151const char *part_probes[] = { "cmdlinepart", NULL };
152#endif
153
154/**
155 * struct fsmc_nand_data - structure for FSMC NAND device state
156 *
157 * @mtd: MTD info for a NAND flash.
158 * @nand: Chip related info for a NAND flash.
159 * @partitions: Partition info for a NAND Flash.
160 * @nr_partitions:	Total number of partitions of a NAND flash.
161 *
162 * @ecc_place: ECC placing locations in oobfree type format.
163 * @bank: Bank number for probed device.
164 * @clk: Clock structure for FSMC.
165 *
166 * @data_va: NAND port for Data.
167 * @cmd_va: NAND port for Command.
168 * @addr_va: NAND port for Address.
169 * @regs_va: FSMC regs base address.
170 */
171struct fsmc_nand_data {
172 struct mtd_info mtd;
173 struct nand_chip nand;
174 struct mtd_partition *partitions;
175 unsigned int nr_partitions;
176
177 struct fsmc_eccplace *ecc_place;
178 unsigned int bank;
179 struct clk *clk;
180
181 struct resource *resregs;
182 struct resource *rescmd;
183 struct resource *resaddr;
184 struct resource *resdata;
185
186 void __iomem *data_va;
187 void __iomem *cmd_va;
188 void __iomem *addr_va;
189 void __iomem *regs_va;
190
191 void (*select_chip)(uint32_t bank, uint32_t busw);
192};
193
194/* Assert CS signal based on chipnr */
195static void fsmc_select_chip(struct mtd_info *mtd, int chipnr)
196{
197 struct nand_chip *chip = mtd->priv;
198 struct fsmc_nand_data *host;
199
200 host = container_of(mtd, struct fsmc_nand_data, mtd);
201
202 switch (chipnr) {
203 case -1:
204 chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
205 break;
206 case 0:
207 case 1:
208 case 2:
209 case 3:
210 if (host->select_chip)
211 host->select_chip(chipnr,
212 chip->options & NAND_BUSWIDTH_16);
213 break;
214
215 default:
216 BUG();
217 }
218}
219
220/*
221 * fsmc_cmd_ctrl - for facilitating hardware access
222 * This routine allows hardware specific access to the control lines (ALE, CLE)
223 */
224static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
225{
226 struct nand_chip *this = mtd->priv;
227 struct fsmc_nand_data *host = container_of(mtd,
228 struct fsmc_nand_data, mtd);
229 struct fsmc_regs *regs = host->regs_va;
230 unsigned int bank = host->bank;
231
232 if (ctrl & NAND_CTRL_CHANGE) {
233 if (ctrl & NAND_CLE) {
234 this->IO_ADDR_R = (void __iomem *)host->cmd_va;
235 this->IO_ADDR_W = (void __iomem *)host->cmd_va;
236 } else if (ctrl & NAND_ALE) {
237 this->IO_ADDR_R = (void __iomem *)host->addr_va;
238 this->IO_ADDR_W = (void __iomem *)host->addr_va;
239 } else {
240 this->IO_ADDR_R = (void __iomem *)host->data_va;
241 this->IO_ADDR_W = (void __iomem *)host->data_va;
242 }
243
244 if (ctrl & NAND_NCE) {
245 writel(readl(&regs->bank_regs[bank].pc) | FSMC_ENABLE,
246 &regs->bank_regs[bank].pc);
247 } else {
248 writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ENABLE,
249 &regs->bank_regs[bank].pc);
250 }
251 }
252
253 mb();
254
255 if (cmd != NAND_CMD_NONE)
256 writeb(cmd, this->IO_ADDR_W);
257}
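
/*
 * For illustration only: a page read issued by the generic NAND core
 * (nand_command/nand_command_lp in nand_base.c) typically reaches this
 * callback as a sequence roughly like:
 *
 *	cmd_ctrl(mtd, NAND_CMD_READ0,       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
 *	cmd_ctrl(mtd, <first address byte>, NAND_NCE | NAND_ALE | NAND_CTRL_CHANGE);
 *	cmd_ctrl(mtd, <next address bytes>, NAND_NCE | NAND_ALE);
 *	cmd_ctrl(mtd, NAND_CMD_NONE,        NAND_NCE | NAND_CTRL_CHANGE);
 *
 * so the command byte goes to cmd_va, the address bytes to addr_va, and
 * subsequent data transfers use data_va.
 */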
258
259/*
260 * fsmc_nand_setup - FSMC (Flexible Static Memory Controller) init routine
261 *
262 * This routine initializes timing parameters related to NAND memory access in
263 * FSMC registers
264 */
265static void __init fsmc_nand_setup(struct fsmc_regs *regs, uint32_t bank,
266 uint32_t busw)
267{
268 uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
269
270 if (busw)
271 writel(value | FSMC_DEVWID_16, &regs->bank_regs[bank].pc);
272 else
273 writel(value | FSMC_DEVWID_8, &regs->bank_regs[bank].pc);
274
275 writel(readl(&regs->bank_regs[bank].pc) | FSMC_TCLR_1 | FSMC_TAR_1,
276 &regs->bank_regs[bank].pc);
277 writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
278 &regs->bank_regs[bank].comm);
279 writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
280 &regs->bank_regs[bank].attrib);
281}
282
283/*
284 * fsmc_enable_hwecc - Enables Hardware ECC through FSMC registers
285 */
286static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
287{
288 struct fsmc_nand_data *host = container_of(mtd,
289 struct fsmc_nand_data, mtd);
290 struct fsmc_regs *regs = host->regs_va;
291 uint32_t bank = host->bank;
292
293 writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ECCPLEN_256,
294 &regs->bank_regs[bank].pc);
295 writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ECCEN,
296 &regs->bank_regs[bank].pc);
297 writel(readl(&regs->bank_regs[bank].pc) | FSMC_ECCEN,
298 &regs->bank_regs[bank].pc);
299}
300
301/*
302 * fsmc_read_hwecc_ecc4 - Hardware ECC calculator for ecc4 option supported by
303 * FSMC. ECC is 13 bytes for 512 bytes of data (supports error correction up
304 * to a max of 8 bits)
305 */
306static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
307 uint8_t *ecc)
308{
309 struct fsmc_nand_data *host = container_of(mtd,
310 struct fsmc_nand_data, mtd);
311 struct fsmc_regs *regs = host->regs_va;
312 uint32_t bank = host->bank;
313 uint32_t ecc_tmp;
314 unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
315
316 do {
317 if (readl(&regs->bank_regs[bank].sts) & FSMC_CODE_RDY)
318 break;
319 else
320 cond_resched();
321 } while (!time_after_eq(jiffies, deadline));
322
323 ecc_tmp = readl(&regs->bank_regs[bank].ecc1);
324 ecc[0] = (uint8_t) (ecc_tmp >> 0);
325 ecc[1] = (uint8_t) (ecc_tmp >> 8);
326 ecc[2] = (uint8_t) (ecc_tmp >> 16);
327 ecc[3] = (uint8_t) (ecc_tmp >> 24);
328
329 ecc_tmp = readl(&regs->bank_regs[bank].ecc2);
330 ecc[4] = (uint8_t) (ecc_tmp >> 0);
331 ecc[5] = (uint8_t) (ecc_tmp >> 8);
332 ecc[6] = (uint8_t) (ecc_tmp >> 16);
333 ecc[7] = (uint8_t) (ecc_tmp >> 24);
334
335 ecc_tmp = readl(&regs->bank_regs[bank].ecc3);
336 ecc[8] = (uint8_t) (ecc_tmp >> 0);
337 ecc[9] = (uint8_t) (ecc_tmp >> 8);
338 ecc[10] = (uint8_t) (ecc_tmp >> 16);
339 ecc[11] = (uint8_t) (ecc_tmp >> 24);
340
341 ecc_tmp = readl(&regs->bank_regs[bank].sts);
342 ecc[12] = (uint8_t) (ecc_tmp >> 16);
343
344 return 0;
345}
346
347/*
348 * fsmc_read_hwecc_ecc1 - Hardware ECC calculator for ecc1 option supported by
349 * FSMC. ECC is 3 bytes for 512 bytes of data (supports error correction up
350 * to a max of 1 bit)
351 */
352static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
353 uint8_t *ecc)
354{
355 struct fsmc_nand_data *host = container_of(mtd,
356 struct fsmc_nand_data, mtd);
357 struct fsmc_regs *regs = host->regs_va;
358 uint32_t bank = host->bank;
359 uint32_t ecc_tmp;
360
361 ecc_tmp = readl(&regs->bank_regs[bank].ecc1);
362 ecc[0] = (uint8_t) (ecc_tmp >> 0);
363 ecc[1] = (uint8_t) (ecc_tmp >> 8);
364 ecc[2] = (uint8_t) (ecc_tmp >> 16);
365
366 return 0;
367}
368
369/*
370 * fsmc_read_page_hwecc
371 * @mtd: mtd info structure
372 * @chip: nand chip info structure
373 * @buf: buffer to store read data
374 * @page: page number to read
375 *
376 * This routine is needed for fsmc version 8 as reading from NAND chip has to
377 * be performed in a strict sequence as follows:
378 * data (512 bytes) -> ecc (13 bytes)
379 * After this read, fsmc hardware generates and reports error data bits (up to
380 * a max of 8 bits)
381 */
382static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
383 uint8_t *buf, int page)
384{
385 struct fsmc_nand_data *host = container_of(mtd,
386 struct fsmc_nand_data, mtd);
387 struct fsmc_eccplace *ecc_place = host->ecc_place;
388 int i, j, s, stat, eccsize = chip->ecc.size;
389 int eccbytes = chip->ecc.bytes;
390 int eccsteps = chip->ecc.steps;
391 uint8_t *p = buf;
392 uint8_t *ecc_calc = chip->buffers->ecccalc;
393 uint8_t *ecc_code = chip->buffers->ecccode;
394 int off, len, group = 0;
395 /*
396	 * ecc_oob is intentionally taken as uint16_t. In 16-bit devices, we
397	 * end up reading 14 bytes (7 words) from oob. The local array is
398	 * used to maintain word alignment.
399 */
400 uint16_t ecc_oob[7];
401 uint8_t *oob = (uint8_t *)&ecc_oob[0];
402
403 for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
404
405 chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
406 chip->ecc.hwctl(mtd, NAND_ECC_READ);
407 chip->read_buf(mtd, p, eccsize);
408
409 for (j = 0; j < eccbytes;) {
410 off = ecc_place->eccplace[group].offset;
411 len = ecc_place->eccplace[group].length;
412 group++;
413
414 /*
415			 * length is intentionally rounded up to a multiple of 2
416			 * so that at least 13 bytes are read even on 16-bit NAND
417			 * devices
418 */
419 len = roundup(len, 2);
420 chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
421 chip->read_buf(mtd, oob + j, len);
422 j += len;
423 }
424
425 memcpy(&ecc_code[i], oob, 13);
426 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
427
428 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
429 if (stat < 0)
430 mtd->ecc_stats.failed++;
431 else
432 mtd->ecc_stats.corrected += stat;
433 }
434
435 return 0;
436}
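
/*
 * Summary of the per-step sequence above (illustration only): for every
 * 512 byte chunk s of the page, the driver issues NAND_CMD_READ0 at column
 * s * 512, enables the hardware ECC, reads the 512 data bytes, then issues
 * NAND_CMD_READOOB at the chunk's ECC offset (taken from fsmc_eccplace) and
 * reads the 13 ECC bytes (length rounded up to an even number), before
 * handing the chunk to ecc.calculate() and ecc.correct().
 */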
437
438/*
439 * fsmc_correct_data
440 * @mtd: mtd info structure
441 * @dat: buffer of read data
442 * @read_ecc: ecc read from device spare area
443 * @calc_ecc: ecc calculated from read data
444 *
445 * calc_ecc is 104 bits of information containing a maximum of 8 error
446 * offsets of 13 bits each within 512 bytes of read data.
447 */
448static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat,
449 uint8_t *read_ecc, uint8_t *calc_ecc)
450{
451 struct fsmc_nand_data *host = container_of(mtd,
452 struct fsmc_nand_data, mtd);
453 struct fsmc_regs *regs = host->regs_va;
454 unsigned int bank = host->bank;
455 uint16_t err_idx[8];
456 uint64_t ecc_data[2];
457 uint32_t num_err, i;
458
459 /* The calculated ecc is actually the correction index in data */
460 memcpy(ecc_data, calc_ecc, 13);
461
462 /*
463 * ------------------- calc_ecc[] bit wise -----------|--13 bits--|
464 * |---idx[7]--|--.....-----|---idx[2]--||---idx[1]--||---idx[0]--|
465 *
466	 * calc_ecc is 104 bits containing a maximum of 8 error offsets of 13
467	 * bits each. calc_ecc is copied into a uint64_t array and the error
468	 * offset indexes are populated in err_idx. Index 4 straddles the two
469	 * 64 bit words (12 bits from ecc_data[0], 1 bit from ecc_data[1]).
470 */
471 for (i = 0; i < 8; i++) {
472 if (i == 4) {
473 err_idx[4] = ((ecc_data[1] & 0x1) << 12) | ecc_data[0];
474 ecc_data[1] >>= 1;
475 continue;
476 }
477 err_idx[i] = (ecc_data[i/4] & 0x1FFF);
478 ecc_data[i/4] >>= 13;
479 }
480
481 num_err = (readl(&regs->bank_regs[bank].sts) >> 10) & 0xF;
482
483 if (num_err == 0xF)
484 return -EBADMSG;
485
486 i = 0;
487 while (num_err--) {
488 change_bit(0, (unsigned long *)&err_idx[i]);
489 change_bit(1, (unsigned long *)&err_idx[i]);
490
491 if (err_idx[i] <= 512 * 8) {
492 change_bit(err_idx[i], (unsigned long *)dat);
493 i++;
494 }
495 }
496 return i;
497}
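
/*
 * Worked example (illustration only): assume the status register reports a
 * single error (num_err == 1) and the lowest 13 bits of calc_ecc are 0xA,
 * so err_idx[0] == 10.  Toggling bits 0 and 1 of the index gives 9, which
 * is within the 512 * 8 bit range, so change_bit(9, dat) flips bit 1 of
 * byte 1 in the data buffer and the function returns 1 corrected error.
 */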
498
499/*
500 * fsmc_nand_probe - Probe function
501 * @pdev: platform device structure
502 */
503static int __init fsmc_nand_probe(struct platform_device *pdev)
504{
505 struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
506 struct fsmc_nand_data *host;
507 struct mtd_info *mtd;
508 struct nand_chip *nand;
509 struct fsmc_regs *regs;
510 struct resource *res;
511 int nr_parts, ret = 0;
512
513 if (!pdata) {
514 dev_err(&pdev->dev, "platform data is NULL\n");
515 return -EINVAL;
516 }
517
518 /* Allocate memory for the device structure (and zero it) */
519 host = kzalloc(sizeof(*host), GFP_KERNEL);
520 if (!host) {
521 dev_err(&pdev->dev, "failed to allocate device structure\n");
522 return -ENOMEM;
523 }
524
525 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
526 if (!res) {
527 ret = -EIO;
528 goto err_probe1;
529 }
530
531 host->resdata = request_mem_region(res->start, resource_size(res),
532 pdev->name);
533 if (!host->resdata) {
534 ret = -EIO;
535 goto err_probe1;
536 }
537
538 host->data_va = ioremap(res->start, resource_size(res));
539 if (!host->data_va) {
540 ret = -EIO;
541 goto err_probe1;
542 }
543
544 host->resaddr = request_mem_region(res->start + PLAT_NAND_ALE,
545 resource_size(res), pdev->name);
546 if (!host->resaddr) {
547 ret = -EIO;
548 goto err_probe1;
549 }
550
551 host->addr_va = ioremap(res->start + PLAT_NAND_ALE, resource_size(res));
552 if (!host->addr_va) {
553 ret = -EIO;
554 goto err_probe1;
555 }
556
557 host->rescmd = request_mem_region(res->start + PLAT_NAND_CLE,
558 resource_size(res), pdev->name);
559 if (!host->rescmd) {
560 ret = -EIO;
561 goto err_probe1;
562 }
563
564 host->cmd_va = ioremap(res->start + PLAT_NAND_CLE, resource_size(res));
565 if (!host->cmd_va) {
566 ret = -EIO;
567 goto err_probe1;
568 }
569
570 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
571 if (!res) {
572 ret = -EIO;
573 goto err_probe1;
574 }
575
576 host->resregs = request_mem_region(res->start, resource_size(res),
577 pdev->name);
578 if (!host->resregs) {
579 ret = -EIO;
580 goto err_probe1;
581 }
582
583 host->regs_va = ioremap(res->start, resource_size(res));
584 if (!host->regs_va) {
585 ret = -EIO;
586 goto err_probe1;
587 }
588
589 host->clk = clk_get(&pdev->dev, NULL);
590 if (IS_ERR(host->clk)) {
591 dev_err(&pdev->dev, "failed to fetch block clock\n");
592 ret = PTR_ERR(host->clk);
593 host->clk = NULL;
594 goto err_probe1;
595 }
596
597 ret = clk_enable(host->clk);
598 if (ret)
599 goto err_probe1;
600
601 host->bank = pdata->bank;
602 host->select_chip = pdata->select_bank;
603 regs = host->regs_va;
604
605 /* Link all private pointers */
606 mtd = &host->mtd;
607 nand = &host->nand;
608 mtd->priv = nand;
609 nand->priv = host;
610
611 host->mtd.owner = THIS_MODULE;
612 nand->IO_ADDR_R = host->data_va;
613 nand->IO_ADDR_W = host->data_va;
614 nand->cmd_ctrl = fsmc_cmd_ctrl;
615 nand->chip_delay = 30;
616
617 nand->ecc.mode = NAND_ECC_HW;
618 nand->ecc.hwctl = fsmc_enable_hwecc;
619 nand->ecc.size = 512;
620 nand->options = pdata->options;
621 nand->select_chip = fsmc_select_chip;
622
623 if (pdata->width == FSMC_NAND_BW16)
624 nand->options |= NAND_BUSWIDTH_16;
625
626 fsmc_nand_setup(regs, host->bank, nand->options & NAND_BUSWIDTH_16);
627
628 if (get_fsmc_version(host->regs_va) == FSMC_VER8) {
629 nand->ecc.read_page = fsmc_read_page_hwecc;
630 nand->ecc.calculate = fsmc_read_hwecc_ecc4;
631 nand->ecc.correct = fsmc_correct_data;
632 nand->ecc.bytes = 13;
633 } else {
634 nand->ecc.calculate = fsmc_read_hwecc_ecc1;
635 nand->ecc.correct = nand_correct_data;
636 nand->ecc.bytes = 3;
637 }
638
639 /*
640 * Scan to find existence of the device
641 */
642 if (nand_scan_ident(&host->mtd, 1, NULL)) {
643 ret = -ENXIO;
644 dev_err(&pdev->dev, "No NAND Device found!\n");
645 goto err_probe;
646 }
647
648 if (get_fsmc_version(host->regs_va) == FSMC_VER8) {
649 if (host->mtd.writesize == 512) {
650 nand->ecc.layout = &fsmc_ecc4_sp_layout;
651 host->ecc_place = &fsmc_ecc4_sp_place;
652 } else {
653 nand->ecc.layout = &fsmc_ecc4_lp_layout;
654 host->ecc_place = &fsmc_ecc4_lp_place;
655 }
656 } else {
657 nand->ecc.layout = &fsmc_ecc1_layout;
658 }
659
660 /* Second stage of scan to fill MTD data-structures */
661 if (nand_scan_tail(&host->mtd)) {
662 ret = -ENXIO;
663 goto err_probe;
664 }
665
666 /*
667 * The partition information is taken from, in decreasing order of precedence:
668 *
669 * the command line passed through the bootloader,
670 * platform data,
671 * default partition information present in the driver.
672 */
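/*
 * A hypothetical command line override could look like (the mtd-id "nand"
 * matches host->mtd.name set below; partition names and sizes are
 * illustrative only):
 *
 *	mtdparts=nand:256k(xloader),1m(uboot),6m(kernel),-(rootfs)
 */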
673#ifdef CONFIG_MTD_PARTITIONS
674#ifdef CONFIG_MTD_CMDLINE_PARTS
675 /*
676 * Check if partition info passed via command line
677 */
678 host->mtd.name = "nand";
679 nr_parts = parse_mtd_partitions(&host->mtd, part_probes,
680 &host->partitions, 0);
681 if (nr_parts > 0) {
682 host->nr_partitions = nr_parts;
683 } else {
684#endif
685 /*
686		 * Check if partition info passed via platform data
687 */
688 if (pdata->partitions) {
689 host->partitions = pdata->partitions;
690 host->nr_partitions = pdata->nr_partitions;
691 } else {
692 struct mtd_partition *partition;
693 int i;
694
695			/* Select the default partition info */
696 switch (host->mtd.size) {
697 case 0x01000000:
698 case 0x02000000:
699 case 0x04000000:
700 host->partitions = partition_info_16KB_blk;
701 host->nr_partitions =
702 sizeof(partition_info_16KB_blk) /
703 sizeof(struct mtd_partition);
704 break;
705 case 0x08000000:
706 case 0x10000000:
707 case 0x20000000:
708 case 0x40000000:
709 host->partitions = partition_info_128KB_blk;
710 host->nr_partitions =
711 sizeof(partition_info_128KB_blk) /
712 sizeof(struct mtd_partition);
713 break;
714 default:
715 ret = -ENXIO;
716 pr_err("Unsupported NAND size\n");
717 goto err_probe;
718 }
719
720 partition = host->partitions;
721 for (i = 0; i < host->nr_partitions; i++, partition++) {
722 if (partition->size == 0) {
723 partition->size = host->mtd.size -
724 partition->offset;
725 break;
726 }
727 }
728 }
729#ifdef CONFIG_MTD_CMDLINE_PARTS
730 }
731#endif
732
733 if (host->partitions) {
734 ret = add_mtd_partitions(&host->mtd, host->partitions,
735 host->nr_partitions);
736 if (ret)
737 goto err_probe;
738 }
739#else
740 dev_info(&pdev->dev, "Registering %s as whole device\n", mtd->name);
741 if (!add_mtd_device(mtd)) {
742 ret = -ENXIO;
743 goto err_probe;
744 }
745#endif
746
747 platform_set_drvdata(pdev, host);
748 dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");
749 return 0;
750
751err_probe:
752 clk_disable(host->clk);
753err_probe1:
754 if (host->clk)
755 clk_put(host->clk);
756 if (host->regs_va)
757 iounmap(host->regs_va);
758 if (host->resregs)
759 release_mem_region(host->resregs->start,
760 resource_size(host->resregs));
761 if (host->cmd_va)
762 iounmap(host->cmd_va);
763 if (host->rescmd)
764 release_mem_region(host->rescmd->start,
765 resource_size(host->rescmd));
766 if (host->addr_va)
767 iounmap(host->addr_va);
768 if (host->resaddr)
769 release_mem_region(host->resaddr->start,
770 resource_size(host->resaddr));
771 if (host->data_va)
772 iounmap(host->data_va);
773 if (host->resdata)
774 release_mem_region(host->resdata->start,
775 resource_size(host->resdata));
776
777 kfree(host);
778 return ret;
779}
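
/*
 * Hypothetical platform resources matching what the probe routine above
 * expects (base addresses are board specific and purely illustrative):
 *
 *	static struct resource board_fsmc_nand_resources[] = {
 *		{ .name = "nand_data", .start = ..., .end = ...,
 *		  .flags = IORESOURCE_MEM },
 *		{ .name = "fsmc_regs", .start = ..., .end = ...,
 *		  .flags = IORESOURCE_MEM },
 *	};
 *
 * The address (ALE) and command (CLE) ports are derived from "nand_data"
 * by adding PLAT_NAND_ALE and PLAT_NAND_CLE respectively.
 */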
780
781/*
782 * Clean up routine
783 */
784static int fsmc_nand_remove(struct platform_device *pdev)
785{
786 struct fsmc_nand_data *host = platform_get_drvdata(pdev);
787
788 platform_set_drvdata(pdev, NULL);
789
790 if (host) {
791#ifdef CONFIG_MTD_PARTITIONS
792 del_mtd_partitions(&host->mtd);
793#else
794 del_mtd_device(&host->mtd);
795#endif
796 clk_disable(host->clk);
797 clk_put(host->clk);
798
799 iounmap(host->regs_va);
800 release_mem_region(host->resregs->start,
801 resource_size(host->resregs));
802 iounmap(host->cmd_va);
803 release_mem_region(host->rescmd->start,
804 resource_size(host->rescmd));
805 iounmap(host->addr_va);
806 release_mem_region(host->resaddr->start,
807 resource_size(host->resaddr));
808 iounmap(host->data_va);
809 release_mem_region(host->resdata->start,
810 resource_size(host->resdata));
811
812 kfree(host);
813 }
814 return 0;
815}
816
817#ifdef CONFIG_PM
818static int fsmc_nand_suspend(struct device *dev)
819{
820 struct fsmc_nand_data *host = dev_get_drvdata(dev);
821 if (host)
822 clk_disable(host->clk);
823 return 0;
824}
825
826static int fsmc_nand_resume(struct device *dev)
827{
828 struct fsmc_nand_data *host = dev_get_drvdata(dev);
829 if (host)
830 clk_enable(host->clk);
831 return 0;
832}
833
834static const struct dev_pm_ops fsmc_nand_pm_ops = {
835 .suspend = fsmc_nand_suspend,
836 .resume = fsmc_nand_resume,
837};
838#endif
839
840static struct platform_driver fsmc_nand_driver = {
841 .remove = fsmc_nand_remove,
842 .driver = {
843 .owner = THIS_MODULE,
844 .name = "fsmc-nand",
845#ifdef CONFIG_PM
846 .pm = &fsmc_nand_pm_ops,
847#endif
848 },
849};
850
851static int __init fsmc_nand_init(void)
852{
853 return platform_driver_probe(&fsmc_nand_driver,
854 fsmc_nand_probe);
855}
856module_init(fsmc_nand_init);
857
858static void __exit fsmc_nand_exit(void)
859{
860 platform_driver_unregister(&fsmc_nand_driver);
861}
862module_exit(fsmc_nand_exit);
863
864MODULE_LICENSE("GPL");
865MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi");
866MODULE_DESCRIPTION("NAND driver for SPEAr Platforms");
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index df0c1da4ff49..469e649c911c 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -568,6 +568,7 @@ static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
568 uint rcw_width; 568 uint rcw_width;
569 uint rcwh; 569 uint rcwh;
570 uint romloc, ps; 570 uint romloc, ps;
571 int ret = 0;
571 572
572 rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset"); 573 rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset");
573 if (!rmnode) { 574 if (!rmnode) {
@@ -579,7 +580,8 @@ static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
579 rm = of_iomap(rmnode, 0); 580 rm = of_iomap(rmnode, 0);
580 if (!rm) { 581 if (!rm) {
581 dev_err(prv->dev, "Error mapping reset module node!\n"); 582 dev_err(prv->dev, "Error mapping reset module node!\n");
582 return -EBUSY; 583 ret = -EBUSY;
584 goto out;
583 } 585 }
584 586
585 rcwh = in_be32(&rm->rcwhr); 587 rcwh = in_be32(&rm->rcwhr);
@@ -628,8 +630,9 @@ static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
628 rcw_width * 8, rcw_pagesize, 630 rcw_width * 8, rcw_pagesize,
629 rcw_sparesize); 631 rcw_sparesize);
630 iounmap(rm); 632 iounmap(rm);
633out:
631 of_node_put(rmnode); 634 of_node_put(rmnode);
632 return 0; 635 return ret;
633} 636}
634 637
635/* Free driver resources */ 638/* Free driver resources */
@@ -660,7 +663,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op,
660#endif 663#endif
661 struct nand_chip *chip; 664 struct nand_chip *chip;
662 unsigned long regs_paddr, regs_size; 665 unsigned long regs_paddr, regs_size;
663 const uint *chips_no; 666 const __be32 *chips_no;
664 int resettime = 0; 667 int resettime = 0;
665 int retval = 0; 668 int retval = 0;
666 int rev, len; 669 int rev, len;
@@ -803,7 +806,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op,
803 } 806 }
804 807
805 /* Detect NAND chips */ 808 /* Detect NAND chips */
806 if (nand_scan(mtd, *chips_no)) { 809 if (nand_scan(mtd, be32_to_cpup(chips_no))) {
807 dev_err(dev, "NAND Flash not found !\n"); 810 dev_err(dev, "NAND Flash not found !\n");
808 devm_free_irq(dev, prv->irq, mtd); 811 devm_free_irq(dev, prv->irq, mtd);
809 retval = -ENXIO; 812 retval = -ENXIO;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index d551ddd9537a..1f75a1b1f7c3 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -45,7 +45,7 @@
45#include <linux/interrupt.h> 45#include <linux/interrupt.h>
46#include <linux/bitops.h> 46#include <linux/bitops.h>
47#include <linux/leds.h> 47#include <linux/leds.h>
48#include <asm/io.h> 48#include <linux/io.h>
49 49
50#ifdef CONFIG_MTD_PARTITIONS 50#ifdef CONFIG_MTD_PARTITIONS
51#include <linux/mtd/partitions.h> 51#include <linux/mtd/partitions.h>
@@ -59,7 +59,7 @@ static struct nand_ecclayout nand_oob_8 = {
59 {.offset = 3, 59 {.offset = 3,
60 .length = 2}, 60 .length = 2},
61 {.offset = 6, 61 {.offset = 6,
62 .length = 2}} 62 .length = 2} }
63}; 63};
64 64
65static struct nand_ecclayout nand_oob_16 = { 65static struct nand_ecclayout nand_oob_16 = {
@@ -67,7 +67,7 @@ static struct nand_ecclayout nand_oob_16 = {
67 .eccpos = {0, 1, 2, 3, 6, 7}, 67 .eccpos = {0, 1, 2, 3, 6, 7},
68 .oobfree = { 68 .oobfree = {
69 {.offset = 8, 69 {.offset = 8,
70 . length = 8}} 70 . length = 8} }
71}; 71};
72 72
73static struct nand_ecclayout nand_oob_64 = { 73static struct nand_ecclayout nand_oob_64 = {
@@ -78,7 +78,7 @@ static struct nand_ecclayout nand_oob_64 = {
78 56, 57, 58, 59, 60, 61, 62, 63}, 78 56, 57, 58, 59, 60, 61, 62, 63},
79 .oobfree = { 79 .oobfree = {
80 {.offset = 2, 80 {.offset = 2,
81 .length = 38}} 81 .length = 38} }
82}; 82};
83 83
84static struct nand_ecclayout nand_oob_128 = { 84static struct nand_ecclayout nand_oob_128 = {
@@ -92,7 +92,7 @@ static struct nand_ecclayout nand_oob_128 = {
92 120, 121, 122, 123, 124, 125, 126, 127}, 92 120, 121, 122, 123, 124, 125, 126, 127},
93 .oobfree = { 93 .oobfree = {
94 {.offset = 2, 94 {.offset = 2,
95 .length = 78}} 95 .length = 78} }
96}; 96};
97 97
98static int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, 98static int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd,
@@ -612,7 +612,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
612 NAND_CTRL_CLE | NAND_CTRL_CHANGE); 612 NAND_CTRL_CLE | NAND_CTRL_CHANGE);
613 chip->cmd_ctrl(mtd, 613 chip->cmd_ctrl(mtd,
614 NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); 614 NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
615 while (!(chip->read_byte(mtd) & NAND_STATUS_READY)) ; 615 while (!(chip->read_byte(mtd) & NAND_STATUS_READY))
616 ;
616 return; 617 return;
617 618
618 /* This applies to read commands */ 619 /* This applies to read commands */
@@ -718,7 +719,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
718 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); 719 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
719 chip->cmd_ctrl(mtd, NAND_CMD_NONE, 720 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
720 NAND_NCE | NAND_CTRL_CHANGE); 721 NAND_NCE | NAND_CTRL_CHANGE);
721 while (!(chip->read_byte(mtd) & NAND_STATUS_READY)) ; 722 while (!(chip->read_byte(mtd) & NAND_STATUS_READY))
723 ;
722 return; 724 return;
723 725
724 case NAND_CMD_RNDOUT: 726 case NAND_CMD_RNDOUT:
@@ -784,7 +786,7 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
784 spinlock_t *lock = &chip->controller->lock; 786 spinlock_t *lock = &chip->controller->lock;
785 wait_queue_head_t *wq = &chip->controller->wq; 787 wait_queue_head_t *wq = &chip->controller->wq;
786 DECLARE_WAITQUEUE(wait, current); 788 DECLARE_WAITQUEUE(wait, current);
787 retry: 789retry:
788 spin_lock(lock); 790 spin_lock(lock);
789 791
790 /* Hardware controller shared among independent devices */ 792 /* Hardware controller shared among independent devices */
@@ -834,7 +836,7 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
834 break; 836 break;
835 } 837 }
836 mdelay(1); 838 mdelay(1);
837 } 839 }
838} 840}
839 841
840/** 842/**
@@ -980,6 +982,7 @@ out:
980 982
981 return ret; 983 return ret;
982} 984}
985EXPORT_SYMBOL(nand_unlock);
983 986
984/** 987/**
985 * nand_lock - [REPLACEABLE] locks all blocks present in the device 988 * nand_lock - [REPLACEABLE] locks all blocks present in the device
@@ -1049,6 +1052,7 @@ out:
1049 1052
1050 return ret; 1053 return ret;
1051} 1054}
1055EXPORT_SYMBOL(nand_lock);
1052 1056
1053/** 1057/**
1054 * nand_read_page_raw - [Intern] read raw page data without ecc 1058 * nand_read_page_raw - [Intern] read raw page data without ecc
@@ -1076,8 +1080,9 @@ static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1076 * 1080 *
1077 * We need a special oob layout and handling even when OOB isn't used. 1081 * We need a special oob layout and handling even when OOB isn't used.
1078 */ 1082 */
1079static int nand_read_page_raw_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1083static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1080 uint8_t *buf, int page) 1084 struct nand_chip *chip,
1085 uint8_t *buf, int page)
1081{ 1086{
1082 int eccsize = chip->ecc.size; 1087 int eccsize = chip->ecc.size;
1083 int eccbytes = chip->ecc.bytes; 1088 int eccbytes = chip->ecc.bytes;
@@ -1158,7 +1163,8 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1158 * @readlen: data length 1163 * @readlen: data length
1159 * @bufpoi: buffer to store read data 1164 * @bufpoi: buffer to store read data
1160 */ 1165 */
1161static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi) 1166static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1167 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi)
1162{ 1168{
1163 int start_step, end_step, num_steps; 1169 int start_step, end_step, num_steps;
1164 uint32_t *eccpos = chip->ecc.layout->eccpos; 1170 uint32_t *eccpos = chip->ecc.layout->eccpos;
@@ -1166,6 +1172,7 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, uint3
1166 int data_col_addr, i, gaps = 0; 1172 int data_col_addr, i, gaps = 0;
1167 int datafrag_len, eccfrag_len, aligned_len, aligned_pos; 1173 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
1168 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1; 1174 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
1175 int index = 0;
1169 1176
1170 /* Column address wihin the page aligned to ECC size (256bytes). */ 1177 /* Column address wihin the page aligned to ECC size (256bytes). */
1171 start_step = data_offs / chip->ecc.size; 1178 start_step = data_offs / chip->ecc.size;
@@ -1204,26 +1211,30 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, uint3
1204 } else { 1211 } else {
1205 /* send the command to read the particular ecc bytes */ 1212 /* send the command to read the particular ecc bytes */
1206 /* take care about buswidth alignment in read_buf */ 1213 /* take care about buswidth alignment in read_buf */
1207 aligned_pos = eccpos[start_step * chip->ecc.bytes] & ~(busw - 1); 1214 index = start_step * chip->ecc.bytes;
1215
1216 aligned_pos = eccpos[index] & ~(busw - 1);
1208 aligned_len = eccfrag_len; 1217 aligned_len = eccfrag_len;
1209 if (eccpos[start_step * chip->ecc.bytes] & (busw - 1)) 1218 if (eccpos[index] & (busw - 1))
1210 aligned_len++; 1219 aligned_len++;
1211 if (eccpos[(start_step + num_steps) * chip->ecc.bytes] & (busw - 1)) 1220 if (eccpos[index + (num_steps * chip->ecc.bytes)] & (busw - 1))
1212 aligned_len++; 1221 aligned_len++;
1213 1222
1214 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize + aligned_pos, -1); 1223 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
1224 mtd->writesize + aligned_pos, -1);
1215 chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len); 1225 chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
1216 } 1226 }
1217 1227
1218 for (i = 0; i < eccfrag_len; i++) 1228 for (i = 0; i < eccfrag_len; i++)
1219 chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + start_step * chip->ecc.bytes]]; 1229 chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + index]];
1220 1230
1221 p = bufpoi + data_col_addr; 1231 p = bufpoi + data_col_addr;
1222 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) { 1232 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
1223 int stat; 1233 int stat;
1224 1234
1225 stat = chip->ecc.correct(mtd, p, &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]); 1235 stat = chip->ecc.correct(mtd, p,
1226 if (stat == -1) 1236 &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
1237 if (stat < 0)
1227 mtd->ecc_stats.failed++; 1238 mtd->ecc_stats.failed++;
1228 else 1239 else
1229 mtd->ecc_stats.corrected += stat; 1240 mtd->ecc_stats.corrected += stat;
@@ -1390,7 +1401,7 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1390static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob, 1401static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
1391 struct mtd_oob_ops *ops, size_t len) 1402 struct mtd_oob_ops *ops, size_t len)
1392{ 1403{
1393 switch(ops->mode) { 1404 switch (ops->mode) {
1394 1405
1395 case MTD_OOB_PLACE: 1406 case MTD_OOB_PLACE:
1396 case MTD_OOB_RAW: 1407 case MTD_OOB_RAW:
@@ -1402,7 +1413,7 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
1402 uint32_t boffs = 0, roffs = ops->ooboffs; 1413 uint32_t boffs = 0, roffs = ops->ooboffs;
1403 size_t bytes = 0; 1414 size_t bytes = 0;
1404 1415
1405 for(; free->length && len; free++, len -= bytes) { 1416 for (; free->length && len; free++, len -= bytes) {
1406 /* Read request not from offset 0 ? */ 1417 /* Read request not from offset 0 ? */
1407 if (unlikely(roffs)) { 1418 if (unlikely(roffs)) {
1408 if (roffs >= free->length) { 1419 if (roffs >= free->length) {
@@ -1466,7 +1477,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1466 buf = ops->datbuf; 1477 buf = ops->datbuf;
1467 oob = ops->oobbuf; 1478 oob = ops->oobbuf;
1468 1479
1469 while(1) { 1480 while (1) {
1470 bytes = min(mtd->writesize - col, readlen); 1481 bytes = min(mtd->writesize - col, readlen);
1471 aligned = (bytes == mtd->writesize); 1482 aligned = (bytes == mtd->writesize);
1472 1483
@@ -1484,7 +1495,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1484 ret = chip->ecc.read_page_raw(mtd, chip, 1495 ret = chip->ecc.read_page_raw(mtd, chip,
1485 bufpoi, page); 1496 bufpoi, page);
1486 else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob) 1497 else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
1487 ret = chip->ecc.read_subpage(mtd, chip, col, bytes, bufpoi); 1498 ret = chip->ecc.read_subpage(mtd, chip,
1499 col, bytes, bufpoi);
1488 else 1500 else
1489 ret = chip->ecc.read_page(mtd, chip, bufpoi, 1501 ret = chip->ecc.read_page(mtd, chip, bufpoi,
1490 page); 1502 page);
@@ -1493,7 +1505,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1493 1505
1494 /* Transfer not aligned data */ 1506 /* Transfer not aligned data */
1495 if (!aligned) { 1507 if (!aligned) {
1496 if (!NAND_SUBPAGE_READ(chip) && !oob) 1508 if (!NAND_SUBPAGE_READ(chip) && !oob &&
1509 !(mtd->ecc_stats.failed - stats.failed))
1497 chip->pagebuf = realpage; 1510 chip->pagebuf = realpage;
1498 memcpy(buf, chip->buffers->databuf + col, bytes); 1511 memcpy(buf, chip->buffers->databuf + col, bytes);
1499 } 1512 }
@@ -1791,7 +1804,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1791 realpage = (int)(from >> chip->page_shift); 1804 realpage = (int)(from >> chip->page_shift);
1792 page = realpage & chip->pagemask; 1805 page = realpage & chip->pagemask;
1793 1806
1794 while(1) { 1807 while (1) {
1795 sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd); 1808 sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
1796 1809
1797 len = min(len, readlen); 1810 len = min(len, readlen);
@@ -1861,7 +1874,7 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
1861 1874
1862 nand_get_device(chip, mtd, FL_READING); 1875 nand_get_device(chip, mtd, FL_READING);
1863 1876
1864 switch(ops->mode) { 1877 switch (ops->mode) {
1865 case MTD_OOB_PLACE: 1878 case MTD_OOB_PLACE:
1866 case MTD_OOB_AUTO: 1879 case MTD_OOB_AUTO:
1867 case MTD_OOB_RAW: 1880 case MTD_OOB_RAW:
@@ -1876,7 +1889,7 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
1876 else 1889 else
1877 ret = nand_do_read_ops(mtd, from, ops); 1890 ret = nand_do_read_ops(mtd, from, ops);
1878 1891
1879 out: 1892out:
1880 nand_release_device(mtd); 1893 nand_release_device(mtd);
1881 return ret; 1894 return ret;
1882} 1895}
@@ -1905,8 +1918,9 @@ static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1905 * 1918 *
1906 * We need a special oob layout and handling even when ECC isn't checked. 1919 * We need a special oob layout and handling even when ECC isn't checked.
1907 */ 1920 */
1908static void nand_write_page_raw_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1921static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
1909 const uint8_t *buf) 1922 struct nand_chip *chip,
1923 const uint8_t *buf)
1910{ 1924{
1911 int eccsize = chip->ecc.size; 1925 int eccsize = chip->ecc.size;
1912 int eccbytes = chip->ecc.bytes; 1926 int eccbytes = chip->ecc.bytes;
@@ -2099,7 +2113,7 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2099static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len, 2113static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
2100 struct mtd_oob_ops *ops) 2114 struct mtd_oob_ops *ops)
2101{ 2115{
2102 switch(ops->mode) { 2116 switch (ops->mode) {
2103 2117
2104 case MTD_OOB_PLACE: 2118 case MTD_OOB_PLACE:
2105 case MTD_OOB_RAW: 2119 case MTD_OOB_RAW:
@@ -2111,7 +2125,7 @@ static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
2111 uint32_t boffs = 0, woffs = ops->ooboffs; 2125 uint32_t boffs = 0, woffs = ops->ooboffs;
2112 size_t bytes = 0; 2126 size_t bytes = 0;
2113 2127
2114 for(; free->length && len; free++, len -= bytes) { 2128 for (; free->length && len; free++, len -= bytes) {
2115 /* Write request not from offset 0 ? */ 2129 /* Write request not from offset 0 ? */
2116 if (unlikely(woffs)) { 2130 if (unlikely(woffs)) {
2117 if (woffs >= free->length) { 2131 if (woffs >= free->length) {
@@ -2137,7 +2151,7 @@ static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
2137 return NULL; 2151 return NULL;
2138} 2152}
2139 2153
2140#define NOTALIGNED(x) (x & (chip->subpagesize - 1)) != 0 2154#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
2141 2155
2142/** 2156/**
2143 * nand_do_write_ops - [Internal] NAND write with ECC 2157 * nand_do_write_ops - [Internal] NAND write with ECC
@@ -2200,10 +2214,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2200 memset(chip->oob_poi, 0xff, mtd->oobsize); 2214 memset(chip->oob_poi, 0xff, mtd->oobsize);
2201 2215
2202 /* Don't allow multipage oob writes with offset */ 2216 /* Don't allow multipage oob writes with offset */
2203 if (ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) 2217 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
2204 return -EINVAL; 2218 return -EINVAL;
2205 2219
2206 while(1) { 2220 while (1) {
2207 int bytes = mtd->writesize; 2221 int bytes = mtd->writesize;
2208 int cached = writelen > bytes && page != blockmask; 2222 int cached = writelen > bytes && page != blockmask;
2209 uint8_t *wbuf = buf; 2223 uint8_t *wbuf = buf;
@@ -2431,7 +2445,7 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
2431 2445
2432 nand_get_device(chip, mtd, FL_WRITING); 2446 nand_get_device(chip, mtd, FL_WRITING);
2433 2447
2434 switch(ops->mode) { 2448 switch (ops->mode) {
2435 case MTD_OOB_PLACE: 2449 case MTD_OOB_PLACE:
2436 case MTD_OOB_AUTO: 2450 case MTD_OOB_AUTO:
2437 case MTD_OOB_RAW: 2451 case MTD_OOB_RAW:
@@ -2446,7 +2460,7 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
2446 else 2460 else
2447 ret = nand_do_write_ops(mtd, to, ops); 2461 ret = nand_do_write_ops(mtd, to, ops);
2448 2462
2449 out: 2463out:
2450 nand_release_device(mtd); 2464 nand_release_device(mtd);
2451 return ret; 2465 return ret;
2452} 2466}
@@ -2511,7 +2525,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2511{ 2525{
2512 int page, status, pages_per_block, ret, chipnr; 2526 int page, status, pages_per_block, ret, chipnr;
2513 struct nand_chip *chip = mtd->priv; 2527 struct nand_chip *chip = mtd->priv;
2514 loff_t rewrite_bbt[NAND_MAX_CHIPS]={0}; 2528 loff_t rewrite_bbt[NAND_MAX_CHIPS] = {0};
2515 unsigned int bbt_masked_page = 0xffffffff; 2529 unsigned int bbt_masked_page = 0xffffffff;
2516 loff_t len; 2530 loff_t len;
2517 2531
@@ -2632,7 +2646,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2632 } 2646 }
2633 instr->state = MTD_ERASE_DONE; 2647 instr->state = MTD_ERASE_DONE;
2634 2648
2635 erase_exit: 2649erase_exit:
2636 2650
2637 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO; 2651 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
2638 2652
@@ -2706,7 +2720,8 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
2706 struct nand_chip *chip = mtd->priv; 2720 struct nand_chip *chip = mtd->priv;
2707 int ret; 2721 int ret;
2708 2722
2709 if ((ret = nand_block_isbad(mtd, ofs))) { 2723 ret = nand_block_isbad(mtd, ofs);
2724 if (ret) {
2710 /* If it was bad already, return success and do nothing. */ 2725 /* If it was bad already, return success and do nothing. */
2711 if (ret > 0) 2726 if (ret > 0)
2712 return 0; 2727 return 0;
@@ -2787,15 +2802,115 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
2787} 2802}
2788 2803
2789/* 2804/*
2805 * sanitize ONFI strings so we can safely print them
2806 */
2807static void sanitize_string(uint8_t *s, size_t len)
2808{
2809 ssize_t i;
2810
2811 /* null terminate */
2812 s[len - 1] = 0;
2813
2814 /* remove non printable chars */
2815 for (i = 0; i < len - 1; i++) {
2816 if (s[i] < ' ' || s[i] > 127)
2817 s[i] = '?';
2818 }
2819
2820 /* remove trailing spaces */
2821 strim(s);
2822}
2823
2824static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
2825{
2826 int i;
2827 while (len--) {
2828 crc ^= *p++ << 8;
2829 for (i = 0; i < 8; i++)
2830 crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
2831 }
2832
2833 return crc;
2834}
2835
2836/*
2837 * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise
2838 */
2839static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2840 int busw)
2841{
2842 struct nand_onfi_params *p = &chip->onfi_params;
2843 int i;
2844 int val;
2845
2846 /* try ONFI for unknow chip or LP */
2847 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
2848 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
2849 chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
2850 return 0;
2851
2852 printk(KERN_INFO "ONFI flash detected\n");
2853 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
2854 for (i = 0; i < 3; i++) {
2855 chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
2856 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
2857 le16_to_cpu(p->crc)) {
2858 printk(KERN_INFO "ONFI param page %d valid\n", i);
2859 break;
2860 }
2861 }
2862
2863 if (i == 3)
2864 return 0;
2865
2866 /* check version */
2867 val = le16_to_cpu(p->revision);
2868 if (val == 1 || val > (1 << 4)) {
2869 printk(KERN_INFO "%s: unsupported ONFI version: %d\n",
2870 __func__, val);
2871 return 0;
2872 }
2873
2874 if (val & (1 << 4))
2875 chip->onfi_version = 22;
2876 else if (val & (1 << 3))
2877 chip->onfi_version = 21;
2878 else if (val & (1 << 2))
2879 chip->onfi_version = 20;
2880 else
2881 chip->onfi_version = 10;
2882
2883 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
2884 sanitize_string(p->model, sizeof(p->model));
2885 if (!mtd->name)
2886 mtd->name = p->model;
2887 mtd->writesize = le32_to_cpu(p->byte_per_page);
2888 mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
2889 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
2890 chip->chipsize = le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
2891 busw = 0;
2892 if (le16_to_cpu(p->features) & 1)
2893 busw = NAND_BUSWIDTH_16;
2894
2895 chip->options &= ~NAND_CHIPOPTIONS_MSK;
2896 chip->options |= (NAND_NO_READRDY |
2897 NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK;
2898
2899 return 1;
2900}
2901
2902/*
2790 * Get the flash and manufacturer id and lookup if the type is supported 2903 * Get the flash and manufacturer id and lookup if the type is supported
2791 */ 2904 */
2792static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, 2905static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2793 struct nand_chip *chip, 2906 struct nand_chip *chip,
2794 int busw, int *maf_id, 2907 int busw,
2908 int *maf_id, int *dev_id,
2795 struct nand_flash_dev *type) 2909 struct nand_flash_dev *type)
2796{ 2910{
2797 int i, dev_id, maf_idx; 2911 int i, maf_idx;
2798 u8 id_data[8]; 2912 u8 id_data[8];
2913 int ret;
2799 2914
2800 /* Select the device */ 2915 /* Select the device */
2801 chip->select_chip(mtd, 0); 2916 chip->select_chip(mtd, 0);
@@ -2811,7 +2926,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2811 2926
2812 /* Read manufacturer and device IDs */ 2927 /* Read manufacturer and device IDs */
2813 *maf_id = chip->read_byte(mtd); 2928 *maf_id = chip->read_byte(mtd);
2814 dev_id = chip->read_byte(mtd); 2929 *dev_id = chip->read_byte(mtd);
2815 2930
2816 /* Try again to make sure, as some systems the bus-hold or other 2931 /* Try again to make sure, as some systems the bus-hold or other
2817 * interface concerns can cause random data which looks like a 2932 * interface concerns can cause random data which looks like a
@@ -2821,15 +2936,13 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2821 2936
2822 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 2937 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
2823 2938
2824 /* Read entire ID string */ 2939 for (i = 0; i < 2; i++)
2825
2826 for (i = 0; i < 8; i++)
2827 id_data[i] = chip->read_byte(mtd); 2940 id_data[i] = chip->read_byte(mtd);
2828 2941
2829 if (id_data[0] != *maf_id || id_data[1] != dev_id) { 2942 if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
2830 printk(KERN_INFO "%s: second ID read did not match " 2943 printk(KERN_INFO "%s: second ID read did not match "
2831 "%02x,%02x against %02x,%02x\n", __func__, 2944 "%02x,%02x against %02x,%02x\n", __func__,
2832 *maf_id, dev_id, id_data[0], id_data[1]); 2945 *maf_id, *dev_id, id_data[0], id_data[1]);
2833 return ERR_PTR(-ENODEV); 2946 return ERR_PTR(-ENODEV);
2834 } 2947 }
2835 2948
@@ -2837,8 +2950,23 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2837 type = nand_flash_ids; 2950 type = nand_flash_ids;
2838 2951
2839 for (; type->name != NULL; type++) 2952 for (; type->name != NULL; type++)
2840 if (dev_id == type->id) 2953 if (*dev_id == type->id)
2841 break; 2954 break;
2955
2956 chip->onfi_version = 0;
2957 if (!type->name || !type->pagesize) {
2958 /* Check is chip is ONFI compliant */
2959 ret = nand_flash_detect_onfi(mtd, chip, busw);
2960 if (ret)
2961 goto ident_done;
2962 }
2963
2964 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
2965
2966 /* Read entire ID string */
2967
2968 for (i = 0; i < 8; i++)
2969 id_data[i] = chip->read_byte(mtd);
2842 2970
2843 if (!type->name) 2971 if (!type->name)
2844 return ERR_PTR(-ENODEV); 2972 return ERR_PTR(-ENODEV);
@@ -2848,8 +2976,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2848 2976
2849 chip->chipsize = (uint64_t)type->chipsize << 20; 2977 chip->chipsize = (uint64_t)type->chipsize << 20;
2850 2978
2851 /* Newer devices have all the information in additional id bytes */ 2979 if (!type->pagesize && chip->init_size) {
2852 if (!type->pagesize) { 2980 /* set the pagesize, oobsize, erasesize by the driver*/
2981 busw = chip->init_size(mtd, chip, id_data);
2982 } else if (!type->pagesize) {
2853 int extid; 2983 int extid;
2854 /* The 3rd id byte holds MLC / multichip data */ 2984 /* The 3rd id byte holds MLC / multichip data */
2855 chip->cellinfo = id_data[2]; 2985 chip->cellinfo = id_data[2];
@@ -2859,7 +2989,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2859 /* 2989 /*
2860 * Field definitions are in the following datasheets: 2990 * Field definitions are in the following datasheets:
2861 * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32) 2991 * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
2862 * New style (6 byte ID): Samsung K9GAG08U0D (p.40) 2992 * New style (6 byte ID): Samsung K9GBG08U0M (p.40)
2863 * 2993 *
2864 * Check for wraparound + Samsung ID + nonzero 6th byte 2994 * Check for wraparound + Samsung ID + nonzero 6th byte
2865 * to decide what to do. 2995 * to decide what to do.
@@ -2872,7 +3002,20 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2872 mtd->writesize = 2048 << (extid & 0x03); 3002 mtd->writesize = 2048 << (extid & 0x03);
2873 extid >>= 2; 3003 extid >>= 2;
2874 /* Calc oobsize */ 3004 /* Calc oobsize */
2875 mtd->oobsize = (extid & 0x03) == 0x01 ? 128 : 218; 3005 switch (extid & 0x03) {
3006 case 1:
3007 mtd->oobsize = 128;
3008 break;
3009 case 2:
3010 mtd->oobsize = 218;
3011 break;
3012 case 3:
3013 mtd->oobsize = 400;
3014 break;
3015 default:
3016 mtd->oobsize = 436;
3017 break;
3018 }
2876 extid >>= 2; 3019 extid >>= 2;
2877 /* Calc blocksize */ 3020 /* Calc blocksize */
2878 mtd->erasesize = (128 * 1024) << 3021 mtd->erasesize = (128 * 1024) <<
@@ -2900,7 +3043,35 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2900 mtd->writesize = type->pagesize; 3043 mtd->writesize = type->pagesize;
2901 mtd->oobsize = mtd->writesize / 32; 3044 mtd->oobsize = mtd->writesize / 32;
2902 busw = type->options & NAND_BUSWIDTH_16; 3045 busw = type->options & NAND_BUSWIDTH_16;
3046
3047 /*
3048 * Check for Spansion/AMD ID + repeating 5th, 6th byte since
3049 * some Spansion chips have erasesize that conflicts with size
3050 * listed in nand_ids table
3051 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
3052 */
3053 if (*maf_id == NAND_MFR_AMD && id_data[4] != 0x00 &&
3054 id_data[5] == 0x00 && id_data[6] == 0x00 &&
3055 id_data[7] == 0x00 && mtd->writesize == 512) {
3056 mtd->erasesize = 128 * 1024;
3057 mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
3058 }
2903 } 3059 }
3060 /* Get chip options, preserve non chip based options */
3061 chip->options &= ~NAND_CHIPOPTIONS_MSK;
3062 chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
3063
3064 /* Check if chip is a not a samsung device. Do not clear the
3065 * options for chips which are not having an extended id.
3066 */
3067 if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
3068 chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
3069ident_done:
3070
3071 /*
3072 * Set chip as a default. Board drivers can override it, if necessary
3073 */
3074 chip->options |= NAND_NO_AUTOINCR;
2904 3075
2905 /* Try to identify manufacturer */ 3076 /* Try to identify manufacturer */
2906 for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) { 3077 for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
@@ -2915,7 +3086,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2915 if (busw != (chip->options & NAND_BUSWIDTH_16)) { 3086 if (busw != (chip->options & NAND_BUSWIDTH_16)) {
2916 printk(KERN_INFO "NAND device: Manufacturer ID:" 3087 printk(KERN_INFO "NAND device: Manufacturer ID:"
2917 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, 3088 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
2918 dev_id, nand_manuf_ids[maf_idx].name, mtd->name); 3089 *dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
2919 printk(KERN_WARNING "NAND bus width %d instead %d bit\n", 3090 printk(KERN_WARNING "NAND bus width %d instead %d bit\n",
2920 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8, 3091 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
2921 busw ? 16 : 8); 3092 busw ? 16 : 8);
@@ -2931,8 +3102,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2931 ffs(mtd->erasesize) - 1; 3102 ffs(mtd->erasesize) - 1;
2932 if (chip->chipsize & 0xffffffff) 3103 if (chip->chipsize & 0xffffffff)
2933 chip->chip_shift = ffs((unsigned)chip->chipsize) - 1; 3104 chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
2934 else 3105 else {
2935 chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1; 3106 chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
3107 chip->chip_shift += 32 - 1;
3108 }
2936 3109
2937 /* Set the bad block position */ 3110 /* Set the bad block position */
2938 if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16)) 3111 if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
@@ -2940,27 +3113,12 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2940 else 3113 else
2941 chip->badblockpos = NAND_SMALL_BADBLOCK_POS; 3114 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
2942 3115
2943 /* Get chip options, preserve non chip based options */
2944 chip->options &= ~NAND_CHIPOPTIONS_MSK;
2945 chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
2946
2947 /*
2948 * Set chip as a default. Board drivers can override it, if necessary
2949 */
2950 chip->options |= NAND_NO_AUTOINCR;
2951
2952 /* Check if chip is a not a samsung device. Do not clear the
2953 * options for chips which are not having an extended id.
2954 */
2955 if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
2956 chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
2957
2958 /* 3116 /*
2959 * Bad block marker is stored in the last page of each block 3117 * Bad block marker is stored in the last page of each block
2960 * on Samsung and Hynix MLC devices; stored in first two pages 3118 * on Samsung and Hynix MLC devices; stored in first two pages
2961 * of each block on Micron devices with 2KiB pages and on 3119 * of each block on Micron devices with 2KiB pages and on
2962 * SLC Samsung, Hynix, and AMD/Spansion. All others scan only 3120 * SLC Samsung, Hynix, Toshiba and AMD/Spansion. All others scan
2963 * the first page. 3121 * only the first page.
2964 */ 3122 */
2965 if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) && 3123 if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
2966 (*maf_id == NAND_MFR_SAMSUNG || 3124 (*maf_id == NAND_MFR_SAMSUNG ||
@@ -2969,6 +3127,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2969 else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) && 3127 else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
2970 (*maf_id == NAND_MFR_SAMSUNG || 3128 (*maf_id == NAND_MFR_SAMSUNG ||
2971 *maf_id == NAND_MFR_HYNIX || 3129 *maf_id == NAND_MFR_HYNIX ||
3130 *maf_id == NAND_MFR_TOSHIBA ||
2972 *maf_id == NAND_MFR_AMD)) || 3131 *maf_id == NAND_MFR_AMD)) ||
2973 (mtd->writesize == 2048 && 3132 (mtd->writesize == 2048 &&
2974 *maf_id == NAND_MFR_MICRON)) 3133 *maf_id == NAND_MFR_MICRON))
@@ -2994,9 +3153,11 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2994 if (mtd->writesize > 512 && chip->cmdfunc == nand_command) 3153 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
2995 chip->cmdfunc = nand_command_lp; 3154 chip->cmdfunc = nand_command_lp;
2996 3155
3156 /* TODO onfi flash name */
2997 printk(KERN_INFO "NAND device: Manufacturer ID:" 3157 printk(KERN_INFO "NAND device: Manufacturer ID:"
2998 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, dev_id, 3158 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id,
2999 nand_manuf_ids[maf_idx].name, type->name); 3159 nand_manuf_ids[maf_idx].name,
3160 chip->onfi_version ? type->name : chip->onfi_params.model);
3000 3161
3001 return type; 3162 return type;
3002} 3163}
@@ -3015,7 +3176,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
3015int nand_scan_ident(struct mtd_info *mtd, int maxchips, 3176int nand_scan_ident(struct mtd_info *mtd, int maxchips,
3016 struct nand_flash_dev *table) 3177 struct nand_flash_dev *table)
3017{ 3178{
3018 int i, busw, nand_maf_id; 3179 int i, busw, nand_maf_id, nand_dev_id;
3019 struct nand_chip *chip = mtd->priv; 3180 struct nand_chip *chip = mtd->priv;
3020 struct nand_flash_dev *type; 3181 struct nand_flash_dev *type;
3021 3182
@@ -3025,7 +3186,8 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
3025 nand_set_defaults(chip, busw); 3186 nand_set_defaults(chip, busw);
3026 3187
3027 /* Read the flash type */ 3188 /* Read the flash type */
3028 type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id, table); 3189 type = nand_get_flash_type(mtd, chip, busw,
3190 &nand_maf_id, &nand_dev_id, table);
3029 3191
3030 if (IS_ERR(type)) { 3192 if (IS_ERR(type)) {
3031 if (!(chip->options & NAND_SCAN_SILENT_NODEV)) 3193 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
@@ -3043,7 +3205,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
3043 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 3205 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
3044 /* Read manufacturer and device IDs */ 3206 /* Read manufacturer and device IDs */
3045 if (nand_maf_id != chip->read_byte(mtd) || 3207 if (nand_maf_id != chip->read_byte(mtd) ||
3046 type->id != chip->read_byte(mtd)) 3208 nand_dev_id != chip->read_byte(mtd))
3047 break; 3209 break;
3048 } 3210 }
3049 if (i > 1) 3211 if (i > 1)
@@ -3055,6 +3217,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
3055 3217
3056 return 0; 3218 return 0;
3057} 3219}
3220EXPORT_SYMBOL(nand_scan_ident);
3058 3221
3059 3222
3060/** 3223/**
@@ -3219,7 +3382,7 @@ int nand_scan_tail(struct mtd_info *mtd)
3219 * mode 3382 * mode
3220 */ 3383 */
3221 chip->ecc.steps = mtd->writesize / chip->ecc.size; 3384 chip->ecc.steps = mtd->writesize / chip->ecc.size;
3222 if(chip->ecc.steps * chip->ecc.size != mtd->writesize) { 3385 if (chip->ecc.steps * chip->ecc.size != mtd->writesize) {
3223 printk(KERN_WARNING "Invalid ecc parameters\n"); 3386 printk(KERN_WARNING "Invalid ecc parameters\n");
3224 BUG(); 3387 BUG();
3225 } 3388 }
@@ -3231,7 +3394,7 @@ int nand_scan_tail(struct mtd_info *mtd)
3231 */ 3394 */
3232 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && 3395 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
3233 !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) { 3396 !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
3234 switch(chip->ecc.steps) { 3397 switch (chip->ecc.steps) {
3235 case 2: 3398 case 2:
3236 mtd->subpage_sft = 1; 3399 mtd->subpage_sft = 1;
3237 break; 3400 break;
@@ -3283,10 +3446,11 @@ int nand_scan_tail(struct mtd_info *mtd)
3283 /* Build bad block table */ 3446 /* Build bad block table */
3284 return chip->scan_bbt(mtd); 3447 return chip->scan_bbt(mtd);
3285} 3448}
3449EXPORT_SYMBOL(nand_scan_tail);
3286 3450
3287/* is_module_text_address() isn't exported, and it's mostly a pointless 3451/* is_module_text_address() isn't exported, and it's mostly a pointless
3288 test if this is a module _anyway_ -- they'd have to try _really_ hard 3452 * test if this is a module _anyway_ -- they'd have to try _really_ hard
3289 to call us from in-kernel code if the core NAND support is modular. */ 3453 * to call us from in-kernel code if the core NAND support is modular. */
3290#ifdef MODULE 3454#ifdef MODULE
3291#define caller_is_module() (1) 3455#define caller_is_module() (1)
3292#else 3456#else
@@ -3322,6 +3486,7 @@ int nand_scan(struct mtd_info *mtd, int maxchips)
3322 ret = nand_scan_tail(mtd); 3486 ret = nand_scan_tail(mtd);
3323 return ret; 3487 return ret;
3324} 3488}
3489EXPORT_SYMBOL(nand_scan);
3325 3490
3326/** 3491/**
3327 * nand_release - [NAND Interface] Free resources held by the NAND device 3492 * nand_release - [NAND Interface] Free resources held by the NAND device
@@ -3348,12 +3513,6 @@ void nand_release(struct mtd_info *mtd)
3348 & NAND_BBT_DYNAMICSTRUCT) 3513 & NAND_BBT_DYNAMICSTRUCT)
3349 kfree(chip->badblock_pattern); 3514 kfree(chip->badblock_pattern);
3350} 3515}
3351
3352EXPORT_SYMBOL_GPL(nand_lock);
3353EXPORT_SYMBOL_GPL(nand_unlock);
3354EXPORT_SYMBOL_GPL(nand_scan);
3355EXPORT_SYMBOL_GPL(nand_scan_ident);
3356EXPORT_SYMBOL_GPL(nand_scan_tail);
3357EXPORT_SYMBOL_GPL(nand_release); 3516EXPORT_SYMBOL_GPL(nand_release);
3358 3517
3359static int __init nand_base_init(void) 3518static int __init nand_base_init(void)
@@ -3371,5 +3530,6 @@ module_init(nand_base_init);
3371module_exit(nand_base_exit); 3530module_exit(nand_base_exit);
3372 3531
3373MODULE_LICENSE("GPL"); 3532MODULE_LICENSE("GPL");
3374MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>, Thomas Gleixner <tglx@linutronix.de>"); 3533MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
3534MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
3375MODULE_DESCRIPTION("Generic NAND flash driver code"); 3535MODULE_DESCRIPTION("Generic NAND flash driver code");
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 5fedf4a74f16..586b981f0e61 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -13,28 +13,37 @@
13 * Description: 13 * Description:
14 * 14 *
15 * When nand_scan_bbt is called, then it tries to find the bad block table 15 * When nand_scan_bbt is called, then it tries to find the bad block table
16 * depending on the options in the bbt descriptor(s). If a bbt is found 16 * depending on the options in the BBT descriptor(s). If no flash based BBT
17 * then the contents are read and the memory based bbt is created. If a 17 * (NAND_USE_FLASH_BBT) is specified then the device is scanned for factory
18 * mirrored bbt is selected then the mirror is searched too and the 18 * marked good / bad blocks. This information is used to create a memory BBT.
19 * versions are compared. If the mirror has a greater version number 19 * Once a new bad block is discovered then the "factory" information is updated
20 * than the mirror bbt is used to build the memory based bbt. 20 * on the device.
21 * If a flash based BBT is specified then the function first tries to find the
22 * BBT on flash. If a BBT is found then the contents are read and the memory
23 * based BBT is created. If a mirrored BBT is selected then the mirror is
24 * searched too and the versions are compared. If the mirror has a greater
 25 * version number then the mirror BBT is used to build the memory based BBT.
21 * If the tables are not versioned, then we "or" the bad block information. 26 * If the tables are not versioned, then we "or" the bad block information.
22 * If one of the bbt's is out of date or does not exist it is (re)created. 27 * If one of the BBTs is out of date or does not exist it is (re)created.
23 * If no bbt exists at all then the device is scanned for factory marked 28 * If no BBT exists at all then the device is scanned for factory marked
24 * good / bad blocks and the bad block tables are created. 29 * good / bad blocks and the bad block tables are created.
25 * 30 *
26 * For manufacturer created bbts like the one found on M-SYS DOC devices 31 * For manufacturer created BBTs like the one found on M-SYS DOC devices
27 * the bbt is searched and read but never created 32 * the BBT is searched and read but never created
28 * 33 *
29 * The autogenerated bad block table is located in the last good blocks 34 * The auto generated bad block table is located in the last good blocks
30 * of the device. The table is mirrored, so it can be updated eventually. 35 * of the device. The table is mirrored, so it can be updated eventually.
31 * The table is marked in the oob area with an ident pattern and a version 36 * The table is marked in the OOB area with an ident pattern and a version
32 * number which indicates which of both tables is more up to date. 37 * number which indicates which of both tables is more up to date. If the NAND
38 * controller needs the complete OOB area for the ECC information then the
39 * option NAND_USE_FLASH_BBT_NO_OOB should be used: it moves the ident pattern
40 * and the version byte into the data area and the OOB area will remain
41 * untouched.
33 * 42 *
34 * The table uses 2 bits per block 43 * The table uses 2 bits per block
35 * 11b: block is good 44 * 11b: block is good
36 * 00b: block is factory marked bad 45 * 00b: block is factory marked bad
37 * 01b, 10b: block is marked bad due to wear 46 * 01b, 10b: block is marked bad due to wear
38 * 47 *
39 * The memory bad block table uses the following scheme: 48 * The memory bad block table uses the following scheme:
40 * 00b: block is good 49 * 00b: block is good
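A minimal sketch, not part of the patch, of how one block's 2-bit entry can be decoded from an on-flash table buffer; it assumes the same packing read_bbt() below uses, four entries per byte starting at the least significant bits:

static int bbt_block_entry_is_good(const uint8_t *bbt_buf, int block)
{
        /* illustrative only -- 11b: good, 00b: factory bad, 01b/10b: worn bad */
        uint8_t entry = (bbt_buf[block >> 2] >> ((block & 0x3) * 2)) & 0x3;

        return entry == 0x3;
}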
@@ -59,6 +68,16 @@
59#include <linux/delay.h> 68#include <linux/delay.h>
60#include <linux/vmalloc.h> 69#include <linux/vmalloc.h>
61 70
71static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
72{
73 int ret;
74
75 ret = memcmp(buf, td->pattern, td->len);
76 if (!ret)
77 return ret;
78 return -1;
79}
80
62/** 81/**
63 * check_pattern - [GENERIC] check if a pattern is in the buffer 82 * check_pattern - [GENERIC] check if a pattern is in the buffer
64 * @buf: the buffer to search 83 * @buf: the buffer to search
@@ -77,6 +96,9 @@ static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_desc
77 int i, end = 0; 96 int i, end = 0;
78 uint8_t *p = buf; 97 uint8_t *p = buf;
79 98
99 if (td->options & NAND_BBT_NO_OOB)
100 return check_pattern_no_oob(buf, td);
101
80 end = paglen + td->offs; 102 end = paglen + td->offs;
81 if (td->options & NAND_BBT_SCANEMPTY) { 103 if (td->options & NAND_BBT_SCANEMPTY) {
82 for (i = 0; i < end; i++) { 104 for (i = 0; i < end; i++) {
@@ -156,32 +178,63 @@ static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
156} 178}
157 179
158/** 180/**
181 * add_marker_len - compute the length of the marker in data area
182 * @td: BBT descriptor used for computation
183 *
 184 * The length will be 0 if the marker is located in the OOB area.
185 */
186static u32 add_marker_len(struct nand_bbt_descr *td)
187{
188 u32 len;
189
190 if (!(td->options & NAND_BBT_NO_OOB))
191 return 0;
192
193 len = td->len;
194 if (td->options & NAND_BBT_VERSION)
195 len++;
196 return len;
197}
198
199/**
159 * read_bbt - [GENERIC] Read the bad block table starting from page 200 * read_bbt - [GENERIC] Read the bad block table starting from page
160 * @mtd: MTD device structure 201 * @mtd: MTD device structure
161 * @buf: temporary buffer 202 * @buf: temporary buffer
162 * @page: the starting page 203 * @page: the starting page
163 * @num: the number of bbt descriptors to read 204 * @num: the number of bbt descriptors to read
164 * @bits: number of bits per block 205 * @td: the bbt describtion table
165 * @offs: offset in the memory table 206 * @offs: offset in the memory table
166 * @reserved_block_code: Pattern to identify reserved blocks
167 * 207 *
168 * Read the bad block table starting from page. 208 * Read the bad block table starting from page.
169 * 209 *
170 */ 210 */
171static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num, 211static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
172 int bits, int offs, int reserved_block_code) 212 struct nand_bbt_descr *td, int offs)
173{ 213{
174 int res, i, j, act = 0; 214 int res, i, j, act = 0;
175 struct nand_chip *this = mtd->priv; 215 struct nand_chip *this = mtd->priv;
176 size_t retlen, len, totlen; 216 size_t retlen, len, totlen;
177 loff_t from; 217 loff_t from;
218 int bits = td->options & NAND_BBT_NRBITS_MSK;
178 uint8_t msk = (uint8_t) ((1 << bits) - 1); 219 uint8_t msk = (uint8_t) ((1 << bits) - 1);
220 u32 marker_len;
221 int reserved_block_code = td->reserved_block_code;
179 222
180 totlen = (num * bits) >> 3; 223 totlen = (num * bits) >> 3;
224 marker_len = add_marker_len(td);
181 from = ((loff_t) page) << this->page_shift; 225 from = ((loff_t) page) << this->page_shift;
182 226
183 while (totlen) { 227 while (totlen) {
184 len = min(totlen, (size_t) (1 << this->bbt_erase_shift)); 228 len = min(totlen, (size_t) (1 << this->bbt_erase_shift));
229 if (marker_len) {
230 /*
231 * In case the BBT marker is not in the OOB area it
232 * will be just in the first page.
233 */
234 len -= marker_len;
235 from += marker_len;
236 marker_len = 0;
237 }
185 res = mtd->read(mtd, from, len, &retlen, buf); 238 res = mtd->read(mtd, from, len, &retlen, buf);
186 if (res < 0) { 239 if (res < 0) {
187 if (retlen != len) { 240 if (retlen != len) {
@@ -238,20 +291,21 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
238{ 291{
239 struct nand_chip *this = mtd->priv; 292 struct nand_chip *this = mtd->priv;
240 int res = 0, i; 293 int res = 0, i;
241 int bits;
242 294
243 bits = td->options & NAND_BBT_NRBITS_MSK;
244 if (td->options & NAND_BBT_PERCHIP) { 295 if (td->options & NAND_BBT_PERCHIP) {
245 int offs = 0; 296 int offs = 0;
246 for (i = 0; i < this->numchips; i++) { 297 for (i = 0; i < this->numchips; i++) {
247 if (chip == -1 || chip == i) 298 if (chip == -1 || chip == i)
248 res = read_bbt (mtd, buf, td->pages[i], this->chipsize >> this->bbt_erase_shift, bits, offs, td->reserved_block_code); 299 res = read_bbt(mtd, buf, td->pages[i],
300 this->chipsize >> this->bbt_erase_shift,
301 td, offs);
249 if (res) 302 if (res)
250 return res; 303 return res;
251 offs += this->chipsize >> (this->bbt_erase_shift + 2); 304 offs += this->chipsize >> (this->bbt_erase_shift + 2);
252 } 305 }
253 } else { 306 } else {
254 res = read_bbt (mtd, buf, td->pages[0], mtd->size >> this->bbt_erase_shift, bits, 0, td->reserved_block_code); 307 res = read_bbt(mtd, buf, td->pages[0],
308 mtd->size >> this->bbt_erase_shift, td, 0);
255 if (res) 309 if (res)
256 return res; 310 return res;
257 } 311 }
@@ -259,9 +313,25 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
259} 313}
260 314
261/* 315/*
316 * BBT marker is in the first page, no OOB.
317 */
318static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
319 struct nand_bbt_descr *td)
320{
321 size_t retlen;
322 size_t len;
323
324 len = td->len;
325 if (td->options & NAND_BBT_VERSION)
326 len++;
327
328 return mtd->read(mtd, offs, len, &retlen, buf);
329}
330
331/*
262 * Scan read raw data from flash 332 * Scan read raw data from flash
263 */ 333 */
264static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs, 334static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
265 size_t len) 335 size_t len)
266{ 336{
267 struct mtd_oob_ops ops; 337 struct mtd_oob_ops ops;
@@ -294,6 +364,15 @@ static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
294 return 0; 364 return 0;
295} 365}
296 366
367static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
368 size_t len, struct nand_bbt_descr *td)
369{
370 if (td->options & NAND_BBT_NO_OOB)
371 return scan_read_raw_data(mtd, buf, offs, td);
372 else
373 return scan_read_raw_oob(mtd, buf, offs, len);
374}
375
297/* 376/*
298 * Scan write data with oob to flash 377 * Scan write data with oob to flash
299 */ 378 */
@@ -312,6 +391,15 @@ static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
312 return mtd->write_oob(mtd, offs, &ops); 391 return mtd->write_oob(mtd, offs, &ops);
313} 392}
314 393
394static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
395{
396 u32 ver_offs = td->veroffs;
397
398 if (!(td->options & NAND_BBT_NO_OOB))
399 ver_offs += mtd->writesize;
400 return ver_offs;
401}
402
315/** 403/**
316 * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page 404 * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
317 * @mtd: MTD device structure 405 * @mtd: MTD device structure
@@ -331,8 +419,8 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
331 /* Read the primary version, if available */ 419 /* Read the primary version, if available */
332 if (td->options & NAND_BBT_VERSION) { 420 if (td->options & NAND_BBT_VERSION) {
333 scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift, 421 scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
334 mtd->writesize); 422 mtd->writesize, td);
335 td->version[0] = buf[mtd->writesize + td->veroffs]; 423 td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
336 printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", 424 printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
337 td->pages[0], td->version[0]); 425 td->pages[0], td->version[0]);
338 } 426 }
@@ -340,8 +428,8 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
340 /* Read the mirror version, if available */ 428 /* Read the mirror version, if available */
341 if (md && (md->options & NAND_BBT_VERSION)) { 429 if (md && (md->options & NAND_BBT_VERSION)) {
342 scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift, 430 scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
343 mtd->writesize); 431 mtd->writesize, td);
344 md->version[0] = buf[mtd->writesize + md->veroffs]; 432 md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
345 printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", 433 printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
346 md->pages[0], md->version[0]); 434 md->pages[0], md->version[0]);
347 } 435 }
@@ -357,7 +445,7 @@ static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
357{ 445{
358 int ret, j; 446 int ret, j;
359 447
360 ret = scan_read_raw(mtd, buf, offs, readlen); 448 ret = scan_read_raw_oob(mtd, buf, offs, readlen);
361 if (ret) 449 if (ret)
362 return ret; 450 return ret;
363 451
@@ -464,6 +552,8 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
464 for (i = startblock; i < numblocks;) { 552 for (i = startblock; i < numblocks;) {
465 int ret; 553 int ret;
466 554
555 BUG_ON(bd->options & NAND_BBT_NO_OOB);
556
467 if (bd->options & NAND_BBT_SCANALLPAGES) 557 if (bd->options & NAND_BBT_SCANALLPAGES)
468 ret = scan_block_full(mtd, bd, from, buf, readlen, 558 ret = scan_block_full(mtd, bd, from, buf, readlen,
469 scanlen, len); 559 scanlen, len);
@@ -545,11 +635,12 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
545 loff_t offs = (loff_t)actblock << this->bbt_erase_shift; 635 loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
546 636
547 /* Read first page */ 637 /* Read first page */
548 scan_read_raw(mtd, buf, offs, mtd->writesize); 638 scan_read_raw(mtd, buf, offs, mtd->writesize, td);
549 if (!check_pattern(buf, scanlen, mtd->writesize, td)) { 639 if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
550 td->pages[i] = actblock << blocktopage; 640 td->pages[i] = actblock << blocktopage;
551 if (td->options & NAND_BBT_VERSION) { 641 if (td->options & NAND_BBT_VERSION) {
552 td->version[i] = buf[mtd->writesize + td->veroffs]; 642 offs = bbt_get_ver_offs(mtd, td);
643 td->version[i] = buf[offs];
553 } 644 }
554 break; 645 break;
555 } 646 }
@@ -733,12 +824,26 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
733 memset(&buf[offs], 0xff, (size_t) (numblocks >> sft)); 824 memset(&buf[offs], 0xff, (size_t) (numblocks >> sft));
734 ooboffs = len + (pageoffs * mtd->oobsize); 825 ooboffs = len + (pageoffs * mtd->oobsize);
735 826
827 } else if (td->options & NAND_BBT_NO_OOB) {
828 ooboffs = 0;
829 offs = td->len;
830 /* the version byte */
831 if (td->options & NAND_BBT_VERSION)
832 offs++;
833 /* Calc length */
834 len = (size_t) (numblocks >> sft);
835 len += offs;
836 /* Make it page aligned ! */
837 len = ALIGN(len, mtd->writesize);
838 /* Preset the buffer with 0xff */
839 memset(buf, 0xff, len);
 840 /* Pattern is located at the beginning of the first page */
841 memcpy(buf, td->pattern, td->len);
736 } else { 842 } else {
737 /* Calc length */ 843 /* Calc length */
738 len = (size_t) (numblocks >> sft); 844 len = (size_t) (numblocks >> sft);
739 /* Make it page aligned ! */ 845 /* Make it page aligned ! */
740 len = (len + (mtd->writesize - 1)) & 846 len = ALIGN(len, mtd->writesize);
741 ~(mtd->writesize - 1);
742 /* Preset the buffer with 0xff */ 847 /* Preset the buffer with 0xff */
743 memset(buf, 0xff, len + 848 memset(buf, 0xff, len +
744 (len >> this->page_shift)* mtd->oobsize); 849 (len >> this->page_shift)* mtd->oobsize);
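A rough sketch of the first-page layout the NAND_BBT_NO_OOB branch above assembles before the table is written out (inferred from the code here and the 4-byte descriptors added later in this file; illustrative only):

/*
 * byte 0 .. td->len - 1            marker pattern ("Bbt0" or "1tbB")
 * byte td->len                     version byte, if NAND_BBT_VERSION is set
 * next (numblocks >> sft) bytes    packed 2-bit-per-block table data
 * up to ALIGN(len, writesize)      0xff padding so the write stays page aligned
 */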
@@ -772,7 +877,9 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
772 if (res < 0) 877 if (res < 0)
773 goto outerr; 878 goto outerr;
774 879
775 res = scan_write_bbt(mtd, to, len, buf, &buf[len]); 880 res = scan_write_bbt(mtd, to, len, buf,
881 td->options & NAND_BBT_NO_OOB ? NULL :
882 &buf[len]);
776 if (res < 0) 883 if (res < 0)
777 goto outerr; 884 goto outerr;
778 885
@@ -892,7 +999,8 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
892 continue; 999 continue;
893 1000
894 /* Create the table in memory by scanning the chip(s) */ 1001 /* Create the table in memory by scanning the chip(s) */
895 create_bbt(mtd, buf, bd, chipsel); 1002 if (!(this->options & NAND_CREATE_EMPTY_BBT))
1003 create_bbt(mtd, buf, bd, chipsel);
896 1004
897 td->version[i] = 1; 1005 td->version[i] = 1;
898 if (md) 1006 if (md)
@@ -983,6 +1091,49 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
983} 1091}
984 1092
985/** 1093/**
1094 * verify_bbt_descr - verify the bad block description
1095 * @bd: the table to verify
1096 *
 1097 * This function performs a few sanity checks on the bad block description
1098 * table.
1099 */
1100static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1101{
1102 struct nand_chip *this = mtd->priv;
1103 u32 pattern_len = bd->len;
1104 u32 bits = bd->options & NAND_BBT_NRBITS_MSK;
1105 u32 table_size;
1106
1107 if (!bd)
1108 return;
1109 BUG_ON((this->options & NAND_USE_FLASH_BBT_NO_OOB) &&
1110 !(this->options & NAND_USE_FLASH_BBT));
1111 BUG_ON(!bits);
1112
1113 if (bd->options & NAND_BBT_VERSION)
1114 pattern_len++;
1115
1116 if (bd->options & NAND_BBT_NO_OOB) {
1117 BUG_ON(!(this->options & NAND_USE_FLASH_BBT));
1118 BUG_ON(!(this->options & NAND_USE_FLASH_BBT_NO_OOB));
1119 BUG_ON(bd->offs);
1120 if (bd->options & NAND_BBT_VERSION)
1121 BUG_ON(bd->veroffs != bd->len);
1122 BUG_ON(bd->options & NAND_BBT_SAVECONTENT);
1123 }
1124
1125 if (bd->options & NAND_BBT_PERCHIP)
1126 table_size = this->chipsize >> this->bbt_erase_shift;
1127 else
1128 table_size = mtd->size >> this->bbt_erase_shift;
1129 table_size >>= 3;
1130 table_size *= bits;
1131 if (bd->options & NAND_BBT_NO_OOB)
1132 table_size += pattern_len;
1133 BUG_ON(table_size > (1 << this->bbt_erase_shift));
1134}
1135
1136/**
986 * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s) 1137 * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
987 * @mtd: MTD device structure 1138 * @mtd: MTD device structure
988 * @bd: descriptor for the good/bad block search pattern 1139 * @bd: descriptor for the good/bad block search pattern
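A rough worked check of the size limit verify_bbt_descr() above enforces, using an assumed geometry rather than values from the patch:

/*
 * Assume 1 GiB per chip and 128 KiB erase blocks -> 8192 blocks.
 * At 2 bits per block the table needs 8192 * 2 / 8 = 2048 bytes, plus
 * 4 pattern bytes and 1 version byte in the NO_OOB case: 2053 bytes,
 * well below the one-eraseblock ceiling checked by
 * BUG_ON(table_size > (1 << this->bbt_erase_shift)).
 */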
@@ -1023,6 +1174,8 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1023 } 1174 }
1024 return res; 1175 return res;
1025 } 1176 }
1177 verify_bbt_descr(mtd, td);
1178 verify_bbt_descr(mtd, md);
1026 1179
1027 /* Allocate a temporary buffer for one eraseblock incl. oob */ 1180 /* Allocate a temporary buffer for one eraseblock incl. oob */
1028 len = (1 << this->bbt_erase_shift); 1181 len = (1 << this->bbt_erase_shift);
@@ -1166,6 +1319,26 @@ static struct nand_bbt_descr bbt_mirror_descr = {
1166 .pattern = mirror_pattern 1319 .pattern = mirror_pattern
1167}; 1320};
1168 1321
1322static struct nand_bbt_descr bbt_main_no_bbt_descr = {
1323 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
1324 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
1325 | NAND_BBT_NO_OOB,
1326 .len = 4,
1327 .veroffs = 4,
1328 .maxblocks = 4,
1329 .pattern = bbt_pattern
1330};
1331
1332static struct nand_bbt_descr bbt_mirror_no_bbt_descr = {
1333 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
1334 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
1335 | NAND_BBT_NO_OOB,
1336 .len = 4,
1337 .veroffs = 4,
1338 .maxblocks = 4,
1339 .pattern = mirror_pattern
1340};
1341
1169#define BBT_SCAN_OPTIONS (NAND_BBT_SCANLASTPAGE | NAND_BBT_SCAN2NDPAGE | \ 1342#define BBT_SCAN_OPTIONS (NAND_BBT_SCANLASTPAGE | NAND_BBT_SCAN2NDPAGE | \
1170 NAND_BBT_SCANBYTE1AND6) 1343 NAND_BBT_SCANBYTE1AND6)
1171/** 1344/**
@@ -1236,8 +1409,13 @@ int nand_default_bbt(struct mtd_info *mtd)
1236 if (this->options & NAND_USE_FLASH_BBT) { 1409 if (this->options & NAND_USE_FLASH_BBT) {
1237 /* Use the default pattern descriptors */ 1410 /* Use the default pattern descriptors */
1238 if (!this->bbt_td) { 1411 if (!this->bbt_td) {
1239 this->bbt_td = &bbt_main_descr; 1412 if (this->options & NAND_USE_FLASH_BBT_NO_OOB) {
1240 this->bbt_md = &bbt_mirror_descr; 1413 this->bbt_td = &bbt_main_no_bbt_descr;
1414 this->bbt_md = &bbt_mirror_no_bbt_descr;
1415 } else {
1416 this->bbt_td = &bbt_main_descr;
1417 this->bbt_md = &bbt_mirror_descr;
1418 }
1241 } 1419 }
1242 if (!this->badblock_pattern) { 1420 if (!this->badblock_pattern) {
1243 this->badblock_pattern = (mtd->writesize > 512) ? &largepage_flashbased : &smallpage_flashbased; 1421 this->badblock_pattern = (mtd->writesize > 512) ? &largepage_flashbased : &smallpage_flashbased;
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index c65f19074bc8..00cf1b0d6053 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -75,9 +75,13 @@ struct nand_flash_dev nand_flash_ids[] = {
75 75
76 /*512 Megabit */ 76 /*512 Megabit */
77 {"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, LP_OPTIONS}, 77 {"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, LP_OPTIONS},
78 {"NAND 64MiB 1,8V 8-bit", 0xA0, 0, 64, 0, LP_OPTIONS},
78 {"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, LP_OPTIONS}, 79 {"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, LP_OPTIONS},
80 {"NAND 64MiB 3,3V 8-bit", 0xD0, 0, 64, 0, LP_OPTIONS},
79 {"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, LP_OPTIONS16}, 81 {"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, LP_OPTIONS16},
82 {"NAND 64MiB 1,8V 16-bit", 0xB0, 0, 64, 0, LP_OPTIONS16},
80 {"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, LP_OPTIONS16}, 83 {"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, LP_OPTIONS16},
84 {"NAND 64MiB 3,3V 16-bit", 0xC0, 0, 64, 0, LP_OPTIONS16},
81 85
82 /* 1 Gigabit */ 86 /* 1 Gigabit */
83 {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS}, 87 {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS},
@@ -112,7 +116,34 @@ struct nand_flash_dev nand_flash_ids[] = {
112 {"NAND 2GiB 3,3V 16-bit", 0xC5, 0, 2048, 0, LP_OPTIONS16}, 116 {"NAND 2GiB 3,3V 16-bit", 0xC5, 0, 2048, 0, LP_OPTIONS16},
113 117
114 /* 32 Gigabit */ 118 /* 32 Gigabit */
119 {"NAND 4GiB 1,8V 8-bit", 0xA7, 0, 4096, 0, LP_OPTIONS},
115 {"NAND 4GiB 3,3V 8-bit", 0xD7, 0, 4096, 0, LP_OPTIONS}, 120 {"NAND 4GiB 3,3V 8-bit", 0xD7, 0, 4096, 0, LP_OPTIONS},
121 {"NAND 4GiB 1,8V 16-bit", 0xB7, 0, 4096, 0, LP_OPTIONS16},
122 {"NAND 4GiB 3,3V 16-bit", 0xC7, 0, 4096, 0, LP_OPTIONS16},
123
124 /* 64 Gigabit */
125 {"NAND 8GiB 1,8V 8-bit", 0xAE, 0, 8192, 0, LP_OPTIONS},
126 {"NAND 8GiB 3,3V 8-bit", 0xDE, 0, 8192, 0, LP_OPTIONS},
127 {"NAND 8GiB 1,8V 16-bit", 0xBE, 0, 8192, 0, LP_OPTIONS16},
128 {"NAND 8GiB 3,3V 16-bit", 0xCE, 0, 8192, 0, LP_OPTIONS16},
129
130 /* 128 Gigabit */
131 {"NAND 16GiB 1,8V 8-bit", 0x1A, 0, 16384, 0, LP_OPTIONS},
132 {"NAND 16GiB 3,3V 8-bit", 0x3A, 0, 16384, 0, LP_OPTIONS},
133 {"NAND 16GiB 1,8V 16-bit", 0x2A, 0, 16384, 0, LP_OPTIONS16},
134 {"NAND 16GiB 3,3V 16-bit", 0x4A, 0, 16384, 0, LP_OPTIONS16},
135
136 /* 256 Gigabit */
137 {"NAND 32GiB 1,8V 8-bit", 0x1C, 0, 32768, 0, LP_OPTIONS},
138 {"NAND 32GiB 3,3V 8-bit", 0x3C, 0, 32768, 0, LP_OPTIONS},
139 {"NAND 32GiB 1,8V 16-bit", 0x2C, 0, 32768, 0, LP_OPTIONS16},
140 {"NAND 32GiB 3,3V 16-bit", 0x4C, 0, 32768, 0, LP_OPTIONS16},
141
142 /* 512 Gigabit */
143 {"NAND 64GiB 1,8V 8-bit", 0x1E, 0, 65536, 0, LP_OPTIONS},
144 {"NAND 64GiB 3,3V 8-bit", 0x3E, 0, 65536, 0, LP_OPTIONS},
145 {"NAND 64GiB 1,8V 16-bit", 0x2E, 0, 65536, 0, LP_OPTIONS16},
146 {"NAND 64GiB 3,3V 16-bit", 0x4E, 0, 65536, 0, LP_OPTIONS16},
116 147
117 /* 148 /*
118 * Renesas AND 1 Gigabit. Those chips do not support extended id and 149 * Renesas AND 1 Gigabit. Those chips do not support extended id and
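Read against the struct nand_flash_dev field order, one of the new entries expands to roughly the following designated-initializer form (illustration only; chipsize is given in MiB, and a pagesize of 0 means the page and erase sizes are decoded from the extended ID):

{
        .name      = "NAND 64GiB 1,8V 8-bit",
        .id        = 0x1E,
        .pagesize  = 0,
        .chipsize  = 65536,     /* 64 GiB */
        .erasesize = 0,
        .options   = LP_OPTIONS,
},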
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index c25648bb5793..a6a73aab1253 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -107,6 +107,7 @@ static char *gravepages = NULL;
107static unsigned int rptwear = 0; 107static unsigned int rptwear = 0;
108static unsigned int overridesize = 0; 108static unsigned int overridesize = 0;
109static char *cache_file = NULL; 109static char *cache_file = NULL;
110static unsigned int bbt;
110 111
111module_param(first_id_byte, uint, 0400); 112module_param(first_id_byte, uint, 0400);
112module_param(second_id_byte, uint, 0400); 113module_param(second_id_byte, uint, 0400);
@@ -130,6 +131,7 @@ module_param(gravepages, charp, 0400);
130module_param(rptwear, uint, 0400); 131module_param(rptwear, uint, 0400);
131module_param(overridesize, uint, 0400); 132module_param(overridesize, uint, 0400);
132module_param(cache_file, charp, 0400); 133module_param(cache_file, charp, 0400);
134module_param(bbt, uint, 0400);
133 135
134MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)"); 136MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)");
135MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)"); 137MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)");
@@ -162,6 +164,7 @@ MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the I
162 "The size is specified in erase blocks and as the exponent of a power of two" 164 "The size is specified in erase blocks and as the exponent of a power of two"
163 " e.g. 5 means a size of 32 erase blocks"); 165 " e.g. 5 means a size of 32 erase blocks");
164MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory"); 166MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory");
167MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area");
165 168
166/* The largest possible page size */ 169/* The largest possible page size */
167#define NS_LARGEST_PAGE_SIZE 4096 170#define NS_LARGEST_PAGE_SIZE 4096
@@ -2264,6 +2267,18 @@ static int __init ns_init_module(void)
2264 /* and 'badblocks' parameters to work */ 2267 /* and 'badblocks' parameters to work */
2265 chip->options |= NAND_SKIP_BBTSCAN; 2268 chip->options |= NAND_SKIP_BBTSCAN;
2266 2269
2270 switch (bbt) {
2271 case 2:
2272 chip->options |= NAND_USE_FLASH_BBT_NO_OOB;
2273 case 1:
2274 chip->options |= NAND_USE_FLASH_BBT;
2275 case 0:
2276 break;
2277 default:
2278 NS_ERR("bbt has to be 0..2\n");
2279 retval = -EINVAL;
2280 goto error;
2281 }
2267 /* 2282 /*
2268 * Perform minimum nandsim structure initialization to handle 2283 * Perform minimum nandsim structure initialization to handle
2269 * the initial ID read command correctly 2284 * the initial ID read command correctly
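The switch above relies on deliberate fall-through: bbt=2 sets both the no-OOB flag and the flash-BBT flag, bbt=1 only the latter. Written without fall-through it would read roughly as follows (sketch only; the real code stores -EINVAL and jumps to its error label):

if (bbt == 2)
        chip->options |= NAND_USE_FLASH_BBT | NAND_USE_FLASH_BBT_NO_OOB;
else if (bbt == 1)
        chip->options |= NAND_USE_FLASH_BBT;
else if (bbt != 0)
        return -EINVAL;         /* illustrative shortcut */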
@@ -2321,10 +2336,10 @@ static int __init ns_init_module(void)
2321 if ((retval = init_nandsim(nsmtd)) != 0) 2336 if ((retval = init_nandsim(nsmtd)) != 0)
2322 goto err_exit; 2337 goto err_exit;
2323 2338
2324 if ((retval = parse_badblocks(nand, nsmtd)) != 0) 2339 if ((retval = nand_default_bbt(nsmtd)) != 0)
2325 goto err_exit; 2340 goto err_exit;
2326 2341
2327 if ((retval = nand_default_bbt(nsmtd)) != 0) 2342 if ((retval = parse_badblocks(nand, nsmtd)) != 0)
2328 goto err_exit; 2343 goto err_exit;
2329 2344
2330 /* Register NAND partitions */ 2345 /* Register NAND partitions */
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 510554e6c115..c9ae0a5023b6 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -229,7 +229,7 @@ static int __devinit ndfc_probe(struct platform_device *ofdev,
229 const struct of_device_id *match) 229 const struct of_device_id *match)
230{ 230{
231 struct ndfc_controller *ndfc = &ndfc_ctrl; 231 struct ndfc_controller *ndfc = &ndfc_ctrl;
232 const u32 *reg; 232 const __be32 *reg;
233 u32 ccr; 233 u32 ccr;
234 int err, len; 234 int err, len;
235 235
@@ -244,7 +244,7 @@ static int __devinit ndfc_probe(struct platform_device *ofdev,
244 dev_err(&ofdev->dev, "unable read reg property (%d)\n", len); 244 dev_err(&ofdev->dev, "unable read reg property (%d)\n", len);
245 return -ENOENT; 245 return -ENOENT;
246 } 246 }
247 ndfc->chip_select = reg[0]; 247 ndfc->chip_select = be32_to_cpu(reg[0]);
248 248
249 ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0); 249 ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0);
250 if (!ndfc->ndfcbase) { 250 if (!ndfc->ndfcbase) {
@@ -257,7 +257,7 @@ static int __devinit ndfc_probe(struct platform_device *ofdev,
257 /* It is ok if ccr does not exist - just default to 0 */ 257 /* It is ok if ccr does not exist - just default to 0 */
258 reg = of_get_property(ofdev->dev.of_node, "ccr", NULL); 258 reg = of_get_property(ofdev->dev.of_node, "ccr", NULL);
259 if (reg) 259 if (reg)
260 ccr |= *reg; 260 ccr |= be32_to_cpup(reg);
261 261
262 out_be32(ndfc->ndfcbase + NDFC_CCR, ccr); 262 out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
263 263
@@ -265,7 +265,7 @@ static int __devinit ndfc_probe(struct platform_device *ofdev,
265 reg = of_get_property(ofdev->dev.of_node, "bank-settings", NULL); 265 reg = of_get_property(ofdev->dev.of_node, "bank-settings", NULL);
266 if (reg) { 266 if (reg) {
267 int offset = NDFC_BCFG0 + (ndfc->chip_select << 2); 267 int offset = NDFC_BCFG0 + (ndfc->chip_select << 2);
268 out_be32(ndfc->ndfcbase + offset, *reg); 268 out_be32(ndfc->ndfcbase + offset, be32_to_cpup(reg));
269 } 269 }
270 270
271 err = ndfc_chip_init(ndfc, ofdev->dev.of_node); 271 err = ndfc_chip_init(ndfc, ofdev->dev.of_node);
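The ndfc hunks above all follow one pattern: flattened device-tree cells are big-endian, so a raw property pointer must go through be32_to_cpu()/be32_to_cpup() before the value is used. A minimal sketch of that pattern, assuming a struct device_node *np that is not a name from the patch:

const __be32 *reg = of_get_property(np, "reg", NULL);

if (reg) {
        u32 chip_select = be32_to_cpup(reg);    /* convert from big-endian */
        /* ... use chip_select ... */
}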
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 513e0a76a4a7..cd41c58b5bbd 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -111,11 +111,11 @@ static int use_dma = 1;
111module_param(use_dma, bool, 0); 111module_param(use_dma, bool, 0);
112MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); 112MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
113#else 113#else
114const int use_dma; 114static const int use_dma;
115#endif 115#endif
116#else 116#else
117const int use_prefetch; 117const int use_prefetch;
118const int use_dma; 118static const int use_dma;
119#endif 119#endif
120 120
121struct omap_nand_info { 121struct omap_nand_info {
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 4d01cda68844..17f8518cc5eb 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -117,7 +117,7 @@ struct pxa3xx_nand_info {
117 struct nand_chip nand_chip; 117 struct nand_chip nand_chip;
118 118
119 struct platform_device *pdev; 119 struct platform_device *pdev;
120 const struct pxa3xx_nand_flash *flash_info; 120 struct pxa3xx_nand_cmdset *cmdset;
121 121
122 struct clk *clk; 122 struct clk *clk;
123 void __iomem *mmio_base; 123 void __iomem *mmio_base;
@@ -131,6 +131,7 @@ struct pxa3xx_nand_info {
131 int drcmr_cmd; 131 int drcmr_cmd;
132 132
133 unsigned char *data_buff; 133 unsigned char *data_buff;
134 unsigned char *oob_buff;
134 dma_addr_t data_buff_phys; 135 dma_addr_t data_buff_phys;
135 size_t data_buff_size; 136 size_t data_buff_size;
136 int data_dma_ch; 137 int data_dma_ch;
@@ -149,7 +150,8 @@ struct pxa3xx_nand_info {
149 int use_ecc; /* use HW ECC ? */ 150 int use_ecc; /* use HW ECC ? */
150 int use_dma; /* use DMA ? */ 151 int use_dma; /* use DMA ? */
151 152
152 size_t data_size; /* data size in FIFO */ 153 unsigned int page_size; /* page size of attached chip */
154 unsigned int data_size; /* data size in FIFO */
153 int retcode; 155 int retcode;
154 struct completion cmd_complete; 156 struct completion cmd_complete;
155 157
@@ -158,6 +160,10 @@ struct pxa3xx_nand_info {
158 uint32_t ndcb1; 160 uint32_t ndcb1;
159 uint32_t ndcb2; 161 uint32_t ndcb2;
160 162
 163 /* timing calculated from settings */
164 uint32_t ndtr0cs0;
165 uint32_t ndtr1cs0;
166
161 /* calculated from pxa3xx_nand_flash data */ 167 /* calculated from pxa3xx_nand_flash data */
162 size_t oob_size; 168 size_t oob_size;
163 size_t read_id_bytes; 169 size_t read_id_bytes;
@@ -174,23 +180,7 @@ MODULE_PARM_DESC(use_dma, "enable DMA for data transfering to/from NAND HW");
174 * Default NAND flash controller configuration setup by the 180 * Default NAND flash controller configuration setup by the
175 * bootloader. This configuration is used only when pdata->keep_config is set 181 * bootloader. This configuration is used only when pdata->keep_config is set
176 */ 182 */
177static struct pxa3xx_nand_timing default_timing; 183static struct pxa3xx_nand_cmdset default_cmdset = {
178static struct pxa3xx_nand_flash default_flash;
179
180static struct pxa3xx_nand_cmdset smallpage_cmdset = {
181 .read1 = 0x0000,
182 .read2 = 0x0050,
183 .program = 0x1080,
184 .read_status = 0x0070,
185 .read_id = 0x0090,
186 .erase = 0xD060,
187 .reset = 0x00FF,
188 .lock = 0x002A,
189 .unlock = 0x2423,
190 .lock_status = 0x007A,
191};
192
193static struct pxa3xx_nand_cmdset largepage_cmdset = {
194 .read1 = 0x3000, 184 .read1 = 0x3000,
195 .read2 = 0x0050, 185 .read2 = 0x0050,
196 .program = 0x1080, 186 .program = 0x1080,
@@ -203,142 +193,27 @@ static struct pxa3xx_nand_cmdset largepage_cmdset = {
203 .lock_status = 0x007A, 193 .lock_status = 0x007A,
204}; 194};
205 195
206#ifdef CONFIG_MTD_NAND_PXA3xx_BUILTIN 196static struct pxa3xx_nand_timing timing[] = {
207static struct pxa3xx_nand_timing samsung512MbX16_timing = { 197 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
208 .tCH = 10, 198 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
209 .tCS = 0, 199 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
210 .tWH = 20, 200 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
211 .tWP = 40,
212 .tRH = 30,
213 .tRP = 40,
214 .tR = 11123,
215 .tWHR = 110,
216 .tAR = 10,
217};
218
219static struct pxa3xx_nand_flash samsung512MbX16 = {
220 .timing = &samsung512MbX16_timing,
221 .cmdset = &smallpage_cmdset,
222 .page_per_block = 32,
223 .page_size = 512,
224 .flash_width = 16,
225 .dfc_width = 16,
226 .num_blocks = 4096,
227 .chip_id = 0x46ec,
228};
229
230static struct pxa3xx_nand_flash samsung2GbX8 = {
231 .timing = &samsung512MbX16_timing,
232 .cmdset = &smallpage_cmdset,
233 .page_per_block = 64,
234 .page_size = 2048,
235 .flash_width = 8,
236 .dfc_width = 8,
237 .num_blocks = 2048,
238 .chip_id = 0xdaec,
239}; 201};
240 202
241static struct pxa3xx_nand_flash samsung32GbX8 = { 203static struct pxa3xx_nand_flash builtin_flash_types[] = {
242 .timing = &samsung512MbX16_timing, 204 { 0, 0, 2048, 8, 8, 0, &default_cmdset, &timing[0] },
243 .cmdset = &smallpage_cmdset, 205 { 0x46ec, 32, 512, 16, 16, 4096, &default_cmdset, &timing[1] },
244 .page_per_block = 128, 206 { 0xdaec, 64, 2048, 8, 8, 2048, &default_cmdset, &timing[1] },
245 .page_size = 4096, 207 { 0xd7ec, 128, 4096, 8, 8, 8192, &default_cmdset, &timing[1] },
246 .flash_width = 8, 208 { 0xa12c, 64, 2048, 8, 8, 1024, &default_cmdset, &timing[2] },
247 .dfc_width = 8, 209 { 0xb12c, 64, 2048, 16, 16, 1024, &default_cmdset, &timing[2] },
248 .num_blocks = 8192, 210 { 0xdc2c, 64, 2048, 8, 8, 4096, &default_cmdset, &timing[2] },
249 .chip_id = 0xd7ec, 211 { 0xcc2c, 64, 2048, 16, 16, 4096, &default_cmdset, &timing[2] },
212 { 0xba20, 64, 2048, 16, 16, 2048, &default_cmdset, &timing[3] },
250}; 213};
251 214
252static struct pxa3xx_nand_timing micron_timing = { 215/* Define a default flash type setting that serves for flash detection only */
253 .tCH = 10, 216#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
254 .tCS = 25,
255 .tWH = 15,
256 .tWP = 25,
257 .tRH = 15,
258 .tRP = 30,
259 .tR = 25000,
260 .tWHR = 60,
261 .tAR = 10,
262};
263
264static struct pxa3xx_nand_flash micron1GbX8 = {
265 .timing = &micron_timing,
266 .cmdset = &largepage_cmdset,
267 .page_per_block = 64,
268 .page_size = 2048,
269 .flash_width = 8,
270 .dfc_width = 8,
271 .num_blocks = 1024,
272 .chip_id = 0xa12c,
273};
274
275static struct pxa3xx_nand_flash micron1GbX16 = {
276 .timing = &micron_timing,
277 .cmdset = &largepage_cmdset,
278 .page_per_block = 64,
279 .page_size = 2048,
280 .flash_width = 16,
281 .dfc_width = 16,
282 .num_blocks = 1024,
283 .chip_id = 0xb12c,
284};
285
286static struct pxa3xx_nand_flash micron4GbX8 = {
287 .timing = &micron_timing,
288 .cmdset = &largepage_cmdset,
289 .page_per_block = 64,
290 .page_size = 2048,
291 .flash_width = 8,
292 .dfc_width = 8,
293 .num_blocks = 4096,
294 .chip_id = 0xdc2c,
295};
296
297static struct pxa3xx_nand_flash micron4GbX16 = {
298 .timing = &micron_timing,
299 .cmdset = &largepage_cmdset,
300 .page_per_block = 64,
301 .page_size = 2048,
302 .flash_width = 16,
303 .dfc_width = 16,
304 .num_blocks = 4096,
305 .chip_id = 0xcc2c,
306};
307
308static struct pxa3xx_nand_timing stm2GbX16_timing = {
309 .tCH = 10,
310 .tCS = 35,
311 .tWH = 15,
312 .tWP = 25,
313 .tRH = 15,
314 .tRP = 25,
315 .tR = 25000,
316 .tWHR = 60,
317 .tAR = 10,
318};
319
320static struct pxa3xx_nand_flash stm2GbX16 = {
321 .timing = &stm2GbX16_timing,
322 .cmdset = &largepage_cmdset,
323 .page_per_block = 64,
324 .page_size = 2048,
325 .flash_width = 16,
326 .dfc_width = 16,
327 .num_blocks = 2048,
328 .chip_id = 0xba20,
329};
330
331static struct pxa3xx_nand_flash *builtin_flash_types[] = {
332 &samsung512MbX16,
333 &samsung2GbX8,
334 &samsung32GbX8,
335 &micron1GbX8,
336 &micron1GbX16,
337 &micron4GbX8,
338 &micron4GbX16,
339 &stm2GbX16,
340};
341#endif /* CONFIG_MTD_NAND_PXA3xx_BUILTIN */
342 217
343#define NDTR0_tCH(c) (min((c), 7) << 19) 218#define NDTR0_tCH(c) (min((c), 7) << 19)
344#define NDTR0_tCS(c) (min((c), 7) << 16) 219#define NDTR0_tCS(c) (min((c), 7) << 16)
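The positional initializers in builtin_flash_types[] above carry the same fields the removed per-chip structures spelled out. The 0x46ec entry, for instance, matches the old samsung512MbX16 definition and would read as follows with designated initializers (illustration only):

{
        .chip_id        = 0x46ec,
        .page_per_block = 32,
        .page_size      = 512,
        .flash_width    = 16,
        .dfc_width      = 16,
        .num_blocks     = 4096,
        .cmdset         = &default_cmdset,
        .timing         = &timing[1],
},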
@@ -351,23 +226,9 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = {
351#define NDTR1_tWHR(c) (min((c), 15) << 4) 226#define NDTR1_tWHR(c) (min((c), 15) << 4)
352#define NDTR1_tAR(c) (min((c), 15) << 0) 227#define NDTR1_tAR(c) (min((c), 15) << 0)
353 228
354#define tCH_NDTR0(r) (((r) >> 19) & 0x7)
355#define tCS_NDTR0(r) (((r) >> 16) & 0x7)
356#define tWH_NDTR0(r) (((r) >> 11) & 0x7)
357#define tWP_NDTR0(r) (((r) >> 8) & 0x7)
358#define tRH_NDTR0(r) (((r) >> 3) & 0x7)
359#define tRP_NDTR0(r) (((r) >> 0) & 0x7)
360
361#define tR_NDTR1(r) (((r) >> 16) & 0xffff)
362#define tWHR_NDTR1(r) (((r) >> 4) & 0xf)
363#define tAR_NDTR1(r) (((r) >> 0) & 0xf)
364
365/* convert nano-seconds to nand flash controller clock cycles */ 229/* convert nano-seconds to nand flash controller clock cycles */
366#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000) 230#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
367 231
368/* convert nand flash controller clock cycles to nano-seconds */
369#define cycle2ns(c, clk) ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))
370
371static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info, 232static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
372 const struct pxa3xx_nand_timing *t) 233 const struct pxa3xx_nand_timing *t)
373{ 234{
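A worked example for the ns2cycle() macro kept above, with an assumed 156 MHz controller clock (the clock rate is not taken from the patch):

/*
 * clk = 156000000  =>  clk / 1000000 = 156
 * ns2cycle(25, clk) = (25 * 156) / 1000 = 3 cycles
 * (integer division, so the result truncates rather than rounding up)
 */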
@@ -385,6 +246,8 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
385 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) | 246 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
386 NDTR1_tAR(ns2cycle(t->tAR, nand_clk)); 247 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
387 248
249 info->ndtr0cs0 = ndtr0;
250 info->ndtr1cs0 = ndtr1;
388 nand_writel(info, NDTR0CS0, ndtr0); 251 nand_writel(info, NDTR0CS0, ndtr0);
389 nand_writel(info, NDTR1CS0, ndtr1); 252 nand_writel(info, NDTR1CS0, ndtr1);
390} 253}
@@ -408,23 +271,31 @@ static int wait_for_event(struct pxa3xx_nand_info *info, uint32_t event)
408 return -ETIMEDOUT; 271 return -ETIMEDOUT;
409} 272}
410 273
411static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info, 274static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
412 uint16_t cmd, int column, int page_addr)
413{ 275{
414 const struct pxa3xx_nand_flash *f = info->flash_info; 276 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
415 const struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
416 277
417 /* calculate data size */ 278 info->data_size = info->page_size;
418 switch (f->page_size) { 279 if (!oob_enable) {
280 info->oob_size = 0;
281 return;
282 }
283
284 switch (info->page_size) {
419 case 2048: 285 case 2048:
420 info->data_size = (info->use_ecc) ? 2088 : 2112; 286 info->oob_size = (info->use_ecc) ? 40 : 64;
421 break; 287 break;
422 case 512: 288 case 512:
423 info->data_size = (info->use_ecc) ? 520 : 528; 289 info->oob_size = (info->use_ecc) ? 8 : 16;
424 break; 290 break;
425 default:
426 return -EINVAL;
427 } 291 }
292}
293
294static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info,
295 uint16_t cmd, int column, int page_addr)
296{
297 const struct pxa3xx_nand_cmdset *cmdset = info->cmdset;
298 pxa3xx_set_datasize(info);
428 299
429 /* generate values for NDCBx registers */ 300 /* generate values for NDCBx registers */
430 info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0); 301 info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
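For quick reference, the transfer sizes pxa3xx_set_datasize() above produces:

/*
 * page_size 2048: oob_size = 40 with hardware ECC, 64 raw
 * page_size  512: oob_size =  8 with hardware ECC, 16 raw
 * NDCR_SPARE_EN clear: oob_size = 0, only page data is transferred
 */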
@@ -463,12 +334,13 @@ static int prepare_erase_cmd(struct pxa3xx_nand_info *info,
463 334
464static int prepare_other_cmd(struct pxa3xx_nand_info *info, uint16_t cmd) 335static int prepare_other_cmd(struct pxa3xx_nand_info *info, uint16_t cmd)
465{ 336{
466 const struct pxa3xx_nand_cmdset *cmdset = info->flash_info->cmdset; 337 const struct pxa3xx_nand_cmdset *cmdset = info->cmdset;
467 338
468 info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0); 339 info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
469 info->ndcb1 = 0; 340 info->ndcb1 = 0;
470 info->ndcb2 = 0; 341 info->ndcb2 = 0;
471 342
343 info->oob_size = 0;
472 if (cmd == cmdset->read_id) { 344 if (cmd == cmdset->read_id) {
473 info->ndcb0 |= NDCB0_CMD_TYPE(3); 345 info->ndcb0 |= NDCB0_CMD_TYPE(3);
474 info->data_size = 8; 346 info->data_size = 8;
@@ -537,6 +409,9 @@ static int handle_data_pio(struct pxa3xx_nand_info *info)
537 case STATE_PIO_WRITING: 409 case STATE_PIO_WRITING:
538 __raw_writesl(info->mmio_base + NDDB, info->data_buff, 410 __raw_writesl(info->mmio_base + NDDB, info->data_buff,
539 DIV_ROUND_UP(info->data_size, 4)); 411 DIV_ROUND_UP(info->data_size, 4));
412 if (info->oob_size > 0)
413 __raw_writesl(info->mmio_base + NDDB, info->oob_buff,
414 DIV_ROUND_UP(info->oob_size, 4));
540 415
541 enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD); 416 enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
542 417
@@ -549,6 +424,9 @@ static int handle_data_pio(struct pxa3xx_nand_info *info)
549 case STATE_PIO_READING: 424 case STATE_PIO_READING:
550 __raw_readsl(info->mmio_base + NDDB, info->data_buff, 425 __raw_readsl(info->mmio_base + NDDB, info->data_buff,
551 DIV_ROUND_UP(info->data_size, 4)); 426 DIV_ROUND_UP(info->data_size, 4));
427 if (info->oob_size > 0)
428 __raw_readsl(info->mmio_base + NDDB, info->oob_buff,
429 DIV_ROUND_UP(info->oob_size, 4));
552 break; 430 break;
553 default: 431 default:
554 printk(KERN_ERR "%s: invalid state %d\n", __func__, 432 printk(KERN_ERR "%s: invalid state %d\n", __func__,
@@ -563,7 +441,7 @@ static int handle_data_pio(struct pxa3xx_nand_info *info)
563static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out) 441static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out)
564{ 442{
565 struct pxa_dma_desc *desc = info->data_desc; 443 struct pxa_dma_desc *desc = info->data_desc;
566 int dma_len = ALIGN(info->data_size, 32); 444 int dma_len = ALIGN(info->data_size + info->oob_size, 32);
567 445
568 desc->ddadr = DDADR_STOP; 446 desc->ddadr = DDADR_STOP;
569 desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len; 447 desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
@@ -700,8 +578,7 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
700 int column, int page_addr) 578 int column, int page_addr)
701{ 579{
702 struct pxa3xx_nand_info *info = mtd->priv; 580 struct pxa3xx_nand_info *info = mtd->priv;
703 const struct pxa3xx_nand_flash *flash_info = info->flash_info; 581 const struct pxa3xx_nand_cmdset *cmdset = info->cmdset;
704 const struct pxa3xx_nand_cmdset *cmdset = flash_info->cmdset;
705 int ret; 582 int ret;
706 583
707 info->use_dma = (use_dma) ? 1 : 0; 584 info->use_dma = (use_dma) ? 1 : 0;
@@ -925,8 +802,7 @@ static int pxa3xx_nand_ecc_correct(struct mtd_info *mtd,
925 802
926static int __readid(struct pxa3xx_nand_info *info, uint32_t *id) 803static int __readid(struct pxa3xx_nand_info *info, uint32_t *id)
927{ 804{
928 const struct pxa3xx_nand_flash *f = info->flash_info; 805 const struct pxa3xx_nand_cmdset *cmdset = info->cmdset;
929 const struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
930 uint32_t ndcr; 806 uint32_t ndcr;
931 uint8_t id_buff[8]; 807 uint8_t id_buff[8];
932 808
@@ -968,7 +844,9 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
968 return -EINVAL; 844 return -EINVAL;
969 845
970 /* calculate flash information */ 846 /* calculate flash information */
971 info->oob_size = (f->page_size == 2048) ? 64 : 16; 847 info->cmdset = f->cmdset;
848 info->page_size = f->page_size;
849 info->oob_buff = info->data_buff + f->page_size;
972 info->read_id_bytes = (f->page_size == 2048) ? 4 : 2; 850 info->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
973 851
974 /* calculate addressing information */ 852 /* calculate addressing information */
@@ -992,49 +870,20 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
992 info->reg_ndcr = ndcr; 870 info->reg_ndcr = ndcr;
993 871
994 pxa3xx_nand_set_timing(info, f->timing); 872 pxa3xx_nand_set_timing(info, f->timing);
995 info->flash_info = f;
996 return 0; 873 return 0;
997} 874}
998 875
999static void pxa3xx_nand_detect_timing(struct pxa3xx_nand_info *info,
1000 struct pxa3xx_nand_timing *t)
1001{
1002 unsigned long nand_clk = clk_get_rate(info->clk);
1003 uint32_t ndtr0 = nand_readl(info, NDTR0CS0);
1004 uint32_t ndtr1 = nand_readl(info, NDTR1CS0);
1005
1006 t->tCH = cycle2ns(tCH_NDTR0(ndtr0), nand_clk);
1007 t->tCS = cycle2ns(tCS_NDTR0(ndtr0), nand_clk);
1008 t->tWH = cycle2ns(tWH_NDTR0(ndtr0), nand_clk);
1009 t->tWP = cycle2ns(tWP_NDTR0(ndtr0), nand_clk);
1010 t->tRH = cycle2ns(tRH_NDTR0(ndtr0), nand_clk);
1011 t->tRP = cycle2ns(tRP_NDTR0(ndtr0), nand_clk);
1012
1013 t->tR = cycle2ns(tR_NDTR1(ndtr1), nand_clk);
1014 t->tWHR = cycle2ns(tWHR_NDTR1(ndtr1), nand_clk);
1015 t->tAR = cycle2ns(tAR_NDTR1(ndtr1), nand_clk);
1016}
1017
1018static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info) 876static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1019{ 877{
1020 uint32_t ndcr = nand_readl(info, NDCR); 878 uint32_t ndcr = nand_readl(info, NDCR);
1021 struct nand_flash_dev *type = NULL; 879 struct nand_flash_dev *type = NULL;
1022 uint32_t id = -1; 880 uint32_t id = -1, page_per_block, num_blocks;
1023 int i; 881 int i;
1024 882
1025 default_flash.page_per_block = ndcr & NDCR_PG_PER_BLK ? 64 : 32; 883 page_per_block = ndcr & NDCR_PG_PER_BLK ? 64 : 32;
1026 default_flash.page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512; 884 info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1027 default_flash.flash_width = ndcr & NDCR_DWIDTH_M ? 16 : 8;
1028 default_flash.dfc_width = ndcr & NDCR_DWIDTH_C ? 16 : 8;
1029
1030 if (default_flash.page_size == 2048)
1031 default_flash.cmdset = &largepage_cmdset;
1032 else
1033 default_flash.cmdset = &smallpage_cmdset;
1034
1035 /* set info fields needed to __readid */ 885 /* set info fields needed to __readid */
1036 info->flash_info = &default_flash; 886 info->read_id_bytes = (info->page_size == 2048) ? 4 : 2;
1037 info->read_id_bytes = (default_flash.page_size == 2048) ? 4 : 2;
1038 info->reg_ndcr = ndcr; 887 info->reg_ndcr = ndcr;
1039 888
1040 if (__readid(info, &id)) 889 if (__readid(info, &id))
@@ -1053,21 +902,20 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1053 return -ENODEV; 902 return -ENODEV;
1054 903
1055 /* fill the missing flash information */ 904 /* fill the missing flash information */
1056 i = __ffs(default_flash.page_per_block * default_flash.page_size); 905 i = __ffs(page_per_block * info->page_size);
1057 default_flash.num_blocks = type->chipsize << (20 - i); 906 num_blocks = type->chipsize << (20 - i);
1058
1059 info->oob_size = (default_flash.page_size == 2048) ? 64 : 16;
1060 907
1061 /* calculate addressing information */ 908 /* calculate addressing information */
1062 info->col_addr_cycles = (default_flash.page_size == 2048) ? 2 : 1; 909 info->col_addr_cycles = (info->page_size == 2048) ? 2 : 1;
1063 910
1064 if (default_flash.num_blocks * default_flash.page_per_block > 65536) 911 if (num_blocks * page_per_block > 65536)
1065 info->row_addr_cycles = 3; 912 info->row_addr_cycles = 3;
1066 else 913 else
1067 info->row_addr_cycles = 2; 914 info->row_addr_cycles = 2;
1068 915
1069 pxa3xx_nand_detect_timing(info, &default_timing); 916 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1070 default_flash.timing = &default_timing; 917 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
918 info->cmdset = &default_cmdset;
1071 919
1072 return 0; 920 return 0;
1073} 921}
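A worked example for the block-count arithmetic above, using an assumed geometry rather than values from the patch:

/*
 * 2048-byte pages, 64 pages per block:
 *   page_per_block * page_size = 131072 = 2^17, so __ffs(...) = 17
 *   num_blocks = type->chipsize << (20 - 17) = chipsize-in-MiB * 8
 * e.g. a 256 MiB chip yields 2048 blocks of 128 KiB each.
 */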
@@ -1083,38 +931,29 @@ static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info,
1083 if (pxa3xx_nand_detect_config(info) == 0) 931 if (pxa3xx_nand_detect_config(info) == 0)
1084 return 0; 932 return 0;
1085 933
1086 for (i = 0; i<pdata->num_flash; ++i) { 934 /* we use default timing to detect id */
1087 f = pdata->flash + i; 935 f = DEFAULT_FLASH_TYPE;
1088 936 pxa3xx_nand_config_flash(info, f);
1089 if (pxa3xx_nand_config_flash(info, f)) 937 if (__readid(info, &id))
1090 continue; 938 goto fail_detect;
1091 939
1092 if (__readid(info, &id)) 940 for (i=0; i<ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1; i++) {
 1093 continue; 941 /* we first choose the flash definition from platform */
1094 942 if (i < pdata->num_flash)
1095 if (id == f->chip_id) 943 f = pdata->flash + i;
1096 return 0; 944 else
1097 } 945 f = &builtin_flash_types[i - pdata->num_flash + 1];
1098 946 if (f->chip_id == id) {
1099#ifdef CONFIG_MTD_NAND_PXA3xx_BUILTIN 947 dev_info(&info->pdev->dev, "detect chip id: 0x%x\n", id);
1100 for (i = 0; i < ARRAY_SIZE(builtin_flash_types); i++) { 948 pxa3xx_nand_config_flash(info, f);
1101
1102 f = builtin_flash_types[i];
1103
1104 if (pxa3xx_nand_config_flash(info, f))
1105 continue;
1106
1107 if (__readid(info, &id))
1108 continue;
1109
1110 if (id == f->chip_id)
1111 return 0; 949 return 0;
950 }
1112 } 951 }
1113#endif
1114 952
1115 dev_warn(&info->pdev->dev, 953 dev_warn(&info->pdev->dev,
1116 "failed to detect configured nand flash; found %04x instead of\n", 954 "failed to detect configured nand flash; found %04x instead of\n",
1117 id); 955 id);
956fail_detect:
1118 return -ENODEV; 957 return -ENODEV;
1119} 958}
1120 959
@@ -1177,10 +1016,9 @@ static struct nand_ecclayout hw_largepage_ecclayout = {
1177static void pxa3xx_nand_init_mtd(struct mtd_info *mtd, 1016static void pxa3xx_nand_init_mtd(struct mtd_info *mtd,
1178 struct pxa3xx_nand_info *info) 1017 struct pxa3xx_nand_info *info)
1179{ 1018{
1180 const struct pxa3xx_nand_flash *f = info->flash_info;
1181 struct nand_chip *this = &info->nand_chip; 1019 struct nand_chip *this = &info->nand_chip;
1182 1020
1183 this->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16: 0; 1021 this->options = (info->reg_ndcr & NDCR_DWIDTH_C) ? NAND_BUSWIDTH_16: 0;
1184 1022
1185 this->waitfunc = pxa3xx_nand_waitfunc; 1023 this->waitfunc = pxa3xx_nand_waitfunc;
1186 this->select_chip = pxa3xx_nand_select_chip; 1024 this->select_chip = pxa3xx_nand_select_chip;
@@ -1196,9 +1034,9 @@ static void pxa3xx_nand_init_mtd(struct mtd_info *mtd,
1196 this->ecc.hwctl = pxa3xx_nand_ecc_hwctl; 1034 this->ecc.hwctl = pxa3xx_nand_ecc_hwctl;
1197 this->ecc.calculate = pxa3xx_nand_ecc_calculate; 1035 this->ecc.calculate = pxa3xx_nand_ecc_calculate;
1198 this->ecc.correct = pxa3xx_nand_ecc_correct; 1036 this->ecc.correct = pxa3xx_nand_ecc_correct;
1199 this->ecc.size = f->page_size; 1037 this->ecc.size = info->page_size;
1200 1038
1201 if (f->page_size == 2048) 1039 if (info->page_size == 2048)
1202 this->ecc.layout = &hw_largepage_ecclayout; 1040 this->ecc.layout = &hw_largepage_ecclayout;
1203 else 1041 else
1204 this->ecc.layout = &hw_smallpage_ecclayout; 1042 this->ecc.layout = &hw_smallpage_ecclayout;
@@ -1411,9 +1249,11 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
1411 struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev); 1249 struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
1412 struct pxa3xx_nand_info *info = mtd->priv; 1250 struct pxa3xx_nand_info *info = mtd->priv;
1413 1251
1252 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1253 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1414 clk_enable(info->clk); 1254 clk_enable(info->clk);
1415 1255
1416 return pxa3xx_nand_config_flash(info, info->flash_info); 1256 return 0;
1417} 1257}
1418#else 1258#else
1419#define pxa3xx_nand_suspend NULL 1259#define pxa3xx_nand_suspend NULL
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index 5169ca6a66bc..d9d7efbc77cc 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -757,11 +757,6 @@ static irqreturn_t r852_irq(int irq, void *data)
757 757
758 spin_lock_irqsave(&dev->irqlock, flags); 758 spin_lock_irqsave(&dev->irqlock, flags);
759 759
760 /* We can recieve shared interrupt while pci is suspended
761 in that case reads will return 0xFFFFFFFF.... */
762 if (dev->insuspend)
763 goto out;
764
765 /* handle card detection interrupts first */ 760 /* handle card detection interrupts first */
766 card_status = r852_read_reg(dev, R852_CARD_IRQ_STA); 761 card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
767 r852_write_reg(dev, R852_CARD_IRQ_STA, card_status); 762 r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);
@@ -1035,7 +1030,6 @@ void r852_shutdown(struct pci_dev *pci_dev)
1035int r852_suspend(struct device *device) 1030int r852_suspend(struct device *device)
1036{ 1031{
1037 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); 1032 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
1038 unsigned long flags;
1039 1033
1040 if (dev->ctlreg & R852_CTL_CARDENABLE) 1034 if (dev->ctlreg & R852_CTL_CARDENABLE)
1041 return -EBUSY; 1035 return -EBUSY;
@@ -1047,43 +1041,22 @@ int r852_suspend(struct device *device)
1047 r852_disable_irqs(dev); 1041 r852_disable_irqs(dev);
1048 r852_engine_disable(dev); 1042 r852_engine_disable(dev);
1049 1043
1050 spin_lock_irqsave(&dev->irqlock, flags);
1051 dev->insuspend = 1;
1052 spin_unlock_irqrestore(&dev->irqlock, flags);
1053
1054 /* At that point, even if interrupt handler is running, it will quit */
1055 /* So wait for this to happen explictly */
1056 synchronize_irq(dev->irq);
1057
1058 /* If card was pulled off just during the suspend, which is very 1044 /* If card was pulled off just during the suspend, which is very
1059 unlikely, we will remove it on resume, it too late now 1045 unlikely, we will remove it on resume, it too late now
1060 anyway... */ 1046 anyway... */
1061 dev->card_unstable = 0; 1047 dev->card_unstable = 0;
1062 1048 return 0;
1063 pci_save_state(to_pci_dev(device));
1064 return pci_prepare_to_sleep(to_pci_dev(device));
1065} 1049}
1066 1050
1067int r852_resume(struct device *device) 1051int r852_resume(struct device *device)
1068{ 1052{
1069 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); 1053 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
1070 unsigned long flags;
1071
1072 /* Turn on the hardware */
1073 pci_back_from_sleep(to_pci_dev(device));
1074 pci_restore_state(to_pci_dev(device));
1075 1054
1076 r852_disable_irqs(dev); 1055 r852_disable_irqs(dev);
1077 r852_card_update_present(dev); 1056 r852_card_update_present(dev);
1078 r852_engine_disable(dev); 1057 r852_engine_disable(dev);
1079 1058
1080 1059
 1081 /* Now it's safe for the IRQ to run */
1082 spin_lock_irqsave(&dev->irqlock, flags);
1083 dev->insuspend = 0;
1084 spin_unlock_irqrestore(&dev->irqlock, flags);
1085
1086
1087 /* If card status changed, just do the work */ 1060 /* If card status changed, just do the work */
1088 if (dev->card_detected != dev->card_registred) { 1061 if (dev->card_detected != dev->card_registred) {
1089 dbg("card was %s during low power state", 1062 dbg("card was %s during low power state",
@@ -1121,7 +1094,6 @@ MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
1121 1094
1122SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume); 1095SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
1123 1096
1124
1125static struct pci_driver r852_pci_driver = { 1097static struct pci_driver r852_pci_driver = {
1126 .name = DRV_NAME, 1098 .name = DRV_NAME,
1127 .id_table = r852_pci_id_tbl, 1099 .id_table = r852_pci_id_tbl,
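Note on the r852.c hunks above: the driver drops its own insuspend flag and the explicit pci_save_state()/pci_prepare_to_sleep() calls, because with dev_pm_ops the PCI core saves and restores config space and handles the power transition; the callbacks only quiesce and re-initialise the hardware. A minimal sketch of that pattern, with hypothetical foo_* names standing in for the driver's helpers:

	#include <linux/pci.h>
	#include <linux/pm.h>

	struct foo_device {
		void __iomem *mmio;	/* hypothetical register window */
	};

	static void foo_disable_irqs(struct foo_device *dev)
	{
		/* mask the device's interrupt sources here */
	}

	static void foo_reinit_hw(struct foo_device *dev)
	{
		/* reprogram registers lost across the low-power state */
	}

	static int foo_suspend(struct device *device)
	{
		struct foo_device *dev = pci_get_drvdata(to_pci_dev(device));

		foo_disable_irqs(dev);
		/* no pci_save_state()/pci_prepare_to_sleep(): the PCI core does it */
		return 0;
	}

	static int foo_resume(struct device *device)
	{
		struct foo_device *dev = pci_get_drvdata(to_pci_dev(device));

		/* the core has already woken the device and restored config space */
		foo_reinit_hw(dev);
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

The ops struct is then hooked up through the pci_driver's .driver.pm pointer, as in the hunk above.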
diff --git a/drivers/mtd/nand/r852.h b/drivers/mtd/nand/r852.h
index 8096cc280c73..e6a21d9d22c6 100644
--- a/drivers/mtd/nand/r852.h
+++ b/drivers/mtd/nand/r852.h
@@ -140,8 +140,6 @@ struct r852_device {
140 /* interrupt handling */ 140 /* interrupt handling */
141 spinlock_t irqlock; /* IRQ protecting lock */ 141 spinlock_t irqlock; /* IRQ protecting lock */
142 int irq; /* irq num */ 142 int irq; /* irq num */
143 int insuspend; /* device is suspended */
144
145 /* misc */ 143 /* misc */
146 void *tmp_buffer; /* temporary buffer */ 144 void *tmp_buffer; /* temporary buffer */
147 uint8_t ctlreg; /* cached contents of control reg */ 145 uint8_t ctlreg; /* cached contents of control reg */
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index 7bd171eefd21..a996718fa6b0 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -44,7 +44,7 @@ int __devinit of_mtd_parse_partitions(struct device *dev,
44 pp = NULL; 44 pp = NULL;
45 i = 0; 45 i = 0;
46 while ((pp = of_get_next_child(node, pp))) { 46 while ((pp = of_get_next_child(node, pp))) {
47 const u32 *reg; 47 const __be32 *reg;
48 int len; 48 int len;
49 49
50 reg = of_get_property(pp, "reg", &len); 50 reg = of_get_property(pp, "reg", &len);
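Note on the ofpart.c hunk above: only the property pointer's type changes, because device-tree cells are stored big-endian; typing them __be32 and converting with be32_to_cpup() (or of_read_number()) before use keeps the values correct on little-endian hosts and keeps sparse happy. A minimal sketch, assuming a two-cell <offset size> "reg" property and a hypothetical helper name:

	#include <linux/errno.h>
	#include <linux/of.h>

	static int foo_read_reg(struct device_node *pp, u64 *offset, u64 *size)
	{
		const __be32 *reg;
		int len;

		reg = of_get_property(pp, "reg", &len);
		if (!reg || len < 2 * sizeof(*reg))
			return -EINVAL;

		/* Cells are big-endian in the tree; convert to CPU order. */
		*offset = be32_to_cpup(reg);
		*size = be32_to_cpup(reg + 1);
		return 0;
	}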
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 3f32289fdbb5..4dbd0f58eebf 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -32,10 +32,11 @@ config MTD_ONENAND_OMAP2
32 32
33config MTD_ONENAND_SAMSUNG 33config MTD_ONENAND_SAMSUNG
34 tristate "OneNAND on Samsung SOC controller support" 34 tristate "OneNAND on Samsung SOC controller support"
35 depends on ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 35 depends on ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5PV310
36 help 36 help
 37 Support for a OneNAND flash device connected to a Samsung SoC 37 Support for a OneNAND flash device connected to a Samsung SoC.
 38 S3C64XX/S5PC1XX controller. 38 S3C64XX/S5PC100 use the command mapping method.
 39 S5PC110/S5PC210 use the generic OneNAND method.
39 40
40config MTD_ONENAND_OTP 41config MTD_ONENAND_OTP
41 bool "OneNAND OTP Support" 42 bool "OneNAND OTP Support"
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index a2bb520286f8..6b3a875647c9 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -3365,18 +3365,19 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
3365static void onenand_check_features(struct mtd_info *mtd) 3365static void onenand_check_features(struct mtd_info *mtd)
3366{ 3366{
3367 struct onenand_chip *this = mtd->priv; 3367 struct onenand_chip *this = mtd->priv;
3368 unsigned int density, process; 3368 unsigned int density, process, numbufs;
3369 3369
3370 /* Lock scheme depends on density and process */ 3370 /* Lock scheme depends on density and process */
3371 density = onenand_get_density(this->device_id); 3371 density = onenand_get_density(this->device_id);
3372 process = this->version_id >> ONENAND_VERSION_PROCESS_SHIFT; 3372 process = this->version_id >> ONENAND_VERSION_PROCESS_SHIFT;
3373 numbufs = this->read_word(this->base + ONENAND_REG_NUM_BUFFERS) >> 8;
3373 3374
3374 /* Lock scheme */ 3375 /* Lock scheme */
3375 switch (density) { 3376 switch (density) {
3376 case ONENAND_DEVICE_DENSITY_4Gb: 3377 case ONENAND_DEVICE_DENSITY_4Gb:
3377 if (ONENAND_IS_DDP(this)) 3378 if (ONENAND_IS_DDP(this))
3378 this->options |= ONENAND_HAS_2PLANE; 3379 this->options |= ONENAND_HAS_2PLANE;
3379 else 3380 else if (numbufs == 1)
3380 this->options |= ONENAND_HAS_4KB_PAGE; 3381 this->options |= ONENAND_HAS_4KB_PAGE;
3381 3382
3382 case ONENAND_DEVICE_DENSITY_2Gb: 3383 case ONENAND_DEVICE_DENSITY_2Gb:
@@ -4027,7 +4028,7 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
4027 mtd->ecclayout = this->ecclayout; 4028 mtd->ecclayout = this->ecclayout;
4028 4029
4029 /* Fill in remaining MTD driver data */ 4030 /* Fill in remaining MTD driver data */
4030 mtd->type = MTD_NANDFLASH; 4031 mtd->type = ONENAND_IS_MLC(this) ? MTD_MLCNANDFLASH : MTD_NANDFLASH;
4031 mtd->flags = MTD_CAP_NANDFLASH; 4032 mtd->flags = MTD_CAP_NANDFLASH;
4032 mtd->erase = onenand_erase; 4033 mtd->erase = onenand_erase;
4033 mtd->point = NULL; 4034 mtd->point = NULL;
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index a460f1b748c2..0de7a05e6de0 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -22,6 +22,7 @@
22#include <linux/mtd/onenand.h> 22#include <linux/mtd/onenand.h>
23#include <linux/mtd/partitions.h> 23#include <linux/mtd/partitions.h>
24#include <linux/dma-mapping.h> 24#include <linux/dma-mapping.h>
25#include <linux/interrupt.h>
25 26
26#include <asm/mach/flash.h> 27#include <asm/mach/flash.h>
27#include <plat/regs-onenand.h> 28#include <plat/regs-onenand.h>
@@ -58,7 +59,7 @@ enum soc_type {
58#define MAP_11 (0x3) 59#define MAP_11 (0x3)
59 60
60#define S3C64XX_CMD_MAP_SHIFT 24 61#define S3C64XX_CMD_MAP_SHIFT 24
61#define S5PC1XX_CMD_MAP_SHIFT 26 62#define S5PC100_CMD_MAP_SHIFT 26
62 63
63#define S3C6400_FBA_SHIFT 10 64#define S3C6400_FBA_SHIFT 10
64#define S3C6400_FPA_SHIFT 4 65#define S3C6400_FPA_SHIFT 4
@@ -81,6 +82,17 @@ enum soc_type {
81#define S5PC110_DMA_TRANS_CMD 0x418 82#define S5PC110_DMA_TRANS_CMD 0x418
82#define S5PC110_DMA_TRANS_STATUS 0x41C 83#define S5PC110_DMA_TRANS_STATUS 0x41C
83#define S5PC110_DMA_TRANS_DIR 0x420 84#define S5PC110_DMA_TRANS_DIR 0x420
85#define S5PC110_INTC_DMA_CLR 0x1004
86#define S5PC110_INTC_ONENAND_CLR 0x1008
87#define S5PC110_INTC_DMA_MASK 0x1024
88#define S5PC110_INTC_ONENAND_MASK 0x1028
89#define S5PC110_INTC_DMA_PEND 0x1044
90#define S5PC110_INTC_ONENAND_PEND 0x1048
91#define S5PC110_INTC_DMA_STATUS 0x1064
92#define S5PC110_INTC_ONENAND_STATUS 0x1068
93
94#define S5PC110_INTC_DMA_TD (1 << 24)
95#define S5PC110_INTC_DMA_TE (1 << 16)
84 96
85#define S5PC110_DMA_CFG_SINGLE (0x0 << 16) 97#define S5PC110_DMA_CFG_SINGLE (0x0 << 16)
86#define S5PC110_DMA_CFG_4BURST (0x2 << 16) 98#define S5PC110_DMA_CFG_4BURST (0x2 << 16)
@@ -134,6 +146,7 @@ struct s3c_onenand {
134 void __iomem *dma_addr; 146 void __iomem *dma_addr;
135 struct resource *dma_res; 147 struct resource *dma_res;
136 unsigned long phys_base; 148 unsigned long phys_base;
149 struct completion complete;
137#ifdef CONFIG_MTD_PARTITIONS 150#ifdef CONFIG_MTD_PARTITIONS
138 struct mtd_partition *parts; 151 struct mtd_partition *parts;
139#endif 152#endif
@@ -191,7 +204,7 @@ static unsigned int s3c64xx_cmd_map(unsigned type, unsigned val)
191 204
192static unsigned int s5pc1xx_cmd_map(unsigned type, unsigned val) 205static unsigned int s5pc1xx_cmd_map(unsigned type, unsigned val)
193{ 206{
194 return (type << S5PC1XX_CMD_MAP_SHIFT) | val; 207 return (type << S5PC100_CMD_MAP_SHIFT) | val;
195} 208}
196 209
197static unsigned int s3c6400_mem_addr(int fba, int fpa, int fsa) 210static unsigned int s3c6400_mem_addr(int fba, int fpa, int fsa)
@@ -531,10 +544,13 @@ static int onenand_write_bufferram(struct mtd_info *mtd, int area,
531 return 0; 544 return 0;
532} 545}
533 546
534static int s5pc110_dma_ops(void *dst, void *src, size_t count, int direction) 547static int (*s5pc110_dma_ops)(void *dst, void *src, size_t count, int direction);
548
549static int s5pc110_dma_poll(void *dst, void *src, size_t count, int direction)
535{ 550{
536 void __iomem *base = onenand->dma_addr; 551 void __iomem *base = onenand->dma_addr;
537 int status; 552 int status;
553 unsigned long timeout;
538 554
539 writel(src, base + S5PC110_DMA_SRC_ADDR); 555 writel(src, base + S5PC110_DMA_SRC_ADDR);
540 writel(dst, base + S5PC110_DMA_DST_ADDR); 556 writel(dst, base + S5PC110_DMA_DST_ADDR);
@@ -552,6 +568,13 @@ static int s5pc110_dma_ops(void *dst, void *src, size_t count, int direction)
552 568
553 writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD); 569 writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);
554 570
571 /*
 572 * There are no exact timeout values in the spec.
 573 * In practice it takes under 1 msec,
 574 * so 20 msecs are enough.
575 */
576 timeout = jiffies + msecs_to_jiffies(20);
577
555 do { 578 do {
556 status = readl(base + S5PC110_DMA_TRANS_STATUS); 579 status = readl(base + S5PC110_DMA_TRANS_STATUS);
557 if (status & S5PC110_DMA_TRANS_STATUS_TE) { 580 if (status & S5PC110_DMA_TRANS_STATUS_TE) {
@@ -559,13 +582,68 @@ static int s5pc110_dma_ops(void *dst, void *src, size_t count, int direction)
559 base + S5PC110_DMA_TRANS_CMD); 582 base + S5PC110_DMA_TRANS_CMD);
560 return -EIO; 583 return -EIO;
561 } 584 }
562 } while (!(status & S5PC110_DMA_TRANS_STATUS_TD)); 585 } while (!(status & S5PC110_DMA_TRANS_STATUS_TD) &&
586 time_before(jiffies, timeout));
563 587
564 writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD); 588 writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
565 589
566 return 0; 590 return 0;
567} 591}
568 592
593static irqreturn_t s5pc110_onenand_irq(int irq, void *data)
594{
595 void __iomem *base = onenand->dma_addr;
596 int status, cmd = 0;
597
598 status = readl(base + S5PC110_INTC_DMA_STATUS);
599
600 if (likely(status & S5PC110_INTC_DMA_TD))
601 cmd = S5PC110_DMA_TRANS_CMD_TDC;
602
603 if (unlikely(status & S5PC110_INTC_DMA_TE))
604 cmd = S5PC110_DMA_TRANS_CMD_TEC;
605
606 writel(cmd, base + S5PC110_DMA_TRANS_CMD);
607 writel(status, base + S5PC110_INTC_DMA_CLR);
608
609 if (!onenand->complete.done)
610 complete(&onenand->complete);
611
612 return IRQ_HANDLED;
613}
614
615static int s5pc110_dma_irq(void *dst, void *src, size_t count, int direction)
616{
617 void __iomem *base = onenand->dma_addr;
618 int status;
619
620 status = readl(base + S5PC110_INTC_DMA_MASK);
621 if (status) {
622 status &= ~(S5PC110_INTC_DMA_TD | S5PC110_INTC_DMA_TE);
623 writel(status, base + S5PC110_INTC_DMA_MASK);
624 }
625
626 writel(src, base + S5PC110_DMA_SRC_ADDR);
627 writel(dst, base + S5PC110_DMA_DST_ADDR);
628
629 if (direction == S5PC110_DMA_DIR_READ) {
630 writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
631 writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
632 } else {
633 writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
634 writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
635 }
636
637 writel(count, base + S5PC110_DMA_TRANS_SIZE);
638 writel(direction, base + S5PC110_DMA_TRANS_DIR);
639
640 writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);
641
642 wait_for_completion_timeout(&onenand->complete, msecs_to_jiffies(20));
643
644 return 0;
645}
646
569static int s5pc110_read_bufferram(struct mtd_info *mtd, int area, 647static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
570 unsigned char *buffer, int offset, size_t count) 648 unsigned char *buffer, int offset, size_t count)
571{ 649{
@@ -573,7 +651,8 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
573 void __iomem *p; 651 void __iomem *p;
574 void *buf = (void *) buffer; 652 void *buf = (void *) buffer;
575 dma_addr_t dma_src, dma_dst; 653 dma_addr_t dma_src, dma_dst;
576 int err; 654 int err, page_dma = 0;
655 struct device *dev = &onenand->pdev->dev;
577 656
578 p = this->base + area; 657 p = this->base + area;
579 if (ONENAND_CURRENT_BUFFERRAM(this)) { 658 if (ONENAND_CURRENT_BUFFERRAM(this)) {
@@ -597,21 +676,27 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
597 page = vmalloc_to_page(buf); 676 page = vmalloc_to_page(buf);
598 if (!page) 677 if (!page)
599 goto normal; 678 goto normal;
600 buf = page_address(page) + ((size_t) buf & ~PAGE_MASK);
601 }
602 679
603 /* DMA routine */ 680 page_dma = 1;
604 dma_src = onenand->phys_base + (p - this->base); 681 /* DMA routine */
605 dma_dst = dma_map_single(&onenand->pdev->dev, 682 dma_src = onenand->phys_base + (p - this->base);
606 buf, count, DMA_FROM_DEVICE); 683 dma_dst = dma_map_page(dev, page, 0, count, DMA_FROM_DEVICE);
607 if (dma_mapping_error(&onenand->pdev->dev, dma_dst)) { 684 } else {
608 dev_err(&onenand->pdev->dev, 685 /* DMA routine */
609 "Couldn't map a %d byte buffer for DMA\n", count); 686 dma_src = onenand->phys_base + (p - this->base);
687 dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
688 }
689 if (dma_mapping_error(dev, dma_dst)) {
690 dev_err(dev, "Couldn't map a %d byte buffer for DMA\n", count);
610 goto normal; 691 goto normal;
611 } 692 }
612 err = s5pc110_dma_ops((void *) dma_dst, (void *) dma_src, 693 err = s5pc110_dma_ops((void *) dma_dst, (void *) dma_src,
613 count, S5PC110_DMA_DIR_READ); 694 count, S5PC110_DMA_DIR_READ);
614 dma_unmap_single(&onenand->pdev->dev, dma_dst, count, DMA_FROM_DEVICE); 695
696 if (page_dma)
697 dma_unmap_page(dev, dma_dst, count, DMA_FROM_DEVICE);
698 else
699 dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
615 700
616 if (!err) 701 if (!err)
617 return 0; 702 return 0;
@@ -759,7 +844,6 @@ static void s3c_onenand_setup(struct mtd_info *mtd)
759 onenand->cmd_map = s5pc1xx_cmd_map; 844 onenand->cmd_map = s5pc1xx_cmd_map;
760 } else if (onenand->type == TYPE_S5PC110) { 845 } else if (onenand->type == TYPE_S5PC110) {
761 /* Use generic onenand functions */ 846 /* Use generic onenand functions */
762 onenand->cmd_map = s5pc1xx_cmd_map;
763 this->read_bufferram = s5pc110_read_bufferram; 847 this->read_bufferram = s5pc110_read_bufferram;
764 this->chip_probe = s5pc110_chip_probe; 848 this->chip_probe = s5pc110_chip_probe;
765 return; 849 return;
@@ -904,6 +988,20 @@ static int s3c_onenand_probe(struct platform_device *pdev)
904 } 988 }
905 989
906 onenand->phys_base = onenand->base_res->start; 990 onenand->phys_base = onenand->base_res->start;
991
992 s5pc110_dma_ops = s5pc110_dma_poll;
993 /* Interrupt support */
994 r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
995 if (r) {
996 init_completion(&onenand->complete);
997 s5pc110_dma_ops = s5pc110_dma_irq;
998 err = request_irq(r->start, s5pc110_onenand_irq,
999 IRQF_SHARED, "onenand", &onenand);
1000 if (err) {
1001 dev_err(&pdev->dev, "failed to get irq\n");
1002 goto scan_failed;
1003 }
1004 }
907 } 1005 }
908 1006
909 if (onenand_scan(mtd, 1)) { 1007 if (onenand_scan(mtd, 1)) {
@@ -1000,7 +1098,7 @@ static int s3c_pm_ops_suspend(struct device *dev)
1000 struct onenand_chip *this = mtd->priv; 1098 struct onenand_chip *this = mtd->priv;
1001 1099
1002 this->wait(mtd, FL_PM_SUSPENDED); 1100 this->wait(mtd, FL_PM_SUSPENDED);
1003 return mtd->suspend(mtd); 1101 return 0;
1004} 1102}
1005 1103
1006static int s3c_pm_ops_resume(struct device *dev) 1104static int s3c_pm_ops_resume(struct device *dev)
@@ -1009,7 +1107,6 @@ static int s3c_pm_ops_resume(struct device *dev)
1009 struct mtd_info *mtd = platform_get_drvdata(pdev); 1107 struct mtd_info *mtd = platform_get_drvdata(pdev);
1010 struct onenand_chip *this = mtd->priv; 1108 struct onenand_chip *this = mtd->priv;
1011 1109
1012 mtd->resume(mtd);
1013 this->unlock_all(mtd); 1110 this->unlock_all(mtd);
1014 return 0; 1111 return 0;
1015} 1112}
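Note on the samsung.c hunks above: an interrupt-driven DMA path is added beside the existing polling one. The poll loop now bounds its wait with a jiffies timeout, while the IRQ variant arms a completion that the handler signals once the transfer-done or transfer-error status is raised (the S5PC110_INTC_* offsets in the hunk). A minimal sketch of that completion handshake, with placeholder foo_* names rather than the driver's:

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/interrupt.h>
	#include <linux/jiffies.h>

	static DECLARE_COMPLETION(foo_dma_done);

	static irqreturn_t foo_dma_irq(int irq, void *data)
	{
		/* Ack/clear the controller's pending bits here, then wake the waiter. */
		complete(&foo_dma_done);
		return IRQ_HANDLED;
	}

	static int foo_dma_wait(void)
	{
		/* wait_for_completion_timeout() returns 0 on timeout. */
		if (!wait_for_completion_timeout(&foo_dma_done, msecs_to_jiffies(20)))
			return -ETIMEDOUT;
		return 0;
	}

Falling back to the polling variant when platform_get_resource(..., IORESOURCE_IRQ, 0) yields no interrupt, as the probe hunk does, keeps boards without the INTC wiring working unchanged.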
diff --git a/drivers/mtd/sm_ftl.h b/drivers/mtd/sm_ftl.h
index e30e48e7f63d..43bb7300785b 100644
--- a/drivers/mtd/sm_ftl.h
+++ b/drivers/mtd/sm_ftl.h
@@ -20,7 +20,7 @@
20 20
21 21
22struct ftl_zone { 22struct ftl_zone {
23 int initialized; 23 bool initialized;
24 int16_t *lba_to_phys_table; /* LBA to physical table */ 24 int16_t *lba_to_phys_table; /* LBA to physical table */
25 struct kfifo free_sectors; /* queue of free sectors */ 25 struct kfifo free_sectors; /* queue of free sectors */
26}; 26};
@@ -37,8 +37,8 @@ struct sm_ftl {
37 int zone_count; /* number of zones */ 37 int zone_count; /* number of zones */
38 int max_lba; /* maximum lba in a zone */ 38 int max_lba; /* maximum lba in a zone */
39 int smallpagenand; /* 256 bytes/page nand */ 39 int smallpagenand; /* 256 bytes/page nand */
40 int readonly; /* is FS readonly */ 40 bool readonly; /* is FS readonly */
41 int unstable; 41 bool unstable;
42 int cis_block; /* CIS block location */ 42 int cis_block; /* CIS block location */
43 int cis_boffset; /* CIS offset in the block */ 43 int cis_boffset; /* CIS offset in the block */
44 int cis_page_offset; /* CIS offset in the page */ 44 int cis_page_offset; /* CIS offset in the page */
@@ -49,7 +49,7 @@ struct sm_ftl {
49 int cache_zone; /* zone of cached block */ 49 int cache_zone; /* zone of cached block */
50 unsigned char *cache_data; /* cached block data */ 50 unsigned char *cache_data; /* cached block data */
51 long unsigned int cache_data_invalid_bitmap; 51 long unsigned int cache_data_invalid_bitmap;
52 int cache_clean; 52 bool cache_clean;
53 struct work_struct flush_work; 53 struct work_struct flush_work;
54 struct timer_list timer; 54 struct timer_list timer;
55 55