aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/mtd/Kconfig13
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c137
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c198
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c136
-rw-r--r--drivers/mtd/chips/fwh_lock.h6
-rw-r--r--drivers/mtd/chips/gen_probe.c3
-rw-r--r--drivers/mtd/devices/Makefile2
-rw-r--r--drivers/mtd/devices/block2mtd.c4
-rw-r--r--drivers/mtd/devices/pmc551.c4
-rw-r--r--drivers/mtd/devices/sst25l.c11
-rw-r--r--drivers/mtd/ftl.c1
-rw-r--r--drivers/mtd/inftlcore.c1
-rw-r--r--drivers/mtd/inftlmount.c7
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c79
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c16
-rw-r--r--drivers/mtd/maps/ceiva.c2
-rw-r--r--drivers/mtd/maps/ixp4xx.c4
-rw-r--r--drivers/mtd/maps/pcmciamtd.c2
-rw-r--r--drivers/mtd/maps/physmap.c7
-rw-r--r--drivers/mtd/maps/physmap_of.c2
-rw-r--r--drivers/mtd/maps/pismo.c8
-rw-r--r--drivers/mtd/mtd_blkdevs.c335
-rw-r--r--drivers/mtd/mtdblock.c72
-rw-r--r--drivers/mtd/mtdblock_ro.c4
-rw-r--r--drivers/mtd/mtdchar.c12
-rw-r--r--drivers/mtd/mtdcore.c284
-rw-r--r--drivers/mtd/mtdcore.h7
-rw-r--r--drivers/mtd/mtdoops.c5
-rw-r--r--drivers/mtd/mtdsuper.c18
-rw-r--r--drivers/mtd/nand/Kconfig52
-rw-r--r--drivers/mtd/nand/Makefile9
-rw-r--r--drivers/mtd/nand/alauda.c2
-rw-r--r--drivers/mtd/nand/atmel_nand.c2
-rw-r--r--drivers/mtd/nand/au1550nd.c12
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c3
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c29
-rw-r--r--drivers/mtd/nand/cafe_nand.c4
-rw-r--r--drivers/mtd/nand/davinci_nand.c6
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c4
-rw-r--r--drivers/mtd/nand/fsl_upm.c9
-rw-r--r--drivers/mtd/nand/gpio.c12
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c916
-rw-r--r--drivers/mtd/nand/mxc_nand.c22
-rw-r--r--drivers/mtd/nand/nand_base.c312
-rw-r--r--drivers/mtd/nand/nand_bbt.c26
-rw-r--r--drivers/mtd/nand/nand_bcm_umi.h71
-rw-r--r--drivers/mtd/nand/nandsim.c17
-rw-r--r--drivers/mtd/nand/nomadik_nand.c6
-rw-r--r--drivers/mtd/nand/nuc900_nand.c (renamed from drivers/mtd/nand/w90p910_nand.c)144
-rw-r--r--drivers/mtd/nand/omap2.c16
-rw-r--r--drivers/mtd/nand/orion_nand.c10
-rw-r--r--drivers/mtd/nand/pasemi_nand.c2
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c11
-rw-r--r--drivers/mtd/nand/r852.c1139
-rw-r--r--drivers/mtd/nand/r852.h163
-rw-r--r--drivers/mtd/nand/s3c2410.c5
-rw-r--r--drivers/mtd/nand/sh_flctl.c2
-rw-r--r--drivers/mtd/nand/sm_common.c143
-rw-r--r--drivers/mtd/nand/sm_common.h61
-rw-r--r--drivers/mtd/nand/socrates_nand.c4
-rw-r--r--drivers/mtd/nand/tmio_nand.c14
-rw-r--r--drivers/mtd/nand/ts7250.c207
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c2
-rw-r--r--drivers/mtd/nftlcore.c1
-rw-r--r--drivers/mtd/onenand/omap2.c12
-rw-r--r--drivers/mtd/rfd_ftl.c1
-rw-r--r--drivers/mtd/sm_ftl.c1284
-rw-r--r--drivers/mtd/sm_ftl.h94
-rw-r--r--drivers/mtd/ssfdc.c1
70 files changed, 5108 insertions, 1103 deletions
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index ecf90f5c97c2..f8210bf2d241 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -304,6 +304,19 @@ config SSFDC
304 This enables read only access to SmartMedia formatted NAND 304 This enables read only access to SmartMedia formatted NAND
305 flash. You can mount it with FAT file system. 305 flash. You can mount it with FAT file system.
306 306
307
308config SM_FTL
309 tristate "SmartMedia/xD new translation layer"
310 depends on EXPERIMENTAL && BLOCK
311 select MTD_BLKDEVS
312 select MTD_NAND_ECC
313 help
314 This enables new and very EXPERMENTAL support for SmartMedia/xD
315 FTL (Flash translation layer).
316 Write support isn't yet well tested, therefore this code IS likely to
317 eat your card, so please don't use it together with valuable data.
318 Use readonly driver (CONFIG_SSFDC) instead.
319
307config MTD_OOPS 320config MTD_OOPS
308 tristate "Log panic/oops to an MTD buffer" 321 tristate "Log panic/oops to an MTD buffer"
309 depends on MTD 322 depends on MTD
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 4521b1ecce45..760abc533395 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_NFTL) += nftl.o
24obj-$(CONFIG_INFTL) += inftl.o 24obj-$(CONFIG_INFTL) += inftl.o
25obj-$(CONFIG_RFD_FTL) += rfd_ftl.o 25obj-$(CONFIG_RFD_FTL) += rfd_ftl.o
26obj-$(CONFIG_SSFDC) += ssfdc.o 26obj-$(CONFIG_SSFDC) += ssfdc.o
27obj-$(CONFIG_SM_FTL) += sm_ftl.o
27obj-$(CONFIG_MTD_OOPS) += mtdoops.o 28obj-$(CONFIG_MTD_OOPS) += mtdoops.o
28 29
29nftl-objs := nftlcore.o nftlmount.o 30nftl-objs := nftlcore.o nftlmount.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 5fbf29e1e64f..62f3ea9de848 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -615,10 +615,8 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
615 return mtd; 615 return mtd;
616 616
617 setup_err: 617 setup_err:
618 if(mtd) { 618 kfree(mtd->eraseregions);
619 kfree(mtd->eraseregions); 619 kfree(mtd);
620 kfree(mtd);
621 }
622 kfree(cfi->cmdset_priv); 620 kfree(cfi->cmdset_priv);
623 return NULL; 621 return NULL;
624} 622}
@@ -727,8 +725,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
727 /* those should be reset too since 725 /* those should be reset too since
728 they create memory references. */ 726 they create memory references. */
729 init_waitqueue_head(&chip->wq); 727 init_waitqueue_head(&chip->wq);
730 spin_lock_init(&chip->_spinlock); 728 mutex_init(&chip->mutex);
731 chip->mutex = &chip->_spinlock;
732 chip++; 729 chip++;
733 } 730 }
734 } 731 }
@@ -774,9 +771,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
774 if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS)) 771 if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
775 break; 772 break;
776 773
777 spin_unlock(chip->mutex); 774 mutex_unlock(&chip->mutex);
778 cfi_udelay(1); 775 cfi_udelay(1);
779 spin_lock(chip->mutex); 776 mutex_lock(&chip->mutex);
780 /* Someone else might have been playing with it. */ 777 /* Someone else might have been playing with it. */
781 return -EAGAIN; 778 return -EAGAIN;
782 } 779 }
@@ -823,9 +820,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
823 return -EIO; 820 return -EIO;
824 } 821 }
825 822
826 spin_unlock(chip->mutex); 823 mutex_unlock(&chip->mutex);
827 cfi_udelay(1); 824 cfi_udelay(1);
828 spin_lock(chip->mutex); 825 mutex_lock(&chip->mutex);
829 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. 826 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
830 So we can just loop here. */ 827 So we can just loop here. */
831 } 828 }
@@ -852,10 +849,10 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
852 sleep: 849 sleep:
853 set_current_state(TASK_UNINTERRUPTIBLE); 850 set_current_state(TASK_UNINTERRUPTIBLE);
854 add_wait_queue(&chip->wq, &wait); 851 add_wait_queue(&chip->wq, &wait);
855 spin_unlock(chip->mutex); 852 mutex_unlock(&chip->mutex);
856 schedule(); 853 schedule();
857 remove_wait_queue(&chip->wq, &wait); 854 remove_wait_queue(&chip->wq, &wait);
858 spin_lock(chip->mutex); 855 mutex_lock(&chip->mutex);
859 return -EAGAIN; 856 return -EAGAIN;
860 } 857 }
861} 858}
@@ -901,20 +898,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
901 * it'll happily send us to sleep. In any case, when 898 * it'll happily send us to sleep. In any case, when
902 * get_chip returns success we're clear to go ahead. 899 * get_chip returns success we're clear to go ahead.
903 */ 900 */
904 ret = spin_trylock(contender->mutex); 901 ret = mutex_trylock(&contender->mutex);
905 spin_unlock(&shared->lock); 902 spin_unlock(&shared->lock);
906 if (!ret) 903 if (!ret)
907 goto retry; 904 goto retry;
908 spin_unlock(chip->mutex); 905 mutex_unlock(&chip->mutex);
909 ret = chip_ready(map, contender, contender->start, mode); 906 ret = chip_ready(map, contender, contender->start, mode);
910 spin_lock(chip->mutex); 907 mutex_lock(&chip->mutex);
911 908
912 if (ret == -EAGAIN) { 909 if (ret == -EAGAIN) {
913 spin_unlock(contender->mutex); 910 mutex_unlock(&contender->mutex);
914 goto retry; 911 goto retry;
915 } 912 }
916 if (ret) { 913 if (ret) {
917 spin_unlock(contender->mutex); 914 mutex_unlock(&contender->mutex);
918 return ret; 915 return ret;
919 } 916 }
920 spin_lock(&shared->lock); 917 spin_lock(&shared->lock);
@@ -923,10 +920,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
923 * in FL_SYNCING state. Put contender and retry. */ 920 * in FL_SYNCING state. Put contender and retry. */
924 if (chip->state == FL_SYNCING) { 921 if (chip->state == FL_SYNCING) {
925 put_chip(map, contender, contender->start); 922 put_chip(map, contender, contender->start);
926 spin_unlock(contender->mutex); 923 mutex_unlock(&contender->mutex);
927 goto retry; 924 goto retry;
928 } 925 }
929 spin_unlock(contender->mutex); 926 mutex_unlock(&contender->mutex);
930 } 927 }
931 928
932 /* Check if we already have suspended erase 929 /* Check if we already have suspended erase
@@ -936,10 +933,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
936 spin_unlock(&shared->lock); 933 spin_unlock(&shared->lock);
937 set_current_state(TASK_UNINTERRUPTIBLE); 934 set_current_state(TASK_UNINTERRUPTIBLE);
938 add_wait_queue(&chip->wq, &wait); 935 add_wait_queue(&chip->wq, &wait);
939 spin_unlock(chip->mutex); 936 mutex_unlock(&chip->mutex);
940 schedule(); 937 schedule();
941 remove_wait_queue(&chip->wq, &wait); 938 remove_wait_queue(&chip->wq, &wait);
942 spin_lock(chip->mutex); 939 mutex_lock(&chip->mutex);
943 goto retry; 940 goto retry;
944 } 941 }
945 942
@@ -969,12 +966,12 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
969 if (shared->writing && shared->writing != chip) { 966 if (shared->writing && shared->writing != chip) {
970 /* give back ownership to who we loaned it from */ 967 /* give back ownership to who we loaned it from */
971 struct flchip *loaner = shared->writing; 968 struct flchip *loaner = shared->writing;
972 spin_lock(loaner->mutex); 969 mutex_lock(&loaner->mutex);
973 spin_unlock(&shared->lock); 970 spin_unlock(&shared->lock);
974 spin_unlock(chip->mutex); 971 mutex_unlock(&chip->mutex);
975 put_chip(map, loaner, loaner->start); 972 put_chip(map, loaner, loaner->start);
976 spin_lock(chip->mutex); 973 mutex_lock(&chip->mutex);
977 spin_unlock(loaner->mutex); 974 mutex_unlock(&loaner->mutex);
978 wake_up(&chip->wq); 975 wake_up(&chip->wq);
979 return; 976 return;
980 } 977 }
@@ -1144,7 +1141,7 @@ static int __xipram xip_wait_for_operation(
1144 (void) map_read(map, adr); 1141 (void) map_read(map, adr);
1145 xip_iprefetch(); 1142 xip_iprefetch();
1146 local_irq_enable(); 1143 local_irq_enable();
1147 spin_unlock(chip->mutex); 1144 mutex_unlock(&chip->mutex);
1148 xip_iprefetch(); 1145 xip_iprefetch();
1149 cond_resched(); 1146 cond_resched();
1150 1147
@@ -1154,15 +1151,15 @@ static int __xipram xip_wait_for_operation(
1154 * a suspended erase state. If so let's wait 1151 * a suspended erase state. If so let's wait
1155 * until it's done. 1152 * until it's done.
1156 */ 1153 */
1157 spin_lock(chip->mutex); 1154 mutex_lock(&chip->mutex);
1158 while (chip->state != newstate) { 1155 while (chip->state != newstate) {
1159 DECLARE_WAITQUEUE(wait, current); 1156 DECLARE_WAITQUEUE(wait, current);
1160 set_current_state(TASK_UNINTERRUPTIBLE); 1157 set_current_state(TASK_UNINTERRUPTIBLE);
1161 add_wait_queue(&chip->wq, &wait); 1158 add_wait_queue(&chip->wq, &wait);
1162 spin_unlock(chip->mutex); 1159 mutex_unlock(&chip->mutex);
1163 schedule(); 1160 schedule();
1164 remove_wait_queue(&chip->wq, &wait); 1161 remove_wait_queue(&chip->wq, &wait);
1165 spin_lock(chip->mutex); 1162 mutex_lock(&chip->mutex);
1166 } 1163 }
1167 /* Disallow XIP again */ 1164 /* Disallow XIP again */
1168 local_irq_disable(); 1165 local_irq_disable();
@@ -1218,10 +1215,10 @@ static int inval_cache_and_wait_for_operation(
1218 int chip_state = chip->state; 1215 int chip_state = chip->state;
1219 unsigned int timeo, sleep_time, reset_timeo; 1216 unsigned int timeo, sleep_time, reset_timeo;
1220 1217
1221 spin_unlock(chip->mutex); 1218 mutex_unlock(&chip->mutex);
1222 if (inval_len) 1219 if (inval_len)
1223 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len); 1220 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1224 spin_lock(chip->mutex); 1221 mutex_lock(&chip->mutex);
1225 1222
1226 timeo = chip_op_time_max; 1223 timeo = chip_op_time_max;
1227 if (!timeo) 1224 if (!timeo)
@@ -1241,7 +1238,7 @@ static int inval_cache_and_wait_for_operation(
1241 } 1238 }
1242 1239
1243 /* OK Still waiting. Drop the lock, wait a while and retry. */ 1240 /* OK Still waiting. Drop the lock, wait a while and retry. */
1244 spin_unlock(chip->mutex); 1241 mutex_unlock(&chip->mutex);
1245 if (sleep_time >= 1000000/HZ) { 1242 if (sleep_time >= 1000000/HZ) {
1246 /* 1243 /*
1247 * Half of the normal delay still remaining 1244 * Half of the normal delay still remaining
@@ -1256,17 +1253,17 @@ static int inval_cache_and_wait_for_operation(
1256 cond_resched(); 1253 cond_resched();
1257 timeo--; 1254 timeo--;
1258 } 1255 }
1259 spin_lock(chip->mutex); 1256 mutex_lock(&chip->mutex);
1260 1257
1261 while (chip->state != chip_state) { 1258 while (chip->state != chip_state) {
1262 /* Someone's suspended the operation: sleep */ 1259 /* Someone's suspended the operation: sleep */
1263 DECLARE_WAITQUEUE(wait, current); 1260 DECLARE_WAITQUEUE(wait, current);
1264 set_current_state(TASK_UNINTERRUPTIBLE); 1261 set_current_state(TASK_UNINTERRUPTIBLE);
1265 add_wait_queue(&chip->wq, &wait); 1262 add_wait_queue(&chip->wq, &wait);
1266 spin_unlock(chip->mutex); 1263 mutex_unlock(&chip->mutex);
1267 schedule(); 1264 schedule();
1268 remove_wait_queue(&chip->wq, &wait); 1265 remove_wait_queue(&chip->wq, &wait);
1269 spin_lock(chip->mutex); 1266 mutex_lock(&chip->mutex);
1270 } 1267 }
1271 if (chip->erase_suspended && chip_state == FL_ERASING) { 1268 if (chip->erase_suspended && chip_state == FL_ERASING) {
1272 /* Erase suspend occured while sleep: reset timeout */ 1269 /* Erase suspend occured while sleep: reset timeout */
@@ -1302,7 +1299,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
1302 /* Ensure cmd read/writes are aligned. */ 1299 /* Ensure cmd read/writes are aligned. */
1303 cmd_addr = adr & ~(map_bankwidth(map)-1); 1300 cmd_addr = adr & ~(map_bankwidth(map)-1);
1304 1301
1305 spin_lock(chip->mutex); 1302 mutex_lock(&chip->mutex);
1306 1303
1307 ret = get_chip(map, chip, cmd_addr, FL_POINT); 1304 ret = get_chip(map, chip, cmd_addr, FL_POINT);
1308 1305
@@ -1313,7 +1310,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
1313 chip->state = FL_POINT; 1310 chip->state = FL_POINT;
1314 chip->ref_point_counter++; 1311 chip->ref_point_counter++;
1315 } 1312 }
1316 spin_unlock(chip->mutex); 1313 mutex_unlock(&chip->mutex);
1317 1314
1318 return ret; 1315 return ret;
1319} 1316}
@@ -1398,7 +1395,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1398 else 1395 else
1399 thislen = len; 1396 thislen = len;
1400 1397
1401 spin_lock(chip->mutex); 1398 mutex_lock(&chip->mutex);
1402 if (chip->state == FL_POINT) { 1399 if (chip->state == FL_POINT) {
1403 chip->ref_point_counter--; 1400 chip->ref_point_counter--;
1404 if(chip->ref_point_counter == 0) 1401 if(chip->ref_point_counter == 0)
@@ -1407,7 +1404,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1407 printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */ 1404 printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
1408 1405
1409 put_chip(map, chip, chip->start); 1406 put_chip(map, chip, chip->start);
1410 spin_unlock(chip->mutex); 1407 mutex_unlock(&chip->mutex);
1411 1408
1412 len -= thislen; 1409 len -= thislen;
1413 ofs = 0; 1410 ofs = 0;
@@ -1426,10 +1423,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
1426 /* Ensure cmd read/writes are aligned. */ 1423 /* Ensure cmd read/writes are aligned. */
1427 cmd_addr = adr & ~(map_bankwidth(map)-1); 1424 cmd_addr = adr & ~(map_bankwidth(map)-1);
1428 1425
1429 spin_lock(chip->mutex); 1426 mutex_lock(&chip->mutex);
1430 ret = get_chip(map, chip, cmd_addr, FL_READY); 1427 ret = get_chip(map, chip, cmd_addr, FL_READY);
1431 if (ret) { 1428 if (ret) {
1432 spin_unlock(chip->mutex); 1429 mutex_unlock(&chip->mutex);
1433 return ret; 1430 return ret;
1434 } 1431 }
1435 1432
@@ -1443,7 +1440,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
1443 1440
1444 put_chip(map, chip, cmd_addr); 1441 put_chip(map, chip, cmd_addr);
1445 1442
1446 spin_unlock(chip->mutex); 1443 mutex_unlock(&chip->mutex);
1447 return 0; 1444 return 0;
1448} 1445}
1449 1446
@@ -1506,10 +1503,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1506 return -EINVAL; 1503 return -EINVAL;
1507 } 1504 }
1508 1505
1509 spin_lock(chip->mutex); 1506 mutex_lock(&chip->mutex);
1510 ret = get_chip(map, chip, adr, mode); 1507 ret = get_chip(map, chip, adr, mode);
1511 if (ret) { 1508 if (ret) {
1512 spin_unlock(chip->mutex); 1509 mutex_unlock(&chip->mutex);
1513 return ret; 1510 return ret;
1514 } 1511 }
1515 1512
@@ -1555,7 +1552,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1555 1552
1556 xip_enable(map, chip, adr); 1553 xip_enable(map, chip, adr);
1557 out: put_chip(map, chip, adr); 1554 out: put_chip(map, chip, adr);
1558 spin_unlock(chip->mutex); 1555 mutex_unlock(&chip->mutex);
1559 return ret; 1556 return ret;
1560} 1557}
1561 1558
@@ -1664,10 +1661,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1664 /* Let's determine this according to the interleave only once */ 1661 /* Let's determine this according to the interleave only once */
1665 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9); 1662 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1666 1663
1667 spin_lock(chip->mutex); 1664 mutex_lock(&chip->mutex);
1668 ret = get_chip(map, chip, cmd_adr, FL_WRITING); 1665 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1669 if (ret) { 1666 if (ret) {
1670 spin_unlock(chip->mutex); 1667 mutex_unlock(&chip->mutex);
1671 return ret; 1668 return ret;
1672 } 1669 }
1673 1670
@@ -1798,7 +1795,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1798 1795
1799 xip_enable(map, chip, cmd_adr); 1796 xip_enable(map, chip, cmd_adr);
1800 out: put_chip(map, chip, cmd_adr); 1797 out: put_chip(map, chip, cmd_adr);
1801 spin_unlock(chip->mutex); 1798 mutex_unlock(&chip->mutex);
1802 return ret; 1799 return ret;
1803} 1800}
1804 1801
@@ -1877,10 +1874,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1877 adr += chip->start; 1874 adr += chip->start;
1878 1875
1879 retry: 1876 retry:
1880 spin_lock(chip->mutex); 1877 mutex_lock(&chip->mutex);
1881 ret = get_chip(map, chip, adr, FL_ERASING); 1878 ret = get_chip(map, chip, adr, FL_ERASING);
1882 if (ret) { 1879 if (ret) {
1883 spin_unlock(chip->mutex); 1880 mutex_unlock(&chip->mutex);
1884 return ret; 1881 return ret;
1885 } 1882 }
1886 1883
@@ -1936,7 +1933,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1936 } else if (chipstatus & 0x20 && retries--) { 1933 } else if (chipstatus & 0x20 && retries--) {
1937 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus); 1934 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1938 put_chip(map, chip, adr); 1935 put_chip(map, chip, adr);
1939 spin_unlock(chip->mutex); 1936 mutex_unlock(&chip->mutex);
1940 goto retry; 1937 goto retry;
1941 } else { 1938 } else {
1942 printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus); 1939 printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
@@ -1948,7 +1945,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1948 1945
1949 xip_enable(map, chip, adr); 1946 xip_enable(map, chip, adr);
1950 out: put_chip(map, chip, adr); 1947 out: put_chip(map, chip, adr);
1951 spin_unlock(chip->mutex); 1948 mutex_unlock(&chip->mutex);
1952 return ret; 1949 return ret;
1953} 1950}
1954 1951
@@ -1981,7 +1978,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
1981 for (i=0; !ret && i<cfi->numchips; i++) { 1978 for (i=0; !ret && i<cfi->numchips; i++) {
1982 chip = &cfi->chips[i]; 1979 chip = &cfi->chips[i];
1983 1980
1984 spin_lock(chip->mutex); 1981 mutex_lock(&chip->mutex);
1985 ret = get_chip(map, chip, chip->start, FL_SYNCING); 1982 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1986 1983
1987 if (!ret) { 1984 if (!ret) {
@@ -1992,7 +1989,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
1992 * with the chip now anyway. 1989 * with the chip now anyway.
1993 */ 1990 */
1994 } 1991 }
1995 spin_unlock(chip->mutex); 1992 mutex_unlock(&chip->mutex);
1996 } 1993 }
1997 1994
1998 /* Unlock the chips again */ 1995 /* Unlock the chips again */
@@ -2000,14 +1997,14 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
2000 for (i--; i >=0; i--) { 1997 for (i--; i >=0; i--) {
2001 chip = &cfi->chips[i]; 1998 chip = &cfi->chips[i];
2002 1999
2003 spin_lock(chip->mutex); 2000 mutex_lock(&chip->mutex);
2004 2001
2005 if (chip->state == FL_SYNCING) { 2002 if (chip->state == FL_SYNCING) {
2006 chip->state = chip->oldstate; 2003 chip->state = chip->oldstate;
2007 chip->oldstate = FL_READY; 2004 chip->oldstate = FL_READY;
2008 wake_up(&chip->wq); 2005 wake_up(&chip->wq);
2009 } 2006 }
2010 spin_unlock(chip->mutex); 2007 mutex_unlock(&chip->mutex);
2011 } 2008 }
2012} 2009}
2013 2010
@@ -2053,10 +2050,10 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
2053 2050
2054 adr += chip->start; 2051 adr += chip->start;
2055 2052
2056 spin_lock(chip->mutex); 2053 mutex_lock(&chip->mutex);
2057 ret = get_chip(map, chip, adr, FL_LOCKING); 2054 ret = get_chip(map, chip, adr, FL_LOCKING);
2058 if (ret) { 2055 if (ret) {
2059 spin_unlock(chip->mutex); 2056 mutex_unlock(&chip->mutex);
2060 return ret; 2057 return ret;
2061 } 2058 }
2062 2059
@@ -2090,7 +2087,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
2090 2087
2091 xip_enable(map, chip, adr); 2088 xip_enable(map, chip, adr);
2092out: put_chip(map, chip, adr); 2089out: put_chip(map, chip, adr);
2093 spin_unlock(chip->mutex); 2090 mutex_unlock(&chip->mutex);
2094 return ret; 2091 return ret;
2095} 2092}
2096 2093
@@ -2155,10 +2152,10 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2155 struct cfi_private *cfi = map->fldrv_priv; 2152 struct cfi_private *cfi = map->fldrv_priv;
2156 int ret; 2153 int ret;
2157 2154
2158 spin_lock(chip->mutex); 2155 mutex_lock(&chip->mutex);
2159 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY); 2156 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2160 if (ret) { 2157 if (ret) {
2161 spin_unlock(chip->mutex); 2158 mutex_unlock(&chip->mutex);
2162 return ret; 2159 return ret;
2163 } 2160 }
2164 2161
@@ -2177,7 +2174,7 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2177 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size); 2174 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2178 2175
2179 put_chip(map, chip, chip->start); 2176 put_chip(map, chip, chip->start);
2180 spin_unlock(chip->mutex); 2177 mutex_unlock(&chip->mutex);
2181 return 0; 2178 return 0;
2182} 2179}
2183 2180
@@ -2452,7 +2449,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
2452 for (i=0; !ret && i<cfi->numchips; i++) { 2449 for (i=0; !ret && i<cfi->numchips; i++) {
2453 chip = &cfi->chips[i]; 2450 chip = &cfi->chips[i];
2454 2451
2455 spin_lock(chip->mutex); 2452 mutex_lock(&chip->mutex);
2456 2453
2457 switch (chip->state) { 2454 switch (chip->state) {
2458 case FL_READY: 2455 case FL_READY:
@@ -2484,7 +2481,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
2484 case FL_PM_SUSPENDED: 2481 case FL_PM_SUSPENDED:
2485 break; 2482 break;
2486 } 2483 }
2487 spin_unlock(chip->mutex); 2484 mutex_unlock(&chip->mutex);
2488 } 2485 }
2489 2486
2490 /* Unlock the chips again */ 2487 /* Unlock the chips again */
@@ -2493,7 +2490,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
2493 for (i--; i >=0; i--) { 2490 for (i--; i >=0; i--) {
2494 chip = &cfi->chips[i]; 2491 chip = &cfi->chips[i];
2495 2492
2496 spin_lock(chip->mutex); 2493 mutex_lock(&chip->mutex);
2497 2494
2498 if (chip->state == FL_PM_SUSPENDED) { 2495 if (chip->state == FL_PM_SUSPENDED) {
2499 /* No need to force it into a known state here, 2496 /* No need to force it into a known state here,
@@ -2503,7 +2500,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
2503 chip->oldstate = FL_READY; 2500 chip->oldstate = FL_READY;
2504 wake_up(&chip->wq); 2501 wake_up(&chip->wq);
2505 } 2502 }
2506 spin_unlock(chip->mutex); 2503 mutex_unlock(&chip->mutex);
2507 } 2504 }
2508 } 2505 }
2509 2506
@@ -2544,7 +2541,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
2544 2541
2545 chip = &cfi->chips[i]; 2542 chip = &cfi->chips[i];
2546 2543
2547 spin_lock(chip->mutex); 2544 mutex_lock(&chip->mutex);
2548 2545
2549 /* Go to known state. Chip may have been power cycled */ 2546 /* Go to known state. Chip may have been power cycled */
2550 if (chip->state == FL_PM_SUSPENDED) { 2547 if (chip->state == FL_PM_SUSPENDED) {
@@ -2553,7 +2550,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
2553 wake_up(&chip->wq); 2550 wake_up(&chip->wq);
2554 } 2551 }
2555 2552
2556 spin_unlock(chip->mutex); 2553 mutex_unlock(&chip->mutex);
2557 } 2554 }
2558 2555
2559 if ((mtd->flags & MTD_POWERUP_LOCK) 2556 if ((mtd->flags & MTD_POWERUP_LOCK)
@@ -2573,14 +2570,14 @@ static int cfi_intelext_reset(struct mtd_info *mtd)
2573 /* force the completion of any ongoing operation 2570 /* force the completion of any ongoing operation
2574 and switch to array mode so any bootloader in 2571 and switch to array mode so any bootloader in
2575 flash is accessible for soft reboot. */ 2572 flash is accessible for soft reboot. */
2576 spin_lock(chip->mutex); 2573 mutex_lock(&chip->mutex);
2577 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); 2574 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2578 if (!ret) { 2575 if (!ret) {
2579 map_write(map, CMD(0xff), chip->start); 2576 map_write(map, CMD(0xff), chip->start);
2580 chip->state = FL_SHUTDOWN; 2577 chip->state = FL_SHUTDOWN;
2581 put_chip(map, chip, chip->start); 2578 put_chip(map, chip, chip->start);
2582 } 2579 }
2583 spin_unlock(chip->mutex); 2580 mutex_unlock(&chip->mutex);
2584 } 2581 }
2585 2582
2586 return 0; 2583 return 0;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index f3600e8d5382..c16b8cecc3a8 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -32,6 +32,7 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/interrupt.h> 34#include <linux/interrupt.h>
35#include <linux/reboot.h>
35#include <linux/mtd/compatmac.h> 36#include <linux/mtd/compatmac.h>
36#include <linux/mtd/map.h> 37#include <linux/mtd/map.h>
37#include <linux/mtd/mtd.h> 38#include <linux/mtd/mtd.h>
@@ -43,10 +44,6 @@
43 44
44#define MAX_WORD_RETRIES 3 45#define MAX_WORD_RETRIES 3
45 46
46#define MANUFACTURER_AMD 0x0001
47#define MANUFACTURER_ATMEL 0x001F
48#define MANUFACTURER_MACRONIX 0x00C2
49#define MANUFACTURER_SST 0x00BF
50#define SST49LF004B 0x0060 47#define SST49LF004B 0x0060
51#define SST49LF040B 0x0050 48#define SST49LF040B 0x0050
52#define SST49LF008A 0x005a 49#define SST49LF008A 0x005a
@@ -60,6 +57,7 @@ static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
60static void cfi_amdstd_sync (struct mtd_info *); 57static void cfi_amdstd_sync (struct mtd_info *);
61static int cfi_amdstd_suspend (struct mtd_info *); 58static int cfi_amdstd_suspend (struct mtd_info *);
62static void cfi_amdstd_resume (struct mtd_info *); 59static void cfi_amdstd_resume (struct mtd_info *);
60static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
63static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 61static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
64 62
65static void cfi_amdstd_destroy(struct mtd_info *); 63static void cfi_amdstd_destroy(struct mtd_info *);
@@ -168,7 +166,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
168 * This reduces the risk of false detection due to 166 * This reduces the risk of false detection due to
169 * the 8-bit device ID. 167 * the 8-bit device ID.
170 */ 168 */
171 (cfi->mfr == MANUFACTURER_MACRONIX)) { 169 (cfi->mfr == CFI_MFR_MACRONIX)) {
172 DEBUG(MTD_DEBUG_LEVEL1, 170 DEBUG(MTD_DEBUG_LEVEL1,
173 "%s: Macronix MX29LV400C with bottom boot block" 171 "%s: Macronix MX29LV400C with bottom boot block"
174 " detected\n", map->name); 172 " detected\n", map->name);
@@ -286,7 +284,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
286 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL }, 284 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
287#ifdef AMD_BOOTLOC_BUG 285#ifdef AMD_BOOTLOC_BUG
288 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL }, 286 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
289 { MANUFACTURER_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL }, 287 { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
290#endif 288#endif
291 { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, }, 289 { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
292 { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, }, 290 { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
@@ -304,9 +302,9 @@ static struct cfi_fixup cfi_fixup_table[] = {
304 { 0, 0, NULL, NULL } 302 { 0, 0, NULL, NULL }
305}; 303};
306static struct cfi_fixup jedec_fixup_table[] = { 304static struct cfi_fixup jedec_fixup_table[] = {
307 { MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, }, 305 { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
308 { MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, }, 306 { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
309 { MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, }, 307 { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
310 { 0, 0, NULL, NULL } 308 { 0, 0, NULL, NULL }
311}; 309};
312 310
@@ -355,6 +353,8 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
355 mtd->name = map->name; 353 mtd->name = map->name;
356 mtd->writesize = 1; 354 mtd->writesize = 1;
357 355
356 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
357
358 if (cfi->cfi_mode==CFI_MODE_CFI){ 358 if (cfi->cfi_mode==CFI_MODE_CFI){
359 unsigned char bootloc; 359 unsigned char bootloc;
360 /* 360 /*
@@ -491,13 +491,12 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
491#endif 491#endif
492 492
493 __module_get(THIS_MODULE); 493 __module_get(THIS_MODULE);
494 register_reboot_notifier(&mtd->reboot_notifier);
494 return mtd; 495 return mtd;
495 496
496 setup_err: 497 setup_err:
497 if(mtd) { 498 kfree(mtd->eraseregions);
498 kfree(mtd->eraseregions); 499 kfree(mtd);
499 kfree(mtd);
500 }
501 kfree(cfi->cmdset_priv); 500 kfree(cfi->cmdset_priv);
502 kfree(cfi->cfiq); 501 kfree(cfi->cfiq);
503 return NULL; 502 return NULL;
@@ -571,9 +570,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
571 printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); 570 printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
572 return -EIO; 571 return -EIO;
573 } 572 }
574 spin_unlock(chip->mutex); 573 mutex_unlock(&chip->mutex);
575 cfi_udelay(1); 574 cfi_udelay(1);
576 spin_lock(chip->mutex); 575 mutex_lock(&chip->mutex);
577 /* Someone else might have been playing with it. */ 576 /* Someone else might have been playing with it. */
578 goto retry; 577 goto retry;
579 } 578 }
@@ -617,9 +616,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
617 return -EIO; 616 return -EIO;
618 } 617 }
619 618
620 spin_unlock(chip->mutex); 619 mutex_unlock(&chip->mutex);
621 cfi_udelay(1); 620 cfi_udelay(1);
622 spin_lock(chip->mutex); 621 mutex_lock(&chip->mutex);
623 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. 622 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
624 So we can just loop here. */ 623 So we can just loop here. */
625 } 624 }
@@ -634,6 +633,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
634 chip->state = FL_READY; 633 chip->state = FL_READY;
635 return 0; 634 return 0;
636 635
636 case FL_SHUTDOWN:
637 /* The machine is rebooting */
638 return -EIO;
639
637 case FL_POINT: 640 case FL_POINT:
638 /* Only if there's no operation suspended... */ 641 /* Only if there's no operation suspended... */
639 if (mode == FL_READY && chip->oldstate == FL_READY) 642 if (mode == FL_READY && chip->oldstate == FL_READY)
@@ -643,10 +646,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
643 sleep: 646 sleep:
644 set_current_state(TASK_UNINTERRUPTIBLE); 647 set_current_state(TASK_UNINTERRUPTIBLE);
645 add_wait_queue(&chip->wq, &wait); 648 add_wait_queue(&chip->wq, &wait);
646 spin_unlock(chip->mutex); 649 mutex_unlock(&chip->mutex);
647 schedule(); 650 schedule();
648 remove_wait_queue(&chip->wq, &wait); 651 remove_wait_queue(&chip->wq, &wait);
649 spin_lock(chip->mutex); 652 mutex_lock(&chip->mutex);
650 goto resettime; 653 goto resettime;
651 } 654 }
652} 655}
@@ -778,7 +781,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
778 (void) map_read(map, adr); 781 (void) map_read(map, adr);
779 xip_iprefetch(); 782 xip_iprefetch();
780 local_irq_enable(); 783 local_irq_enable();
781 spin_unlock(chip->mutex); 784 mutex_unlock(&chip->mutex);
782 xip_iprefetch(); 785 xip_iprefetch();
783 cond_resched(); 786 cond_resched();
784 787
@@ -788,15 +791,15 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
788 * a suspended erase state. If so let's wait 791 * a suspended erase state. If so let's wait
789 * until it's done. 792 * until it's done.
790 */ 793 */
791 spin_lock(chip->mutex); 794 mutex_lock(&chip->mutex);
792 while (chip->state != FL_XIP_WHILE_ERASING) { 795 while (chip->state != FL_XIP_WHILE_ERASING) {
793 DECLARE_WAITQUEUE(wait, current); 796 DECLARE_WAITQUEUE(wait, current);
794 set_current_state(TASK_UNINTERRUPTIBLE); 797 set_current_state(TASK_UNINTERRUPTIBLE);
795 add_wait_queue(&chip->wq, &wait); 798 add_wait_queue(&chip->wq, &wait);
796 spin_unlock(chip->mutex); 799 mutex_unlock(&chip->mutex);
797 schedule(); 800 schedule();
798 remove_wait_queue(&chip->wq, &wait); 801 remove_wait_queue(&chip->wq, &wait);
799 spin_lock(chip->mutex); 802 mutex_lock(&chip->mutex);
800 } 803 }
801 /* Disallow XIP again */ 804 /* Disallow XIP again */
802 local_irq_disable(); 805 local_irq_disable();
@@ -858,17 +861,17 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
858 861
859#define UDELAY(map, chip, adr, usec) \ 862#define UDELAY(map, chip, adr, usec) \
860do { \ 863do { \
861 spin_unlock(chip->mutex); \ 864 mutex_unlock(&chip->mutex); \
862 cfi_udelay(usec); \ 865 cfi_udelay(usec); \
863 spin_lock(chip->mutex); \ 866 mutex_lock(&chip->mutex); \
864} while (0) 867} while (0)
865 868
866#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ 869#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
867do { \ 870do { \
868 spin_unlock(chip->mutex); \ 871 mutex_unlock(&chip->mutex); \
869 INVALIDATE_CACHED_RANGE(map, adr, len); \ 872 INVALIDATE_CACHED_RANGE(map, adr, len); \
870 cfi_udelay(usec); \ 873 cfi_udelay(usec); \
871 spin_lock(chip->mutex); \ 874 mutex_lock(&chip->mutex); \
872} while (0) 875} while (0)
873 876
874#endif 877#endif
@@ -884,10 +887,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
884 /* Ensure cmd read/writes are aligned. */ 887 /* Ensure cmd read/writes are aligned. */
885 cmd_addr = adr & ~(map_bankwidth(map)-1); 888 cmd_addr = adr & ~(map_bankwidth(map)-1);
886 889
887 spin_lock(chip->mutex); 890 mutex_lock(&chip->mutex);
888 ret = get_chip(map, chip, cmd_addr, FL_READY); 891 ret = get_chip(map, chip, cmd_addr, FL_READY);
889 if (ret) { 892 if (ret) {
890 spin_unlock(chip->mutex); 893 mutex_unlock(&chip->mutex);
891 return ret; 894 return ret;
892 } 895 }
893 896
@@ -900,7 +903,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
900 903
901 put_chip(map, chip, cmd_addr); 904 put_chip(map, chip, cmd_addr);
902 905
903 spin_unlock(chip->mutex); 906 mutex_unlock(&chip->mutex);
904 return 0; 907 return 0;
905} 908}
906 909
@@ -954,7 +957,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
954 struct cfi_private *cfi = map->fldrv_priv; 957 struct cfi_private *cfi = map->fldrv_priv;
955 958
956 retry: 959 retry:
957 spin_lock(chip->mutex); 960 mutex_lock(&chip->mutex);
958 961
959 if (chip->state != FL_READY){ 962 if (chip->state != FL_READY){
960#if 0 963#if 0
@@ -963,7 +966,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
963 set_current_state(TASK_UNINTERRUPTIBLE); 966 set_current_state(TASK_UNINTERRUPTIBLE);
964 add_wait_queue(&chip->wq, &wait); 967 add_wait_queue(&chip->wq, &wait);
965 968
966 spin_unlock(chip->mutex); 969 mutex_unlock(&chip->mutex);
967 970
968 schedule(); 971 schedule();
969 remove_wait_queue(&chip->wq, &wait); 972 remove_wait_queue(&chip->wq, &wait);
@@ -992,7 +995,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
992 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 995 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
993 996
994 wake_up(&chip->wq); 997 wake_up(&chip->wq);
995 spin_unlock(chip->mutex); 998 mutex_unlock(&chip->mutex);
996 999
997 return 0; 1000 return 0;
998} 1001}
@@ -1061,10 +1064,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1061 1064
1062 adr += chip->start; 1065 adr += chip->start;
1063 1066
1064 spin_lock(chip->mutex); 1067 mutex_lock(&chip->mutex);
1065 ret = get_chip(map, chip, adr, FL_WRITING); 1068 ret = get_chip(map, chip, adr, FL_WRITING);
1066 if (ret) { 1069 if (ret) {
1067 spin_unlock(chip->mutex); 1070 mutex_unlock(&chip->mutex);
1068 return ret; 1071 return ret;
1069 } 1072 }
1070 1073
@@ -1107,11 +1110,11 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1107 1110
1108 set_current_state(TASK_UNINTERRUPTIBLE); 1111 set_current_state(TASK_UNINTERRUPTIBLE);
1109 add_wait_queue(&chip->wq, &wait); 1112 add_wait_queue(&chip->wq, &wait);
1110 spin_unlock(chip->mutex); 1113 mutex_unlock(&chip->mutex);
1111 schedule(); 1114 schedule();
1112 remove_wait_queue(&chip->wq, &wait); 1115 remove_wait_queue(&chip->wq, &wait);
1113 timeo = jiffies + (HZ / 2); /* FIXME */ 1116 timeo = jiffies + (HZ / 2); /* FIXME */
1114 spin_lock(chip->mutex); 1117 mutex_lock(&chip->mutex);
1115 continue; 1118 continue;
1116 } 1119 }
1117 1120
@@ -1143,7 +1146,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1143 op_done: 1146 op_done:
1144 chip->state = FL_READY; 1147 chip->state = FL_READY;
1145 put_chip(map, chip, adr); 1148 put_chip(map, chip, adr);
1146 spin_unlock(chip->mutex); 1149 mutex_unlock(&chip->mutex);
1147 1150
1148 return ret; 1151 return ret;
1149} 1152}
@@ -1175,7 +1178,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1175 map_word tmp_buf; 1178 map_word tmp_buf;
1176 1179
1177 retry: 1180 retry:
1178 spin_lock(cfi->chips[chipnum].mutex); 1181 mutex_lock(&cfi->chips[chipnum].mutex);
1179 1182
1180 if (cfi->chips[chipnum].state != FL_READY) { 1183 if (cfi->chips[chipnum].state != FL_READY) {
1181#if 0 1184#if 0
@@ -1184,7 +1187,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1184 set_current_state(TASK_UNINTERRUPTIBLE); 1187 set_current_state(TASK_UNINTERRUPTIBLE);
1185 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1188 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1186 1189
1187 spin_unlock(cfi->chips[chipnum].mutex); 1190 mutex_unlock(&cfi->chips[chipnum].mutex);
1188 1191
1189 schedule(); 1192 schedule();
1190 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); 1193 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
@@ -1198,7 +1201,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1198 /* Load 'tmp_buf' with old contents of flash */ 1201 /* Load 'tmp_buf' with old contents of flash */
1199 tmp_buf = map_read(map, bus_ofs+chipstart); 1202 tmp_buf = map_read(map, bus_ofs+chipstart);
1200 1203
1201 spin_unlock(cfi->chips[chipnum].mutex); 1204 mutex_unlock(&cfi->chips[chipnum].mutex);
1202 1205
1203 /* Number of bytes to copy from buffer */ 1206 /* Number of bytes to copy from buffer */
1204 n = min_t(int, len, map_bankwidth(map)-i); 1207 n = min_t(int, len, map_bankwidth(map)-i);
@@ -1253,7 +1256,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1253 map_word tmp_buf; 1256 map_word tmp_buf;
1254 1257
1255 retry1: 1258 retry1:
1256 spin_lock(cfi->chips[chipnum].mutex); 1259 mutex_lock(&cfi->chips[chipnum].mutex);
1257 1260
1258 if (cfi->chips[chipnum].state != FL_READY) { 1261 if (cfi->chips[chipnum].state != FL_READY) {
1259#if 0 1262#if 0
@@ -1262,7 +1265,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1262 set_current_state(TASK_UNINTERRUPTIBLE); 1265 set_current_state(TASK_UNINTERRUPTIBLE);
1263 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1266 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1264 1267
1265 spin_unlock(cfi->chips[chipnum].mutex); 1268 mutex_unlock(&cfi->chips[chipnum].mutex);
1266 1269
1267 schedule(); 1270 schedule();
1268 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); 1271 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
@@ -1275,7 +1278,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1275 1278
1276 tmp_buf = map_read(map, ofs + chipstart); 1279 tmp_buf = map_read(map, ofs + chipstart);
1277 1280
1278 spin_unlock(cfi->chips[chipnum].mutex); 1281 mutex_unlock(&cfi->chips[chipnum].mutex);
1279 1282
1280 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 1283 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1281 1284
@@ -1310,10 +1313,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1310 adr += chip->start; 1313 adr += chip->start;
1311 cmd_adr = adr; 1314 cmd_adr = adr;
1312 1315
1313 spin_lock(chip->mutex); 1316 mutex_lock(&chip->mutex);
1314 ret = get_chip(map, chip, adr, FL_WRITING); 1317 ret = get_chip(map, chip, adr, FL_WRITING);
1315 if (ret) { 1318 if (ret) {
1316 spin_unlock(chip->mutex); 1319 mutex_unlock(&chip->mutex);
1317 return ret; 1320 return ret;
1318 } 1321 }
1319 1322
@@ -1368,11 +1371,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1368 1371
1369 set_current_state(TASK_UNINTERRUPTIBLE); 1372 set_current_state(TASK_UNINTERRUPTIBLE);
1370 add_wait_queue(&chip->wq, &wait); 1373 add_wait_queue(&chip->wq, &wait);
1371 spin_unlock(chip->mutex); 1374 mutex_unlock(&chip->mutex);
1372 schedule(); 1375 schedule();
1373 remove_wait_queue(&chip->wq, &wait); 1376 remove_wait_queue(&chip->wq, &wait);
1374 timeo = jiffies + (HZ / 2); /* FIXME */ 1377 timeo = jiffies + (HZ / 2); /* FIXME */
1375 spin_lock(chip->mutex); 1378 mutex_lock(&chip->mutex);
1376 continue; 1379 continue;
1377 } 1380 }
1378 1381
@@ -1400,7 +1403,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1400 op_done: 1403 op_done:
1401 chip->state = FL_READY; 1404 chip->state = FL_READY;
1402 put_chip(map, chip, adr); 1405 put_chip(map, chip, adr);
1403 spin_unlock(chip->mutex); 1406 mutex_unlock(&chip->mutex);
1404 1407
1405 return ret; 1408 return ret;
1406} 1409}
@@ -1500,10 +1503,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1500 1503
1501 adr = cfi->addr_unlock1; 1504 adr = cfi->addr_unlock1;
1502 1505
1503 spin_lock(chip->mutex); 1506 mutex_lock(&chip->mutex);
1504 ret = get_chip(map, chip, adr, FL_WRITING); 1507 ret = get_chip(map, chip, adr, FL_WRITING);
1505 if (ret) { 1508 if (ret) {
1506 spin_unlock(chip->mutex); 1509 mutex_unlock(&chip->mutex);
1507 return ret; 1510 return ret;
1508 } 1511 }
1509 1512
@@ -1536,10 +1539,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1536 /* Someone's suspended the erase. Sleep */ 1539 /* Someone's suspended the erase. Sleep */
1537 set_current_state(TASK_UNINTERRUPTIBLE); 1540 set_current_state(TASK_UNINTERRUPTIBLE);
1538 add_wait_queue(&chip->wq, &wait); 1541 add_wait_queue(&chip->wq, &wait);
1539 spin_unlock(chip->mutex); 1542 mutex_unlock(&chip->mutex);
1540 schedule(); 1543 schedule();
1541 remove_wait_queue(&chip->wq, &wait); 1544 remove_wait_queue(&chip->wq, &wait);
1542 spin_lock(chip->mutex); 1545 mutex_lock(&chip->mutex);
1543 continue; 1546 continue;
1544 } 1547 }
1545 if (chip->erase_suspended) { 1548 if (chip->erase_suspended) {
@@ -1573,7 +1576,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1573 chip->state = FL_READY; 1576 chip->state = FL_READY;
1574 xip_enable(map, chip, adr); 1577 xip_enable(map, chip, adr);
1575 put_chip(map, chip, adr); 1578 put_chip(map, chip, adr);
1576 spin_unlock(chip->mutex); 1579 mutex_unlock(&chip->mutex);
1577 1580
1578 return ret; 1581 return ret;
1579} 1582}
@@ -1588,10 +1591,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1588 1591
1589 adr += chip->start; 1592 adr += chip->start;
1590 1593
1591 spin_lock(chip->mutex); 1594 mutex_lock(&chip->mutex);
1592 ret = get_chip(map, chip, adr, FL_ERASING); 1595 ret = get_chip(map, chip, adr, FL_ERASING);
1593 if (ret) { 1596 if (ret) {
1594 spin_unlock(chip->mutex); 1597 mutex_unlock(&chip->mutex);
1595 return ret; 1598 return ret;
1596 } 1599 }
1597 1600
@@ -1624,10 +1627,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1624 /* Someone's suspended the erase. Sleep */ 1627 /* Someone's suspended the erase. Sleep */
1625 set_current_state(TASK_UNINTERRUPTIBLE); 1628 set_current_state(TASK_UNINTERRUPTIBLE);
1626 add_wait_queue(&chip->wq, &wait); 1629 add_wait_queue(&chip->wq, &wait);
1627 spin_unlock(chip->mutex); 1630 mutex_unlock(&chip->mutex);
1628 schedule(); 1631 schedule();
1629 remove_wait_queue(&chip->wq, &wait); 1632 remove_wait_queue(&chip->wq, &wait);
1630 spin_lock(chip->mutex); 1633 mutex_lock(&chip->mutex);
1631 continue; 1634 continue;
1632 } 1635 }
1633 if (chip->erase_suspended) { 1636 if (chip->erase_suspended) {
@@ -1663,7 +1666,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1663 1666
1664 chip->state = FL_READY; 1667 chip->state = FL_READY;
1665 put_chip(map, chip, adr); 1668 put_chip(map, chip, adr);
1666 spin_unlock(chip->mutex); 1669 mutex_unlock(&chip->mutex);
1667 return ret; 1670 return ret;
1668} 1671}
1669 1672
@@ -1715,7 +1718,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
1715 struct cfi_private *cfi = map->fldrv_priv; 1718 struct cfi_private *cfi = map->fldrv_priv;
1716 int ret; 1719 int ret;
1717 1720
1718 spin_lock(chip->mutex); 1721 mutex_lock(&chip->mutex);
1719 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); 1722 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
1720 if (ret) 1723 if (ret)
1721 goto out_unlock; 1724 goto out_unlock;
@@ -1741,7 +1744,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
1741 ret = 0; 1744 ret = 0;
1742 1745
1743out_unlock: 1746out_unlock:
1744 spin_unlock(chip->mutex); 1747 mutex_unlock(&chip->mutex);
1745 return ret; 1748 return ret;
1746} 1749}
1747 1750
@@ -1751,7 +1754,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
1751 struct cfi_private *cfi = map->fldrv_priv; 1754 struct cfi_private *cfi = map->fldrv_priv;
1752 int ret; 1755 int ret;
1753 1756
1754 spin_lock(chip->mutex); 1757 mutex_lock(&chip->mutex);
1755 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING); 1758 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
1756 if (ret) 1759 if (ret)
1757 goto out_unlock; 1760 goto out_unlock;
@@ -1769,7 +1772,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
1769 ret = 0; 1772 ret = 0;
1770 1773
1771out_unlock: 1774out_unlock:
1772 spin_unlock(chip->mutex); 1775 mutex_unlock(&chip->mutex);
1773 return ret; 1776 return ret;
1774} 1777}
1775 1778
@@ -1797,7 +1800,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
1797 chip = &cfi->chips[i]; 1800 chip = &cfi->chips[i];
1798 1801
1799 retry: 1802 retry:
1800 spin_lock(chip->mutex); 1803 mutex_lock(&chip->mutex);
1801 1804
1802 switch(chip->state) { 1805 switch(chip->state) {
1803 case FL_READY: 1806 case FL_READY:
@@ -1811,7 +1814,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
1811 * with the chip now anyway. 1814 * with the chip now anyway.
1812 */ 1815 */
1813 case FL_SYNCING: 1816 case FL_SYNCING:
1814 spin_unlock(chip->mutex); 1817 mutex_unlock(&chip->mutex);
1815 break; 1818 break;
1816 1819
1817 default: 1820 default:
@@ -1819,7 +1822,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
1819 set_current_state(TASK_UNINTERRUPTIBLE); 1822 set_current_state(TASK_UNINTERRUPTIBLE);
1820 add_wait_queue(&chip->wq, &wait); 1823 add_wait_queue(&chip->wq, &wait);
1821 1824
1822 spin_unlock(chip->mutex); 1825 mutex_unlock(&chip->mutex);
1823 1826
1824 schedule(); 1827 schedule();
1825 1828
@@ -1834,13 +1837,13 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
1834 for (i--; i >=0; i--) { 1837 for (i--; i >=0; i--) {
1835 chip = &cfi->chips[i]; 1838 chip = &cfi->chips[i];
1836 1839
1837 spin_lock(chip->mutex); 1840 mutex_lock(&chip->mutex);
1838 1841
1839 if (chip->state == FL_SYNCING) { 1842 if (chip->state == FL_SYNCING) {
1840 chip->state = chip->oldstate; 1843 chip->state = chip->oldstate;
1841 wake_up(&chip->wq); 1844 wake_up(&chip->wq);
1842 } 1845 }
1843 spin_unlock(chip->mutex); 1846 mutex_unlock(&chip->mutex);
1844 } 1847 }
1845} 1848}
1846 1849
@@ -1856,7 +1859,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
1856 for (i=0; !ret && i<cfi->numchips; i++) { 1859 for (i=0; !ret && i<cfi->numchips; i++) {
1857 chip = &cfi->chips[i]; 1860 chip = &cfi->chips[i];
1858 1861
1859 spin_lock(chip->mutex); 1862 mutex_lock(&chip->mutex);
1860 1863
1861 switch(chip->state) { 1864 switch(chip->state) {
1862 case FL_READY: 1865 case FL_READY:
@@ -1876,7 +1879,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
1876 ret = -EAGAIN; 1879 ret = -EAGAIN;
1877 break; 1880 break;
1878 } 1881 }
1879 spin_unlock(chip->mutex); 1882 mutex_unlock(&chip->mutex);
1880 } 1883 }
1881 1884
1882 /* Unlock the chips again */ 1885 /* Unlock the chips again */
@@ -1885,13 +1888,13 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
1885 for (i--; i >=0; i--) { 1888 for (i--; i >=0; i--) {
1886 chip = &cfi->chips[i]; 1889 chip = &cfi->chips[i];
1887 1890
1888 spin_lock(chip->mutex); 1891 mutex_lock(&chip->mutex);
1889 1892
1890 if (chip->state == FL_PM_SUSPENDED) { 1893 if (chip->state == FL_PM_SUSPENDED) {
1891 chip->state = chip->oldstate; 1894 chip->state = chip->oldstate;
1892 wake_up(&chip->wq); 1895 wake_up(&chip->wq);
1893 } 1896 }
1894 spin_unlock(chip->mutex); 1897 mutex_unlock(&chip->mutex);
1895 } 1898 }
1896 } 1899 }
1897 1900
@@ -1910,7 +1913,7 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
1910 1913
1911 chip = &cfi->chips[i]; 1914 chip = &cfi->chips[i];
1912 1915
1913 spin_lock(chip->mutex); 1916 mutex_lock(&chip->mutex);
1914 1917
1915 if (chip->state == FL_PM_SUSPENDED) { 1918 if (chip->state == FL_PM_SUSPENDED) {
1916 chip->state = FL_READY; 1919 chip->state = FL_READY;
@@ -1920,15 +1923,62 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
1920 else 1923 else
1921 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n"); 1924 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
1922 1925
1923 spin_unlock(chip->mutex); 1926 mutex_unlock(&chip->mutex);
1924 } 1927 }
1925} 1928}
1926 1929
1930
1931/*
1932 * Ensure that the flash device is put back into read array mode before
1933 * unloading the driver or rebooting. On some systems, rebooting while
1934 * the flash is in query/program/erase mode will prevent the CPU from
1935 * fetching the bootloader code, requiring a hard reset or power cycle.
1936 */
1937static int cfi_amdstd_reset(struct mtd_info *mtd)
1938{
1939 struct map_info *map = mtd->priv;
1940 struct cfi_private *cfi = map->fldrv_priv;
1941 int i, ret;
1942 struct flchip *chip;
1943
1944 for (i = 0; i < cfi->numchips; i++) {
1945
1946 chip = &cfi->chips[i];
1947
1948 mutex_lock(&chip->mutex);
1949
1950 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
1951 if (!ret) {
1952 map_write(map, CMD(0xF0), chip->start);
1953 chip->state = FL_SHUTDOWN;
1954 put_chip(map, chip, chip->start);
1955 }
1956
1957 mutex_unlock(&chip->mutex);
1958 }
1959
1960 return 0;
1961}
1962
1963
1964static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
1965 void *v)
1966{
1967 struct mtd_info *mtd;
1968
1969 mtd = container_of(nb, struct mtd_info, reboot_notifier);
1970 cfi_amdstd_reset(mtd);
1971 return NOTIFY_DONE;
1972}
1973
1974
1927static void cfi_amdstd_destroy(struct mtd_info *mtd) 1975static void cfi_amdstd_destroy(struct mtd_info *mtd)
1928{ 1976{
1929 struct map_info *map = mtd->priv; 1977 struct map_info *map = mtd->priv;
1930 struct cfi_private *cfi = map->fldrv_priv; 1978 struct cfi_private *cfi = map->fldrv_priv;
1931 1979
1980 cfi_amdstd_reset(mtd);
1981 unregister_reboot_notifier(&mtd->reboot_notifier);
1932 kfree(cfi->cmdset_priv); 1982 kfree(cfi->cmdset_priv);
1933 kfree(cfi->cfiq); 1983 kfree(cfi->cfiq);
1934 kfree(cfi); 1984 kfree(cfi);
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 0667a671525d..e54e8c169d76 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -265,7 +265,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
265 265
266 timeo = jiffies + HZ; 266 timeo = jiffies + HZ;
267 retry: 267 retry:
268 spin_lock_bh(chip->mutex); 268 mutex_lock(&chip->mutex);
269 269
270 /* Check that the chip's ready to talk to us. 270 /* Check that the chip's ready to talk to us.
271 * If it's in FL_ERASING state, suspend it and make it talk now. 271 * If it's in FL_ERASING state, suspend it and make it talk now.
@@ -296,15 +296,15 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
296 /* make sure we're in 'read status' mode */ 296 /* make sure we're in 'read status' mode */
297 map_write(map, CMD(0x70), cmd_addr); 297 map_write(map, CMD(0x70), cmd_addr);
298 chip->state = FL_ERASING; 298 chip->state = FL_ERASING;
299 spin_unlock_bh(chip->mutex); 299 mutex_unlock(&chip->mutex);
300 printk(KERN_ERR "Chip not ready after erase " 300 printk(KERN_ERR "Chip not ready after erase "
301 "suspended: status = 0x%lx\n", status.x[0]); 301 "suspended: status = 0x%lx\n", status.x[0]);
302 return -EIO; 302 return -EIO;
303 } 303 }
304 304
305 spin_unlock_bh(chip->mutex); 305 mutex_unlock(&chip->mutex);
306 cfi_udelay(1); 306 cfi_udelay(1);
307 spin_lock_bh(chip->mutex); 307 mutex_lock(&chip->mutex);
308 } 308 }
309 309
310 suspended = 1; 310 suspended = 1;
@@ -335,13 +335,13 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
335 335
336 /* Urgh. Chip not yet ready to talk to us. */ 336 /* Urgh. Chip not yet ready to talk to us. */
337 if (time_after(jiffies, timeo)) { 337 if (time_after(jiffies, timeo)) {
338 spin_unlock_bh(chip->mutex); 338 mutex_unlock(&chip->mutex);
339 printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]); 339 printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
340 return -EIO; 340 return -EIO;
341 } 341 }
342 342
343 /* Latency issues. Drop the lock, wait a while and retry */ 343 /* Latency issues. Drop the lock, wait a while and retry */
344 spin_unlock_bh(chip->mutex); 344 mutex_unlock(&chip->mutex);
345 cfi_udelay(1); 345 cfi_udelay(1);
346 goto retry; 346 goto retry;
347 347
@@ -351,7 +351,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
351 someone changes the status */ 351 someone changes the status */
352 set_current_state(TASK_UNINTERRUPTIBLE); 352 set_current_state(TASK_UNINTERRUPTIBLE);
353 add_wait_queue(&chip->wq, &wait); 353 add_wait_queue(&chip->wq, &wait);
354 spin_unlock_bh(chip->mutex); 354 mutex_unlock(&chip->mutex);
355 schedule(); 355 schedule();
356 remove_wait_queue(&chip->wq, &wait); 356 remove_wait_queue(&chip->wq, &wait);
357 timeo = jiffies + HZ; 357 timeo = jiffies + HZ;
@@ -376,7 +376,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
376 } 376 }
377 377
378 wake_up(&chip->wq); 378 wake_up(&chip->wq);
379 spin_unlock_bh(chip->mutex); 379 mutex_unlock(&chip->mutex);
380 return 0; 380 return 0;
381} 381}
382 382
@@ -445,7 +445,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
445#ifdef DEBUG_CFI_FEATURES 445#ifdef DEBUG_CFI_FEATURES
446 printk("%s: chip->state[%d]\n", __func__, chip->state); 446 printk("%s: chip->state[%d]\n", __func__, chip->state);
447#endif 447#endif
448 spin_lock_bh(chip->mutex); 448 mutex_lock(&chip->mutex);
449 449
450 /* Check that the chip's ready to talk to us. 450 /* Check that the chip's ready to talk to us.
451 * Later, we can actually think about interrupting it 451 * Later, we can actually think about interrupting it
@@ -470,14 +470,14 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
470 break; 470 break;
471 /* Urgh. Chip not yet ready to talk to us. */ 471 /* Urgh. Chip not yet ready to talk to us. */
472 if (time_after(jiffies, timeo)) { 472 if (time_after(jiffies, timeo)) {
473 spin_unlock_bh(chip->mutex); 473 mutex_unlock(&chip->mutex);
474 printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n", 474 printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
475 status.x[0], map_read(map, cmd_adr).x[0]); 475 status.x[0], map_read(map, cmd_adr).x[0]);
476 return -EIO; 476 return -EIO;
477 } 477 }
478 478
479 /* Latency issues. Drop the lock, wait a while and retry */ 479 /* Latency issues. Drop the lock, wait a while and retry */
480 spin_unlock_bh(chip->mutex); 480 mutex_unlock(&chip->mutex);
481 cfi_udelay(1); 481 cfi_udelay(1);
482 goto retry; 482 goto retry;
483 483
@@ -486,7 +486,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
486 someone changes the status */ 486 someone changes the status */
487 set_current_state(TASK_UNINTERRUPTIBLE); 487 set_current_state(TASK_UNINTERRUPTIBLE);
488 add_wait_queue(&chip->wq, &wait); 488 add_wait_queue(&chip->wq, &wait);
489 spin_unlock_bh(chip->mutex); 489 mutex_unlock(&chip->mutex);
490 schedule(); 490 schedule();
491 remove_wait_queue(&chip->wq, &wait); 491 remove_wait_queue(&chip->wq, &wait);
492 timeo = jiffies + HZ; 492 timeo = jiffies + HZ;
@@ -503,16 +503,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
503 if (map_word_andequal(map, status, status_OK, status_OK)) 503 if (map_word_andequal(map, status, status_OK, status_OK))
504 break; 504 break;
505 505
506 spin_unlock_bh(chip->mutex); 506 mutex_unlock(&chip->mutex);
507 cfi_udelay(1); 507 cfi_udelay(1);
508 spin_lock_bh(chip->mutex); 508 mutex_lock(&chip->mutex);
509 509
510 if (++z > 100) { 510 if (++z > 100) {
511 /* Argh. Not ready for write to buffer */ 511 /* Argh. Not ready for write to buffer */
512 DISABLE_VPP(map); 512 DISABLE_VPP(map);
513 map_write(map, CMD(0x70), cmd_adr); 513 map_write(map, CMD(0x70), cmd_adr);
514 chip->state = FL_STATUS; 514 chip->state = FL_STATUS;
515 spin_unlock_bh(chip->mutex); 515 mutex_unlock(&chip->mutex);
516 printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]); 516 printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
517 return -EIO; 517 return -EIO;
518 } 518 }
@@ -532,9 +532,9 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
532 map_write(map, CMD(0xd0), cmd_adr); 532 map_write(map, CMD(0xd0), cmd_adr);
533 chip->state = FL_WRITING; 533 chip->state = FL_WRITING;
534 534
535 spin_unlock_bh(chip->mutex); 535 mutex_unlock(&chip->mutex);
536 cfi_udelay(chip->buffer_write_time); 536 cfi_udelay(chip->buffer_write_time);
537 spin_lock_bh(chip->mutex); 537 mutex_lock(&chip->mutex);
538 538
539 timeo = jiffies + (HZ/2); 539 timeo = jiffies + (HZ/2);
540 z = 0; 540 z = 0;
@@ -543,11 +543,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
543 /* Someone's suspended the write. Sleep */ 543 /* Someone's suspended the write. Sleep */
544 set_current_state(TASK_UNINTERRUPTIBLE); 544 set_current_state(TASK_UNINTERRUPTIBLE);
545 add_wait_queue(&chip->wq, &wait); 545 add_wait_queue(&chip->wq, &wait);
546 spin_unlock_bh(chip->mutex); 546 mutex_unlock(&chip->mutex);
547 schedule(); 547 schedule();
548 remove_wait_queue(&chip->wq, &wait); 548 remove_wait_queue(&chip->wq, &wait);
549 timeo = jiffies + (HZ / 2); /* FIXME */ 549 timeo = jiffies + (HZ / 2); /* FIXME */
550 spin_lock_bh(chip->mutex); 550 mutex_lock(&chip->mutex);
551 continue; 551 continue;
552 } 552 }
553 553
@@ -563,16 +563,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
563 map_write(map, CMD(0x70), adr); 563 map_write(map, CMD(0x70), adr);
564 chip->state = FL_STATUS; 564 chip->state = FL_STATUS;
565 DISABLE_VPP(map); 565 DISABLE_VPP(map);
566 spin_unlock_bh(chip->mutex); 566 mutex_unlock(&chip->mutex);
567 printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n"); 567 printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
568 return -EIO; 568 return -EIO;
569 } 569 }
570 570
571 /* Latency issues. Drop the lock, wait a while and retry */ 571 /* Latency issues. Drop the lock, wait a while and retry */
572 spin_unlock_bh(chip->mutex); 572 mutex_unlock(&chip->mutex);
573 cfi_udelay(1); 573 cfi_udelay(1);
574 z++; 574 z++;
575 spin_lock_bh(chip->mutex); 575 mutex_lock(&chip->mutex);
576 } 576 }
577 if (!z) { 577 if (!z) {
578 chip->buffer_write_time--; 578 chip->buffer_write_time--;
@@ -596,11 +596,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
596 /* put back into read status register mode */ 596 /* put back into read status register mode */
597 map_write(map, CMD(0x70), adr); 597 map_write(map, CMD(0x70), adr);
598 wake_up(&chip->wq); 598 wake_up(&chip->wq);
599 spin_unlock_bh(chip->mutex); 599 mutex_unlock(&chip->mutex);
600 return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO; 600 return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
601 } 601 }
602 wake_up(&chip->wq); 602 wake_up(&chip->wq);
603 spin_unlock_bh(chip->mutex); 603 mutex_unlock(&chip->mutex);
604 604
605 return 0; 605 return 0;
606} 606}
@@ -749,7 +749,7 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
749 749
750 timeo = jiffies + HZ; 750 timeo = jiffies + HZ;
751retry: 751retry:
752 spin_lock_bh(chip->mutex); 752 mutex_lock(&chip->mutex);
753 753
754 /* Check that the chip's ready to talk to us. */ 754 /* Check that the chip's ready to talk to us. */
755 switch (chip->state) { 755 switch (chip->state) {
@@ -766,13 +766,13 @@ retry:
766 766
767 /* Urgh. Chip not yet ready to talk to us. */ 767 /* Urgh. Chip not yet ready to talk to us. */
768 if (time_after(jiffies, timeo)) { 768 if (time_after(jiffies, timeo)) {
769 spin_unlock_bh(chip->mutex); 769 mutex_unlock(&chip->mutex);
770 printk(KERN_ERR "waiting for chip to be ready timed out in erase\n"); 770 printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
771 return -EIO; 771 return -EIO;
772 } 772 }
773 773
774 /* Latency issues. Drop the lock, wait a while and retry */ 774 /* Latency issues. Drop the lock, wait a while and retry */
775 spin_unlock_bh(chip->mutex); 775 mutex_unlock(&chip->mutex);
776 cfi_udelay(1); 776 cfi_udelay(1);
777 goto retry; 777 goto retry;
778 778
@@ -781,7 +781,7 @@ retry:
781 someone changes the status */ 781 someone changes the status */
782 set_current_state(TASK_UNINTERRUPTIBLE); 782 set_current_state(TASK_UNINTERRUPTIBLE);
783 add_wait_queue(&chip->wq, &wait); 783 add_wait_queue(&chip->wq, &wait);
784 spin_unlock_bh(chip->mutex); 784 mutex_unlock(&chip->mutex);
785 schedule(); 785 schedule();
786 remove_wait_queue(&chip->wq, &wait); 786 remove_wait_queue(&chip->wq, &wait);
787 timeo = jiffies + HZ; 787 timeo = jiffies + HZ;
@@ -797,9 +797,9 @@ retry:
797 map_write(map, CMD(0xD0), adr); 797 map_write(map, CMD(0xD0), adr);
798 chip->state = FL_ERASING; 798 chip->state = FL_ERASING;
799 799
800 spin_unlock_bh(chip->mutex); 800 mutex_unlock(&chip->mutex);
801 msleep(1000); 801 msleep(1000);
802 spin_lock_bh(chip->mutex); 802 mutex_lock(&chip->mutex);
803 803
804 /* FIXME. Use a timer to check this, and return immediately. */ 804 /* FIXME. Use a timer to check this, and return immediately. */
805 /* Once the state machine's known to be working I'll do that */ 805 /* Once the state machine's known to be working I'll do that */
@@ -810,11 +810,11 @@ retry:
810 /* Someone's suspended the erase. Sleep */ 810 /* Someone's suspended the erase. Sleep */
811 set_current_state(TASK_UNINTERRUPTIBLE); 811 set_current_state(TASK_UNINTERRUPTIBLE);
812 add_wait_queue(&chip->wq, &wait); 812 add_wait_queue(&chip->wq, &wait);
813 spin_unlock_bh(chip->mutex); 813 mutex_unlock(&chip->mutex);
814 schedule(); 814 schedule();
815 remove_wait_queue(&chip->wq, &wait); 815 remove_wait_queue(&chip->wq, &wait);
816 timeo = jiffies + (HZ*20); /* FIXME */ 816 timeo = jiffies + (HZ*20); /* FIXME */
817 spin_lock_bh(chip->mutex); 817 mutex_lock(&chip->mutex);
818 continue; 818 continue;
819 } 819 }
820 820
@@ -828,14 +828,14 @@ retry:
828 chip->state = FL_STATUS; 828 chip->state = FL_STATUS;
829 printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); 829 printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
830 DISABLE_VPP(map); 830 DISABLE_VPP(map);
831 spin_unlock_bh(chip->mutex); 831 mutex_unlock(&chip->mutex);
832 return -EIO; 832 return -EIO;
833 } 833 }
834 834
835 /* Latency issues. Drop the lock, wait a while and retry */ 835 /* Latency issues. Drop the lock, wait a while and retry */
836 spin_unlock_bh(chip->mutex); 836 mutex_unlock(&chip->mutex);
837 cfi_udelay(1); 837 cfi_udelay(1);
838 spin_lock_bh(chip->mutex); 838 mutex_lock(&chip->mutex);
839 } 839 }
840 840
841 DISABLE_VPP(map); 841 DISABLE_VPP(map);
@@ -878,7 +878,7 @@ retry:
878 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus); 878 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
879 timeo = jiffies + HZ; 879 timeo = jiffies + HZ;
880 chip->state = FL_STATUS; 880 chip->state = FL_STATUS;
881 spin_unlock_bh(chip->mutex); 881 mutex_unlock(&chip->mutex);
882 goto retry; 882 goto retry;
883 } 883 }
884 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus); 884 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
@@ -887,7 +887,7 @@ retry:
887 } 887 }
888 888
889 wake_up(&chip->wq); 889 wake_up(&chip->wq);
890 spin_unlock_bh(chip->mutex); 890 mutex_unlock(&chip->mutex);
891 return ret; 891 return ret;
892} 892}
893 893
@@ -995,7 +995,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
995 chip = &cfi->chips[i]; 995 chip = &cfi->chips[i];
996 996
997 retry: 997 retry:
998 spin_lock_bh(chip->mutex); 998 mutex_lock(&chip->mutex);
999 999
1000 switch(chip->state) { 1000 switch(chip->state) {
1001 case FL_READY: 1001 case FL_READY:
@@ -1009,7 +1009,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
1009 * with the chip now anyway. 1009 * with the chip now anyway.
1010 */ 1010 */
1011 case FL_SYNCING: 1011 case FL_SYNCING:
1012 spin_unlock_bh(chip->mutex); 1012 mutex_unlock(&chip->mutex);
1013 break; 1013 break;
1014 1014
1015 default: 1015 default:
@@ -1017,7 +1017,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
1017 set_current_state(TASK_UNINTERRUPTIBLE); 1017 set_current_state(TASK_UNINTERRUPTIBLE);
1018 add_wait_queue(&chip->wq, &wait); 1018 add_wait_queue(&chip->wq, &wait);
1019 1019
1020 spin_unlock_bh(chip->mutex); 1020 mutex_unlock(&chip->mutex);
1021 schedule(); 1021 schedule();
1022 remove_wait_queue(&chip->wq, &wait); 1022 remove_wait_queue(&chip->wq, &wait);
1023 1023
@@ -1030,13 +1030,13 @@ static void cfi_staa_sync (struct mtd_info *mtd)
1030 for (i--; i >=0; i--) { 1030 for (i--; i >=0; i--) {
1031 chip = &cfi->chips[i]; 1031 chip = &cfi->chips[i];
1032 1032
1033 spin_lock_bh(chip->mutex); 1033 mutex_lock(&chip->mutex);
1034 1034
1035 if (chip->state == FL_SYNCING) { 1035 if (chip->state == FL_SYNCING) {
1036 chip->state = chip->oldstate; 1036 chip->state = chip->oldstate;
1037 wake_up(&chip->wq); 1037 wake_up(&chip->wq);
1038 } 1038 }
1039 spin_unlock_bh(chip->mutex); 1039 mutex_unlock(&chip->mutex);
1040 } 1040 }
1041} 1041}
1042 1042
@@ -1054,7 +1054,7 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
1054 1054
1055 timeo = jiffies + HZ; 1055 timeo = jiffies + HZ;
1056retry: 1056retry:
1057 spin_lock_bh(chip->mutex); 1057 mutex_lock(&chip->mutex);
1058 1058
1059 /* Check that the chip's ready to talk to us. */ 1059 /* Check that the chip's ready to talk to us. */
1060 switch (chip->state) { 1060 switch (chip->state) {
@@ -1071,13 +1071,13 @@ retry:
1071 1071
1072 /* Urgh. Chip not yet ready to talk to us. */ 1072 /* Urgh. Chip not yet ready to talk to us. */
1073 if (time_after(jiffies, timeo)) { 1073 if (time_after(jiffies, timeo)) {
1074 spin_unlock_bh(chip->mutex); 1074 mutex_unlock(&chip->mutex);
1075 printk(KERN_ERR "waiting for chip to be ready timed out in lock\n"); 1075 printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
1076 return -EIO; 1076 return -EIO;
1077 } 1077 }
1078 1078
1079 /* Latency issues. Drop the lock, wait a while and retry */ 1079 /* Latency issues. Drop the lock, wait a while and retry */
1080 spin_unlock_bh(chip->mutex); 1080 mutex_unlock(&chip->mutex);
1081 cfi_udelay(1); 1081 cfi_udelay(1);
1082 goto retry; 1082 goto retry;
1083 1083
@@ -1086,7 +1086,7 @@ retry:
1086 someone changes the status */ 1086 someone changes the status */
1087 set_current_state(TASK_UNINTERRUPTIBLE); 1087 set_current_state(TASK_UNINTERRUPTIBLE);
1088 add_wait_queue(&chip->wq, &wait); 1088 add_wait_queue(&chip->wq, &wait);
1089 spin_unlock_bh(chip->mutex); 1089 mutex_unlock(&chip->mutex);
1090 schedule(); 1090 schedule();
1091 remove_wait_queue(&chip->wq, &wait); 1091 remove_wait_queue(&chip->wq, &wait);
1092 timeo = jiffies + HZ; 1092 timeo = jiffies + HZ;
@@ -1098,9 +1098,9 @@ retry:
1098 map_write(map, CMD(0x01), adr); 1098 map_write(map, CMD(0x01), adr);
1099 chip->state = FL_LOCKING; 1099 chip->state = FL_LOCKING;
1100 1100
1101 spin_unlock_bh(chip->mutex); 1101 mutex_unlock(&chip->mutex);
1102 msleep(1000); 1102 msleep(1000);
1103 spin_lock_bh(chip->mutex); 1103 mutex_lock(&chip->mutex);
1104 1104
1105 /* FIXME. Use a timer to check this, and return immediately. */ 1105 /* FIXME. Use a timer to check this, and return immediately. */
1106 /* Once the state machine's known to be working I'll do that */ 1106 /* Once the state machine's known to be working I'll do that */
@@ -1118,21 +1118,21 @@ retry:
1118 chip->state = FL_STATUS; 1118 chip->state = FL_STATUS;
1119 printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); 1119 printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
1120 DISABLE_VPP(map); 1120 DISABLE_VPP(map);
1121 spin_unlock_bh(chip->mutex); 1121 mutex_unlock(&chip->mutex);
1122 return -EIO; 1122 return -EIO;
1123 } 1123 }
1124 1124
1125 /* Latency issues. Drop the lock, wait a while and retry */ 1125 /* Latency issues. Drop the lock, wait a while and retry */
1126 spin_unlock_bh(chip->mutex); 1126 mutex_unlock(&chip->mutex);
1127 cfi_udelay(1); 1127 cfi_udelay(1);
1128 spin_lock_bh(chip->mutex); 1128 mutex_lock(&chip->mutex);
1129 } 1129 }
1130 1130
1131 /* Done and happy. */ 1131 /* Done and happy. */
1132 chip->state = FL_STATUS; 1132 chip->state = FL_STATUS;
1133 DISABLE_VPP(map); 1133 DISABLE_VPP(map);
1134 wake_up(&chip->wq); 1134 wake_up(&chip->wq);
1135 spin_unlock_bh(chip->mutex); 1135 mutex_unlock(&chip->mutex);
1136 return 0; 1136 return 0;
1137} 1137}
1138static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 1138static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -1203,7 +1203,7 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
1203 1203
1204 timeo = jiffies + HZ; 1204 timeo = jiffies + HZ;
1205retry: 1205retry:
1206 spin_lock_bh(chip->mutex); 1206 mutex_lock(&chip->mutex);
1207 1207
1208 /* Check that the chip's ready to talk to us. */ 1208 /* Check that the chip's ready to talk to us. */
1209 switch (chip->state) { 1209 switch (chip->state) {
@@ -1220,13 +1220,13 @@ retry:
1220 1220
1221 /* Urgh. Chip not yet ready to talk to us. */ 1221 /* Urgh. Chip not yet ready to talk to us. */
1222 if (time_after(jiffies, timeo)) { 1222 if (time_after(jiffies, timeo)) {
1223 spin_unlock_bh(chip->mutex); 1223 mutex_unlock(&chip->mutex);
1224 printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n"); 1224 printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
1225 return -EIO; 1225 return -EIO;
1226 } 1226 }
1227 1227
1228 /* Latency issues. Drop the lock, wait a while and retry */ 1228 /* Latency issues. Drop the lock, wait a while and retry */
1229 spin_unlock_bh(chip->mutex); 1229 mutex_unlock(&chip->mutex);
1230 cfi_udelay(1); 1230 cfi_udelay(1);
1231 goto retry; 1231 goto retry;
1232 1232
@@ -1235,7 +1235,7 @@ retry:
1235 someone changes the status */ 1235 someone changes the status */
1236 set_current_state(TASK_UNINTERRUPTIBLE); 1236 set_current_state(TASK_UNINTERRUPTIBLE);
1237 add_wait_queue(&chip->wq, &wait); 1237 add_wait_queue(&chip->wq, &wait);
1238 spin_unlock_bh(chip->mutex); 1238 mutex_unlock(&chip->mutex);
1239 schedule(); 1239 schedule();
1240 remove_wait_queue(&chip->wq, &wait); 1240 remove_wait_queue(&chip->wq, &wait);
1241 timeo = jiffies + HZ; 1241 timeo = jiffies + HZ;
@@ -1247,9 +1247,9 @@ retry:
1247 map_write(map, CMD(0xD0), adr); 1247 map_write(map, CMD(0xD0), adr);
1248 chip->state = FL_UNLOCKING; 1248 chip->state = FL_UNLOCKING;
1249 1249
1250 spin_unlock_bh(chip->mutex); 1250 mutex_unlock(&chip->mutex);
1251 msleep(1000); 1251 msleep(1000);
1252 spin_lock_bh(chip->mutex); 1252 mutex_lock(&chip->mutex);
1253 1253
1254 /* FIXME. Use a timer to check this, and return immediately. */ 1254 /* FIXME. Use a timer to check this, and return immediately. */
1255 /* Once the state machine's known to be working I'll do that */ 1255 /* Once the state machine's known to be working I'll do that */
@@ -1267,21 +1267,21 @@ retry:
1267 chip->state = FL_STATUS; 1267 chip->state = FL_STATUS;
1268 printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); 1268 printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
1269 DISABLE_VPP(map); 1269 DISABLE_VPP(map);
1270 spin_unlock_bh(chip->mutex); 1270 mutex_unlock(&chip->mutex);
1271 return -EIO; 1271 return -EIO;
1272 } 1272 }
1273 1273
1274 /* Latency issues. Drop the unlock, wait a while and retry */ 1274 /* Latency issues. Drop the unlock, wait a while and retry */
1275 spin_unlock_bh(chip->mutex); 1275 mutex_unlock(&chip->mutex);
1276 cfi_udelay(1); 1276 cfi_udelay(1);
1277 spin_lock_bh(chip->mutex); 1277 mutex_lock(&chip->mutex);
1278 } 1278 }
1279 1279
1280 /* Done and happy. */ 1280 /* Done and happy. */
1281 chip->state = FL_STATUS; 1281 chip->state = FL_STATUS;
1282 DISABLE_VPP(map); 1282 DISABLE_VPP(map);
1283 wake_up(&chip->wq); 1283 wake_up(&chip->wq);
1284 spin_unlock_bh(chip->mutex); 1284 mutex_unlock(&chip->mutex);
1285 return 0; 1285 return 0;
1286} 1286}
1287static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 1287static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -1334,7 +1334,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
1334 for (i=0; !ret && i<cfi->numchips; i++) { 1334 for (i=0; !ret && i<cfi->numchips; i++) {
1335 chip = &cfi->chips[i]; 1335 chip = &cfi->chips[i];
1336 1336
1337 spin_lock_bh(chip->mutex); 1337 mutex_lock(&chip->mutex);
1338 1338
1339 switch(chip->state) { 1339 switch(chip->state) {
1340 case FL_READY: 1340 case FL_READY:
@@ -1354,7 +1354,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
1354 ret = -EAGAIN; 1354 ret = -EAGAIN;
1355 break; 1355 break;
1356 } 1356 }
1357 spin_unlock_bh(chip->mutex); 1357 mutex_unlock(&chip->mutex);
1358 } 1358 }
1359 1359
1360 /* Unlock the chips again */ 1360 /* Unlock the chips again */
@@ -1363,7 +1363,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
1363 for (i--; i >=0; i--) { 1363 for (i--; i >=0; i--) {
1364 chip = &cfi->chips[i]; 1364 chip = &cfi->chips[i];
1365 1365
1366 spin_lock_bh(chip->mutex); 1366 mutex_lock(&chip->mutex);
1367 1367
1368 if (chip->state == FL_PM_SUSPENDED) { 1368 if (chip->state == FL_PM_SUSPENDED) {
1369 /* No need to force it into a known state here, 1369 /* No need to force it into a known state here,
@@ -1372,7 +1372,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
1372 chip->state = chip->oldstate; 1372 chip->state = chip->oldstate;
1373 wake_up(&chip->wq); 1373 wake_up(&chip->wq);
1374 } 1374 }
1375 spin_unlock_bh(chip->mutex); 1375 mutex_unlock(&chip->mutex);
1376 } 1376 }
1377 } 1377 }
1378 1378
@@ -1390,7 +1390,7 @@ static void cfi_staa_resume(struct mtd_info *mtd)
1390 1390
1391 chip = &cfi->chips[i]; 1391 chip = &cfi->chips[i];
1392 1392
1393 spin_lock_bh(chip->mutex); 1393 mutex_lock(&chip->mutex);
1394 1394
1395 /* Go to known state. Chip may have been power cycled */ 1395 /* Go to known state. Chip may have been power cycled */
1396 if (chip->state == FL_PM_SUSPENDED) { 1396 if (chip->state == FL_PM_SUSPENDED) {
@@ -1399,7 +1399,7 @@ static void cfi_staa_resume(struct mtd_info *mtd)
1399 wake_up(&chip->wq); 1399 wake_up(&chip->wq);
1400 } 1400 }
1401 1401
1402 spin_unlock_bh(chip->mutex); 1402 mutex_unlock(&chip->mutex);
1403 } 1403 }
1404} 1404}
1405 1405
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index 57e0e4e921f9..d18064977192 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -58,10 +58,10 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
58 * to flash memory - that means that we don't have to check status 58 * to flash memory - that means that we don't have to check status
59 * and timeout. 59 * and timeout.
60 */ 60 */
61 spin_lock(chip->mutex); 61 mutex_lock(&chip->mutex);
62 ret = get_chip(map, chip, adr, FL_LOCKING); 62 ret = get_chip(map, chip, adr, FL_LOCKING);
63 if (ret) { 63 if (ret) {
64 spin_unlock(chip->mutex); 64 mutex_unlock(&chip->mutex);
65 return ret; 65 return ret;
66 } 66 }
67 67
@@ -72,7 +72,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
72 /* Done and happy. */ 72 /* Done and happy. */
73 chip->state = chip->oldstate; 73 chip->state = chip->oldstate;
74 put_chip(map, chip, adr); 74 put_chip(map, chip, adr);
75 spin_unlock(chip->mutex); 75 mutex_unlock(&chip->mutex);
76 return 0; 76 return 0;
77} 77}
78 78
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index e2dc96441e05..fcc1bc02c8a2 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -155,8 +155,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
155 pchip->start = (i << cfi.chipshift); 155 pchip->start = (i << cfi.chipshift);
156 pchip->state = FL_READY; 156 pchip->state = FL_READY;
157 init_waitqueue_head(&pchip->wq); 157 init_waitqueue_head(&pchip->wq);
158 spin_lock_init(&pchip->_spinlock); 158 mutex_init(&pchip->mutex);
159 pchip->mutex = &pchip->_spinlock;
160 } 159 }
161 } 160 }
162 161
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index ab5c9b92ac82..f3226b1d38fc 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -1,5 +1,5 @@
1# 1#
2# linux/drivers/devices/Makefile 2# linux/drivers/mtd/devices/Makefile
3# 3#
4 4
5obj-$(CONFIG_MTD_DOC2000) += doc2000.o 5obj-$(CONFIG_MTD_DOC2000) += doc2000.o
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index ce6424008ed9..93651865ddbe 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -276,12 +276,10 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
276 276
277 /* Setup the MTD structure */ 277 /* Setup the MTD structure */
278 /* make the name contain the block device in */ 278 /* make the name contain the block device in */
279 name = kmalloc(sizeof("block2mtd: ") + strlen(devname) + 1, 279 name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
280 GFP_KERNEL);
281 if (!name) 280 if (!name)
282 goto devinit_err; 281 goto devinit_err;
283 282
284 sprintf(name, "block2mtd: %s", devname);
285 dev->mtd.name = name; 283 dev->mtd.name = name;
286 284
287 dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK; 285 dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index d2fd550f7e09..fc8ea0a57ac2 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -668,7 +668,7 @@ static int __init init_pmc551(void)
668{ 668{
669 struct pci_dev *PCI_Device = NULL; 669 struct pci_dev *PCI_Device = NULL;
670 struct mypriv *priv; 670 struct mypriv *priv;
671 int count, found = 0; 671 int found = 0;
672 struct mtd_info *mtd; 672 struct mtd_info *mtd;
673 u32 length = 0; 673 u32 length = 0;
674 674
@@ -695,7 +695,7 @@ static int __init init_pmc551(void)
695 /* 695 /*
696 * PCU-bus chipset probe. 696 * PCU-bus chipset probe.
697 */ 697 */
698 for (count = 0; count < MAX_MTD_DEVICES; count++) { 698 for (;;) {
699 699
700 if ((PCI_Device = pci_get_device(PCI_VENDOR_ID_V3_SEMI, 700 if ((PCI_Device = pci_get_device(PCI_VENDOR_ID_V3_SEMI,
701 PCI_DEVICE_ID_V3_SEMI_V370PDC, 701 PCI_DEVICE_ID_V3_SEMI_V370PDC,
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index fe17054ee2fe..bcf040beb835 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -411,17 +411,6 @@ static int __init sst25l_probe(struct spi_device *spi)
411 flash->mtd.erasesize, flash->mtd.erasesize / 1024, 411 flash->mtd.erasesize, flash->mtd.erasesize / 1024,
412 flash->mtd.numeraseregions); 412 flash->mtd.numeraseregions);
413 413
414 if (flash->mtd.numeraseregions)
415 for (i = 0; i < flash->mtd.numeraseregions; i++)
416 DEBUG(MTD_DEBUG_LEVEL2,
417 "mtd.eraseregions[%d] = { .offset = 0x%llx, "
418 ".erasesize = 0x%.8x (%uKiB), "
419 ".numblocks = %d }\n",
420 i, (long long)flash->mtd.eraseregions[i].offset,
421 flash->mtd.eraseregions[i].erasesize,
422 flash->mtd.eraseregions[i].erasesize / 1024,
423 flash->mtd.eraseregions[i].numblocks);
424
425 if (mtd_has_partitions()) { 414 if (mtd_has_partitions()) {
426 struct mtd_partition *parts = NULL; 415 struct mtd_partition *parts = NULL;
427 int nr_parts = 0; 416 int nr_parts = 0;
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index e56d6b42f020..62da9eb7032b 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -1082,7 +1082,6 @@ static void ftl_remove_dev(struct mtd_blktrans_dev *dev)
1082{ 1082{
1083 del_mtd_blktrans_dev(dev); 1083 del_mtd_blktrans_dev(dev);
1084 ftl_freepart((partition_t *)dev); 1084 ftl_freepart((partition_t *)dev);
1085 kfree(dev);
1086} 1085}
1087 1086
1088static struct mtd_blktrans_ops ftl_tr = { 1087static struct mtd_blktrans_ops ftl_tr = {
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 8aca5523a337..015a7fe1b6ee 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -139,7 +139,6 @@ static void inftl_remove_dev(struct mtd_blktrans_dev *dev)
139 139
140 kfree(inftl->PUtable); 140 kfree(inftl->PUtable);
141 kfree(inftl->VUtable); 141 kfree(inftl->VUtable);
142 kfree(inftl);
143} 142}
144 143
145/* 144/*
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 32e82aef3e53..8f988d7d3c5c 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -100,9 +100,10 @@ static int find_boot_record(struct INFTLrecord *inftl)
100 } 100 }
101 101
102 /* To be safer with BIOS, also use erase mark as discriminant */ 102 /* To be safer with BIOS, also use erase mark as discriminant */
103 if ((ret = inftl_read_oob(mtd, block * inftl->EraseSize + 103 ret = inftl_read_oob(mtd,
104 SECTORSIZE + 8, 8, &retlen, 104 block * inftl->EraseSize + SECTORSIZE + 8,
105 (char *)&h1) < 0)) { 105 8, &retlen,(char *)&h1);
106 if (ret < 0) {
106 printk(KERN_WARNING "INFTL: ANAND header found at " 107 printk(KERN_WARNING "INFTL: ANAND header found at "
107 "0x%x in mtd%d, but OOB data read failed " 108 "0x%x in mtd%d, but OOB data read failed "
108 "(err %d)\n", block * inftl->EraseSize, 109 "(err %d)\n", block * inftl->EraseSize,
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index a73ee12aad81..fece5be58715 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -107,8 +107,7 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
107 /* those should be reset too since 107 /* those should be reset too since
108 they create memory references. */ 108 they create memory references. */
109 init_waitqueue_head(&chip->wq); 109 init_waitqueue_head(&chip->wq);
110 spin_lock_init(&chip->_spinlock); 110 mutex_init(&chip->mutex);
111 chip->mutex = &chip->_spinlock;
112 chip++; 111 chip++;
113 } 112 }
114 } 113 }
@@ -144,7 +143,7 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip,
144 } 143 }
145 144
146 /* OK Still waiting. Drop the lock, wait a while and retry. */ 145 /* OK Still waiting. Drop the lock, wait a while and retry. */
147 spin_unlock(chip->mutex); 146 mutex_unlock(&chip->mutex);
148 if (sleep_time >= 1000000/HZ) { 147 if (sleep_time >= 1000000/HZ) {
149 /* 148 /*
150 * Half of the normal delay still remaining 149 * Half of the normal delay still remaining
@@ -159,17 +158,17 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip,
159 cond_resched(); 158 cond_resched();
160 timeo--; 159 timeo--;
161 } 160 }
162 spin_lock(chip->mutex); 161 mutex_lock(&chip->mutex);
163 162
164 while (chip->state != chip_state) { 163 while (chip->state != chip_state) {
165 /* Someone's suspended the operation: sleep */ 164 /* Someone's suspended the operation: sleep */
166 DECLARE_WAITQUEUE(wait, current); 165 DECLARE_WAITQUEUE(wait, current);
167 set_current_state(TASK_UNINTERRUPTIBLE); 166 set_current_state(TASK_UNINTERRUPTIBLE);
168 add_wait_queue(&chip->wq, &wait); 167 add_wait_queue(&chip->wq, &wait);
169 spin_unlock(chip->mutex); 168 mutex_unlock(&chip->mutex);
170 schedule(); 169 schedule();
171 remove_wait_queue(&chip->wq, &wait); 170 remove_wait_queue(&chip->wq, &wait);
172 spin_lock(chip->mutex); 171 mutex_lock(&chip->mutex);
173 } 172 }
174 if (chip->erase_suspended || chip->write_suspended) { 173 if (chip->erase_suspended || chip->write_suspended) {
175 /* Suspend has occured while sleep: reset timeout */ 174 /* Suspend has occured while sleep: reset timeout */
@@ -230,20 +229,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
230 * it'll happily send us to sleep. In any case, when 229 * it'll happily send us to sleep. In any case, when
231 * get_chip returns success we're clear to go ahead. 230 * get_chip returns success we're clear to go ahead.
232 */ 231 */
233 ret = spin_trylock(contender->mutex); 232 ret = mutex_trylock(&contender->mutex);
234 spin_unlock(&shared->lock); 233 spin_unlock(&shared->lock);
235 if (!ret) 234 if (!ret)
236 goto retry; 235 goto retry;
237 spin_unlock(chip->mutex); 236 mutex_unlock(&chip->mutex);
238 ret = chip_ready(map, contender, mode); 237 ret = chip_ready(map, contender, mode);
239 spin_lock(chip->mutex); 238 mutex_lock(&chip->mutex);
240 239
241 if (ret == -EAGAIN) { 240 if (ret == -EAGAIN) {
242 spin_unlock(contender->mutex); 241 mutex_unlock(&contender->mutex);
243 goto retry; 242 goto retry;
244 } 243 }
245 if (ret) { 244 if (ret) {
246 spin_unlock(contender->mutex); 245 mutex_unlock(&contender->mutex);
247 return ret; 246 return ret;
248 } 247 }
249 spin_lock(&shared->lock); 248 spin_lock(&shared->lock);
@@ -252,10 +251,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
252 * state. Put contender and retry. */ 251 * state. Put contender and retry. */
253 if (chip->state == FL_SYNCING) { 252 if (chip->state == FL_SYNCING) {
254 put_chip(map, contender); 253 put_chip(map, contender);
255 spin_unlock(contender->mutex); 254 mutex_unlock(&contender->mutex);
256 goto retry; 255 goto retry;
257 } 256 }
258 spin_unlock(contender->mutex); 257 mutex_unlock(&contender->mutex);
259 } 258 }
260 259
261 /* Check if we have suspended erase on this chip. 260 /* Check if we have suspended erase on this chip.
@@ -265,10 +264,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
265 spin_unlock(&shared->lock); 264 spin_unlock(&shared->lock);
266 set_current_state(TASK_UNINTERRUPTIBLE); 265 set_current_state(TASK_UNINTERRUPTIBLE);
267 add_wait_queue(&chip->wq, &wait); 266 add_wait_queue(&chip->wq, &wait);
268 spin_unlock(chip->mutex); 267 mutex_unlock(&chip->mutex);
269 schedule(); 268 schedule();
270 remove_wait_queue(&chip->wq, &wait); 269 remove_wait_queue(&chip->wq, &wait);
271 spin_lock(chip->mutex); 270 mutex_lock(&chip->mutex);
272 goto retry; 271 goto retry;
273 } 272 }
274 273
@@ -337,10 +336,10 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
337sleep: 336sleep:
338 set_current_state(TASK_UNINTERRUPTIBLE); 337 set_current_state(TASK_UNINTERRUPTIBLE);
339 add_wait_queue(&chip->wq, &wait); 338 add_wait_queue(&chip->wq, &wait);
340 spin_unlock(chip->mutex); 339 mutex_unlock(&chip->mutex);
341 schedule(); 340 schedule();
342 remove_wait_queue(&chip->wq, &wait); 341 remove_wait_queue(&chip->wq, &wait);
343 spin_lock(chip->mutex); 342 mutex_lock(&chip->mutex);
344 return -EAGAIN; 343 return -EAGAIN;
345 } 344 }
346} 345}
@@ -356,12 +355,12 @@ static void put_chip(struct map_info *map, struct flchip *chip)
356 if (shared->writing && shared->writing != chip) { 355 if (shared->writing && shared->writing != chip) {
357 /* give back the ownership */ 356 /* give back the ownership */
358 struct flchip *loaner = shared->writing; 357 struct flchip *loaner = shared->writing;
359 spin_lock(loaner->mutex); 358 mutex_lock(&loaner->mutex);
360 spin_unlock(&shared->lock); 359 spin_unlock(&shared->lock);
361 spin_unlock(chip->mutex); 360 mutex_unlock(&chip->mutex);
362 put_chip(map, loaner); 361 put_chip(map, loaner);
363 spin_lock(chip->mutex); 362 mutex_lock(&chip->mutex);
364 spin_unlock(loaner->mutex); 363 mutex_unlock(&loaner->mutex);
365 wake_up(&chip->wq); 364 wake_up(&chip->wq);
366 return; 365 return;
367 } 366 }
@@ -414,10 +413,10 @@ int do_write_buffer(struct map_info *map, struct flchip *chip,
414 413
415 wbufsize = 1 << lpddr->qinfo->BufSizeShift; 414 wbufsize = 1 << lpddr->qinfo->BufSizeShift;
416 415
417 spin_lock(chip->mutex); 416 mutex_lock(&chip->mutex);
418 ret = get_chip(map, chip, FL_WRITING); 417 ret = get_chip(map, chip, FL_WRITING);
419 if (ret) { 418 if (ret) {
420 spin_unlock(chip->mutex); 419 mutex_unlock(&chip->mutex);
421 return ret; 420 return ret;
422 } 421 }
423 /* Figure out the number of words to write */ 422 /* Figure out the number of words to write */
@@ -478,7 +477,7 @@ int do_write_buffer(struct map_info *map, struct flchip *chip,
478 } 477 }
479 478
480 out: put_chip(map, chip); 479 out: put_chip(map, chip);
481 spin_unlock(chip->mutex); 480 mutex_unlock(&chip->mutex);
482 return ret; 481 return ret;
483} 482}
484 483
@@ -490,10 +489,10 @@ int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
490 struct flchip *chip = &lpddr->chips[chipnum]; 489 struct flchip *chip = &lpddr->chips[chipnum];
491 int ret; 490 int ret;
492 491
493 spin_lock(chip->mutex); 492 mutex_lock(&chip->mutex);
494 ret = get_chip(map, chip, FL_ERASING); 493 ret = get_chip(map, chip, FL_ERASING);
495 if (ret) { 494 if (ret) {
496 spin_unlock(chip->mutex); 495 mutex_unlock(&chip->mutex);
497 return ret; 496 return ret;
498 } 497 }
499 send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL); 498 send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
@@ -505,7 +504,7 @@ int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
505 goto out; 504 goto out;
506 } 505 }
507 out: put_chip(map, chip); 506 out: put_chip(map, chip);
508 spin_unlock(chip->mutex); 507 mutex_unlock(&chip->mutex);
509 return ret; 508 return ret;
510} 509}
511 510
@@ -518,10 +517,10 @@ static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
518 struct flchip *chip = &lpddr->chips[chipnum]; 517 struct flchip *chip = &lpddr->chips[chipnum];
519 int ret = 0; 518 int ret = 0;
520 519
521 spin_lock(chip->mutex); 520 mutex_lock(&chip->mutex);
522 ret = get_chip(map, chip, FL_READY); 521 ret = get_chip(map, chip, FL_READY);
523 if (ret) { 522 if (ret) {
524 spin_unlock(chip->mutex); 523 mutex_unlock(&chip->mutex);
525 return ret; 524 return ret;
526 } 525 }
527 526
@@ -529,7 +528,7 @@ static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
529 *retlen = len; 528 *retlen = len;
530 529
531 put_chip(map, chip); 530 put_chip(map, chip);
532 spin_unlock(chip->mutex); 531 mutex_unlock(&chip->mutex);
533 return ret; 532 return ret;
534} 533}
535 534
@@ -569,9 +568,9 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
569 else 568 else
570 thislen = len; 569 thislen = len;
571 /* get the chip */ 570 /* get the chip */
572 spin_lock(chip->mutex); 571 mutex_lock(&chip->mutex);
573 ret = get_chip(map, chip, FL_POINT); 572 ret = get_chip(map, chip, FL_POINT);
574 spin_unlock(chip->mutex); 573 mutex_unlock(&chip->mutex);
575 if (ret) 574 if (ret)
576 break; 575 break;
577 576
@@ -611,7 +610,7 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
611 else 610 else
612 thislen = len; 611 thislen = len;
613 612
614 spin_lock(chip->mutex); 613 mutex_lock(&chip->mutex);
615 if (chip->state == FL_POINT) { 614 if (chip->state == FL_POINT) {
616 chip->ref_point_counter--; 615 chip->ref_point_counter--;
617 if (chip->ref_point_counter == 0) 616 if (chip->ref_point_counter == 0)
@@ -621,7 +620,7 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
621 "pointed region\n", map->name); 620 "pointed region\n", map->name);
622 621
623 put_chip(map, chip); 622 put_chip(map, chip);
624 spin_unlock(chip->mutex); 623 mutex_unlock(&chip->mutex);
625 624
626 len -= thislen; 625 len -= thislen;
627 ofs = 0; 626 ofs = 0;
@@ -727,10 +726,10 @@ int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
727 int chipnum = adr >> lpddr->chipshift; 726 int chipnum = adr >> lpddr->chipshift;
728 struct flchip *chip = &lpddr->chips[chipnum]; 727 struct flchip *chip = &lpddr->chips[chipnum];
729 728
730 spin_lock(chip->mutex); 729 mutex_lock(&chip->mutex);
731 ret = get_chip(map, chip, FL_LOCKING); 730 ret = get_chip(map, chip, FL_LOCKING);
732 if (ret) { 731 if (ret) {
733 spin_unlock(chip->mutex); 732 mutex_unlock(&chip->mutex);
734 return ret; 733 return ret;
735 } 734 }
736 735
@@ -750,7 +749,7 @@ int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
750 goto out; 749 goto out;
751 } 750 }
752out: put_chip(map, chip); 751out: put_chip(map, chip);
753 spin_unlock(chip->mutex); 752 mutex_unlock(&chip->mutex);
754 return ret; 753 return ret;
755} 754}
756 755
@@ -771,10 +770,10 @@ int word_program(struct map_info *map, loff_t adr, uint32_t curval)
771 int chipnum = adr >> lpddr->chipshift; 770 int chipnum = adr >> lpddr->chipshift;
772 struct flchip *chip = &lpddr->chips[chipnum]; 771 struct flchip *chip = &lpddr->chips[chipnum];
773 772
774 spin_lock(chip->mutex); 773 mutex_lock(&chip->mutex);
775 ret = get_chip(map, chip, FL_WRITING); 774 ret = get_chip(map, chip, FL_WRITING);
776 if (ret) { 775 if (ret) {
777 spin_unlock(chip->mutex); 776 mutex_unlock(&chip->mutex);
778 return ret; 777 return ret;
779 } 778 }
780 779
@@ -788,7 +787,7 @@ int word_program(struct map_info *map, loff_t adr, uint32_t curval)
788 } 787 }
789 788
790out: put_chip(map, chip); 789out: put_chip(map, chip);
791 spin_unlock(chip->mutex); 790 mutex_unlock(&chip->mutex);
792 return ret; 791 return ret;
793} 792}
794 793
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index c0fd99b0c525..85dd18193cf2 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -70,7 +70,7 @@ static void switch_back(struct async_state *state)
70 local_irq_restore(state->irq_flags); 70 local_irq_restore(state->irq_flags);
71} 71}
72 72
73static map_word bfin_read(struct map_info *map, unsigned long ofs) 73static map_word bfin_flash_read(struct map_info *map, unsigned long ofs)
74{ 74{
75 struct async_state *state = (struct async_state *)map->map_priv_1; 75 struct async_state *state = (struct async_state *)map->map_priv_1;
76 uint16_t word; 76 uint16_t word;
@@ -86,7 +86,7 @@ static map_word bfin_read(struct map_info *map, unsigned long ofs)
86 return test; 86 return test;
87} 87}
88 88
89static void bfin_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) 89static void bfin_flash_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
90{ 90{
91 struct async_state *state = (struct async_state *)map->map_priv_1; 91 struct async_state *state = (struct async_state *)map->map_priv_1;
92 92
@@ -97,7 +97,7 @@ static void bfin_copy_from(struct map_info *map, void *to, unsigned long from, s
97 switch_back(state); 97 switch_back(state);
98} 98}
99 99
100static void bfin_write(struct map_info *map, map_word d1, unsigned long ofs) 100static void bfin_flash_write(struct map_info *map, map_word d1, unsigned long ofs)
101{ 101{
102 struct async_state *state = (struct async_state *)map->map_priv_1; 102 struct async_state *state = (struct async_state *)map->map_priv_1;
103 uint16_t d; 103 uint16_t d;
@@ -112,7 +112,7 @@ static void bfin_write(struct map_info *map, map_word d1, unsigned long ofs)
112 switch_back(state); 112 switch_back(state);
113} 113}
114 114
115static void bfin_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) 115static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
116{ 116{
117 struct async_state *state = (struct async_state *)map->map_priv_1; 117 struct async_state *state = (struct async_state *)map->map_priv_1;
118 118
@@ -141,10 +141,10 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
141 return -ENOMEM; 141 return -ENOMEM;
142 142
143 state->map.name = DRIVER_NAME; 143 state->map.name = DRIVER_NAME;
144 state->map.read = bfin_read; 144 state->map.read = bfin_flash_read;
145 state->map.copy_from = bfin_copy_from; 145 state->map.copy_from = bfin_flash_copy_from;
146 state->map.write = bfin_write; 146 state->map.write = bfin_flash_write;
147 state->map.copy_to = bfin_copy_to; 147 state->map.copy_to = bfin_flash_copy_to;
148 state->map.bankwidth = pdata->width; 148 state->map.bankwidth = pdata->width;
149 state->map.size = memory->end - memory->start + 1; 149 state->map.size = memory->end - memory->start + 1;
150 state->map.virt = (void __iomem *)memory->start; 150 state->map.virt = (void __iomem *)memory->start;
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
index d41f34766e53..c09f4f57093e 100644
--- a/drivers/mtd/maps/ceiva.c
+++ b/drivers/mtd/maps/ceiva.c
@@ -253,7 +253,7 @@ static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd
253 253
254static int __init clps_setup_flash(void) 254static int __init clps_setup_flash(void)
255{ 255{
256 int nr; 256 int nr = 0;
257 257
258#ifdef CONFIG_ARCH_CEIVA 258#ifdef CONFIG_ARCH_CEIVA
259 if (machine_is_ceiva()) { 259 if (machine_is_ceiva()) {
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 7b0515297411..7513d90fee6f 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -107,8 +107,8 @@ static void ixp4xx_copy_from(struct map_info *map, void *to,
107 return; 107 return;
108 108
109 if (from & 1) { 109 if (from & 1) {
110 *dest++ = BYTE1(flash_read16(src)); 110 *dest++ = BYTE1(flash_read16(src-1));
111 src++; 111 src++;
112 --len; 112 --len;
113 } 113 }
114 114
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 689d6a79ffc0..81159d708f86 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -692,8 +692,8 @@ static void pcmciamtd_detach(struct pcmcia_device *link)
692 692
693 if(dev->mtd_info) { 693 if(dev->mtd_info) {
694 del_mtd_device(dev->mtd_info); 694 del_mtd_device(dev->mtd_info);
695 info("mtd%d: Removing", dev->mtd_info->index);
695 map_destroy(dev->mtd_info); 696 map_destroy(dev->mtd_info);
696 info("mtd%d: Removed", dev->mtd_info->index);
697 } 697 }
698 698
699 pcmciamtd_release(link); 699 pcmciamtd_release(link);
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index d9603f7f9652..426461a5f0d4 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -264,8 +264,11 @@ static int __init physmap_init(void)
264 264
265 err = platform_driver_register(&physmap_flash_driver); 265 err = platform_driver_register(&physmap_flash_driver);
266#ifdef CONFIG_MTD_PHYSMAP_COMPAT 266#ifdef CONFIG_MTD_PHYSMAP_COMPAT
267 if (err == 0) 267 if (err == 0) {
268 platform_device_register(&physmap_flash); 268 err = platform_device_register(&physmap_flash);
269 if (err)
270 platform_driver_unregister(&physmap_flash_driver);
271 }
269#endif 272#endif
270 273
271 return err; 274 return err;
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 101ee6ead05c..bbdd21941905 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -218,7 +218,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
218 218
219 dev_set_drvdata(&dev->dev, info); 219 dev_set_drvdata(&dev->dev, info);
220 220
221 mtd_list = kzalloc(sizeof(struct mtd_info) * count, GFP_KERNEL); 221 mtd_list = kzalloc(sizeof(*mtd_list) * count, GFP_KERNEL);
222 if (!mtd_list) 222 if (!mtd_list)
223 goto err_flash_remove; 223 goto err_flash_remove;
224 224
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
index 60c068db452d..eb476b7f8d11 100644
--- a/drivers/mtd/maps/pismo.c
+++ b/drivers/mtd/maps/pismo.c
@@ -234,6 +234,7 @@ static int __devexit pismo_remove(struct i2c_client *client)
234 /* FIXME: set_vpp needs saner arguments */ 234 /* FIXME: set_vpp needs saner arguments */
235 pismo_setvpp_remove_fix(pismo); 235 pismo_setvpp_remove_fix(pismo);
236 236
237 i2c_set_clientdata(client, NULL);
237 kfree(pismo); 238 kfree(pismo);
238 239
239 return 0; 240 return 0;
@@ -272,7 +273,7 @@ static int __devinit pismo_probe(struct i2c_client *client,
272 ret = pismo_eeprom_read(client, &eeprom, 0, sizeof(eeprom)); 273 ret = pismo_eeprom_read(client, &eeprom, 0, sizeof(eeprom));
273 if (ret < 0) { 274 if (ret < 0) {
274 dev_err(&client->dev, "error reading EEPROM: %d\n", ret); 275 dev_err(&client->dev, "error reading EEPROM: %d\n", ret);
275 return ret; 276 goto exit_free;
276 } 277 }
277 278
278 dev_info(&client->dev, "%.15s board found\n", eeprom.board); 279 dev_info(&client->dev, "%.15s board found\n", eeprom.board);
@@ -283,6 +284,11 @@ static int __devinit pismo_probe(struct i2c_client *client,
283 pdata->cs_addrs[i]); 284 pdata->cs_addrs[i]);
284 285
285 return 0; 286 return 0;
287
288 exit_free:
289 i2c_set_clientdata(client, NULL);
290 kfree(pismo);
291 return ret;
286} 292}
287 293
288static const struct i2c_device_id pismo_id[] = { 294static const struct i2c_device_id pismo_id[] = {
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index c82e09bbc5fd..03e19c1965cc 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -14,7 +14,6 @@
14#include <linux/mtd/mtd.h> 14#include <linux/mtd/mtd.h>
15#include <linux/blkdev.h> 15#include <linux/blkdev.h>
16#include <linux/blkpg.h> 16#include <linux/blkpg.h>
17#include <linux/freezer.h>
18#include <linux/spinlock.h> 17#include <linux/spinlock.h>
19#include <linux/hdreg.h> 18#include <linux/hdreg.h>
20#include <linux/init.h> 19#include <linux/init.h>
@@ -25,12 +24,42 @@
25#include "mtdcore.h" 24#include "mtdcore.h"
26 25
27static LIST_HEAD(blktrans_majors); 26static LIST_HEAD(blktrans_majors);
27static DEFINE_MUTEX(blktrans_ref_mutex);
28
29void blktrans_dev_release(struct kref *kref)
30{
31 struct mtd_blktrans_dev *dev =
32 container_of(kref, struct mtd_blktrans_dev, ref);
33
34 dev->disk->private_data = NULL;
35 blk_cleanup_queue(dev->rq);
36 put_disk(dev->disk);
37 list_del(&dev->list);
38 kfree(dev);
39}
40
41static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
42{
43 struct mtd_blktrans_dev *dev;
44
45 mutex_lock(&blktrans_ref_mutex);
46 dev = disk->private_data;
47
48 if (!dev)
49 goto unlock;
50 kref_get(&dev->ref);
51unlock:
52 mutex_unlock(&blktrans_ref_mutex);
53 return dev;
54}
55
56void blktrans_dev_put(struct mtd_blktrans_dev *dev)
57{
58 mutex_lock(&blktrans_ref_mutex);
59 kref_put(&dev->ref, blktrans_dev_release);
60 mutex_unlock(&blktrans_ref_mutex);
61}
28 62
29struct mtd_blkcore_priv {
30 struct task_struct *thread;
31 struct request_queue *rq;
32 spinlock_t queue_lock;
33};
34 63
35static int do_blktrans_request(struct mtd_blktrans_ops *tr, 64static int do_blktrans_request(struct mtd_blktrans_ops *tr,
36 struct mtd_blktrans_dev *dev, 65 struct mtd_blktrans_dev *dev,
@@ -61,7 +90,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
61 return -EIO; 90 return -EIO;
62 rq_flush_dcache_pages(req); 91 rq_flush_dcache_pages(req);
63 return 0; 92 return 0;
64
65 case WRITE: 93 case WRITE:
66 if (!tr->writesect) 94 if (!tr->writesect)
67 return -EIO; 95 return -EIO;
@@ -71,7 +99,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
71 if (tr->writesect(dev, block, buf)) 99 if (tr->writesect(dev, block, buf))
72 return -EIO; 100 return -EIO;
73 return 0; 101 return 0;
74
75 default: 102 default:
76 printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req)); 103 printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
77 return -EIO; 104 return -EIO;
@@ -80,14 +107,13 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
80 107
81static int mtd_blktrans_thread(void *arg) 108static int mtd_blktrans_thread(void *arg)
82{ 109{
83 struct mtd_blktrans_ops *tr = arg; 110 struct mtd_blktrans_dev *dev = arg;
84 struct request_queue *rq = tr->blkcore_priv->rq; 111 struct request_queue *rq = dev->rq;
85 struct request *req = NULL; 112 struct request *req = NULL;
86 113
87 spin_lock_irq(rq->queue_lock); 114 spin_lock_irq(rq->queue_lock);
88 115
89 while (!kthread_should_stop()) { 116 while (!kthread_should_stop()) {
90 struct mtd_blktrans_dev *dev;
91 int res; 117 int res;
92 118
93 if (!req && !(req = blk_fetch_request(rq))) { 119 if (!req && !(req = blk_fetch_request(rq))) {
@@ -98,13 +124,10 @@ static int mtd_blktrans_thread(void *arg)
98 continue; 124 continue;
99 } 125 }
100 126
101 dev = req->rq_disk->private_data;
102 tr = dev->tr;
103
104 spin_unlock_irq(rq->queue_lock); 127 spin_unlock_irq(rq->queue_lock);
105 128
106 mutex_lock(&dev->lock); 129 mutex_lock(&dev->lock);
107 res = do_blktrans_request(tr, dev, req); 130 res = do_blktrans_request(dev->tr, dev, req);
108 mutex_unlock(&dev->lock); 131 mutex_unlock(&dev->lock);
109 132
110 spin_lock_irq(rq->queue_lock); 133 spin_lock_irq(rq->queue_lock);
@@ -123,81 +146,112 @@ static int mtd_blktrans_thread(void *arg)
123 146
124static void mtd_blktrans_request(struct request_queue *rq) 147static void mtd_blktrans_request(struct request_queue *rq)
125{ 148{
126 struct mtd_blktrans_ops *tr = rq->queuedata; 149 struct mtd_blktrans_dev *dev;
127 wake_up_process(tr->blkcore_priv->thread); 150 struct request *req = NULL;
128} 151
152 dev = rq->queuedata;
129 153
154 if (!dev)
155 while ((req = blk_fetch_request(rq)) != NULL)
156 __blk_end_request_all(req, -ENODEV);
157 else
158 wake_up_process(dev->thread);
159}
130 160
131static int blktrans_open(struct block_device *bdev, fmode_t mode) 161static int blktrans_open(struct block_device *bdev, fmode_t mode)
132{ 162{
133 struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data; 163 struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
134 struct mtd_blktrans_ops *tr = dev->tr; 164 int ret;
135 int ret = -ENODEV; 165
136 166 if (!dev)
137 if (!get_mtd_device(NULL, dev->mtd->index)) 167 return -ERESTARTSYS;
138 goto out; 168
139 169 mutex_lock(&dev->lock);
140 if (!try_module_get(tr->owner)) 170
141 goto out_tr; 171 if (!dev->mtd) {
142 172 ret = -ENXIO;
143 /* FIXME: Locking. A hot pluggable device can go away 173 goto unlock;
144 (del_mtd_device can be called for it) without its module
145 being unloaded. */
146 dev->mtd->usecount++;
147
148 ret = 0;
149 if (tr->open && (ret = tr->open(dev))) {
150 dev->mtd->usecount--;
151 put_mtd_device(dev->mtd);
152 out_tr:
153 module_put(tr->owner);
154 } 174 }
155 out: 175
176 ret = !dev->open++ && dev->tr->open ? dev->tr->open(dev) : 0;
177
178 /* Take another reference on the device so it won't go away till
179 last release */
180 if (!ret)
181 kref_get(&dev->ref);
182unlock:
183 mutex_unlock(&dev->lock);
184 blktrans_dev_put(dev);
156 return ret; 185 return ret;
157} 186}
158 187
159static int blktrans_release(struct gendisk *disk, fmode_t mode) 188static int blktrans_release(struct gendisk *disk, fmode_t mode)
160{ 189{
161 struct mtd_blktrans_dev *dev = disk->private_data; 190 struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
162 struct mtd_blktrans_ops *tr = dev->tr; 191 int ret = -ENXIO;
163 int ret = 0;
164 192
165 if (tr->release) 193 if (!dev)
166 ret = tr->release(dev); 194 return ret;
167 195
168 if (!ret) { 196 mutex_lock(&dev->lock);
169 dev->mtd->usecount--; 197
170 put_mtd_device(dev->mtd); 198 /* Release one reference, we sure its not the last one here*/
171 module_put(tr->owner); 199 kref_put(&dev->ref, blktrans_dev_release);
172 }
173 200
201 if (!dev->mtd)
202 goto unlock;
203
204 ret = !--dev->open && dev->tr->release ? dev->tr->release(dev) : 0;
205unlock:
206 mutex_unlock(&dev->lock);
207 blktrans_dev_put(dev);
174 return ret; 208 return ret;
175} 209}
176 210
177static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo) 211static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
178{ 212{
179 struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data; 213 struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
214 int ret = -ENXIO;
215
216 if (!dev)
217 return ret;
218
219 mutex_lock(&dev->lock);
220
221 if (!dev->mtd)
222 goto unlock;
180 223
181 if (dev->tr->getgeo) 224 ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0;
182 return dev->tr->getgeo(dev, geo); 225unlock:
183 return -ENOTTY; 226 mutex_unlock(&dev->lock);
227 blktrans_dev_put(dev);
228 return ret;
184} 229}
185 230
186static int blktrans_ioctl(struct block_device *bdev, fmode_t mode, 231static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
187 unsigned int cmd, unsigned long arg) 232 unsigned int cmd, unsigned long arg)
188{ 233{
189 struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data; 234 struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
190 struct mtd_blktrans_ops *tr = dev->tr; 235 int ret = -ENXIO;
236
237 if (!dev)
238 return ret;
239
240 mutex_lock(&dev->lock);
241
242 if (!dev->mtd)
243 goto unlock;
191 244
192 switch (cmd) { 245 switch (cmd) {
193 case BLKFLSBUF: 246 case BLKFLSBUF:
194 if (tr->flush) 247 ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
195 return tr->flush(dev);
196 /* The core code did the work, we had nothing to do. */
197 return 0;
198 default: 248 default:
199 return -ENOTTY; 249 ret = -ENOTTY;
200 } 250 }
251unlock:
252 mutex_unlock(&dev->lock);
253 blktrans_dev_put(dev);
254 return ret;
201} 255}
202 256
203static const struct block_device_operations mtd_blktrans_ops = { 257static const struct block_device_operations mtd_blktrans_ops = {
@@ -214,12 +268,14 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
214 struct mtd_blktrans_dev *d; 268 struct mtd_blktrans_dev *d;
215 int last_devnum = -1; 269 int last_devnum = -1;
216 struct gendisk *gd; 270 struct gendisk *gd;
271 int ret;
217 272
218 if (mutex_trylock(&mtd_table_mutex)) { 273 if (mutex_trylock(&mtd_table_mutex)) {
219 mutex_unlock(&mtd_table_mutex); 274 mutex_unlock(&mtd_table_mutex);
220 BUG(); 275 BUG();
221 } 276 }
222 277
278 mutex_lock(&blktrans_ref_mutex);
223 list_for_each_entry(d, &tr->devs, list) { 279 list_for_each_entry(d, &tr->devs, list) {
224 if (new->devnum == -1) { 280 if (new->devnum == -1) {
225 /* Use first free number */ 281 /* Use first free number */
@@ -231,6 +287,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
231 } 287 }
232 } else if (d->devnum == new->devnum) { 288 } else if (d->devnum == new->devnum) {
233 /* Required number taken */ 289 /* Required number taken */
290 mutex_unlock(&blktrans_ref_mutex);
234 return -EBUSY; 291 return -EBUSY;
235 } else if (d->devnum > new->devnum) { 292 } else if (d->devnum > new->devnum) {
236 /* Required number was free */ 293 /* Required number was free */
@@ -239,24 +296,38 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
239 } 296 }
240 last_devnum = d->devnum; 297 last_devnum = d->devnum;
241 } 298 }
299
300 ret = -EBUSY;
242 if (new->devnum == -1) 301 if (new->devnum == -1)
243 new->devnum = last_devnum+1; 302 new->devnum = last_devnum+1;
244 303
245 if ((new->devnum << tr->part_bits) > 256) { 304 /* Check that the device and any partitions will get valid
246 return -EBUSY; 305 * minor numbers and that the disk naming code below can cope
306 * with this number. */
307 if (new->devnum > (MINORMASK >> tr->part_bits) ||
308 (tr->part_bits && new->devnum >= 27 * 26)) {
309 mutex_unlock(&blktrans_ref_mutex);
310 goto error1;
247 } 311 }
248 312
249 list_add_tail(&new->list, &tr->devs); 313 list_add_tail(&new->list, &tr->devs);
250 added: 314 added:
315 mutex_unlock(&blktrans_ref_mutex);
316
251 mutex_init(&new->lock); 317 mutex_init(&new->lock);
318 kref_init(&new->ref);
252 if (!tr->writesect) 319 if (!tr->writesect)
253 new->readonly = 1; 320 new->readonly = 1;
254 321
322 /* Create gendisk */
323 ret = -ENOMEM;
255 gd = alloc_disk(1 << tr->part_bits); 324 gd = alloc_disk(1 << tr->part_bits);
256 if (!gd) { 325
257 list_del(&new->list); 326 if (!gd)
258 return -ENOMEM; 327 goto error2;
259 } 328
329 new->disk = gd;
330 gd->private_data = new;
260 gd->major = tr->major; 331 gd->major = tr->major;
261 gd->first_minor = (new->devnum) << tr->part_bits; 332 gd->first_minor = (new->devnum) << tr->part_bits;
262 gd->fops = &mtd_blktrans_ops; 333 gd->fops = &mtd_blktrans_ops;
@@ -274,13 +345,35 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
274 snprintf(gd->disk_name, sizeof(gd->disk_name), 345 snprintf(gd->disk_name, sizeof(gd->disk_name),
275 "%s%d", tr->name, new->devnum); 346 "%s%d", tr->name, new->devnum);
276 347
277 /* 2.5 has capacity in units of 512 bytes while still
278 having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
279 set_capacity(gd, (new->size * tr->blksize) >> 9); 348 set_capacity(gd, (new->size * tr->blksize) >> 9);
280 349
281 gd->private_data = new; 350 /* Create the request queue */
282 new->blkcore_priv = gd; 351 spin_lock_init(&new->queue_lock);
283 gd->queue = tr->blkcore_priv->rq; 352 new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);
353
354 if (!new->rq)
355 goto error3;
356
357 new->rq->queuedata = new;
358 blk_queue_logical_block_size(new->rq, tr->blksize);
359
360 if (tr->discard)
361 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
362 new->rq);
363
364 gd->queue = new->rq;
365
366 __get_mtd_device(new->mtd);
367 __module_get(tr->owner);
368
369 /* Create processing thread */
370 /* TODO: workqueue ? */
371 new->thread = kthread_run(mtd_blktrans_thread, new,
372 "%s%d", tr->name, new->mtd->index);
373 if (IS_ERR(new->thread)) {
374 ret = PTR_ERR(new->thread);
375 goto error4;
376 }
284 gd->driverfs_dev = &new->mtd->dev; 377 gd->driverfs_dev = &new->mtd->dev;
285 378
286 if (new->readonly) 379 if (new->readonly)
@@ -288,21 +381,65 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
288 381
289 add_disk(gd); 382 add_disk(gd);
290 383
384 if (new->disk_attributes) {
385 ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
386 new->disk_attributes);
387 WARN_ON(ret);
388 }
291 return 0; 389 return 0;
390error4:
391 module_put(tr->owner);
392 __put_mtd_device(new->mtd);
393 blk_cleanup_queue(new->rq);
394error3:
395 put_disk(new->disk);
396error2:
397 list_del(&new->list);
398error1:
399 kfree(new);
400 return ret;
292} 401}
293 402
294int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old) 403int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
295{ 404{
405 unsigned long flags;
406
296 if (mutex_trylock(&mtd_table_mutex)) { 407 if (mutex_trylock(&mtd_table_mutex)) {
297 mutex_unlock(&mtd_table_mutex); 408 mutex_unlock(&mtd_table_mutex);
298 BUG(); 409 BUG();
299 } 410 }
300 411
301 list_del(&old->list); 412 /* Stop new requests to arrive */
413 del_gendisk(old->disk);
414
415 if (old->disk_attributes)
416 sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
417 old->disk_attributes);
418
419 /* Stop the thread */
420 kthread_stop(old->thread);
421
422 /* Kill current requests */
423 spin_lock_irqsave(&old->queue_lock, flags);
424 old->rq->queuedata = NULL;
425 blk_start_queue(old->rq);
426 spin_unlock_irqrestore(&old->queue_lock, flags);
427
428 /* Ask trans driver for release to the mtd device */
429 mutex_lock(&old->lock);
430 if (old->open && old->tr->release) {
431 old->tr->release(old);
432 old->open = 0;
433 }
434
435 __put_mtd_device(old->mtd);
436 module_put(old->tr->owner);
302 437
303 del_gendisk(old->blkcore_priv); 438 /* At that point, we don't touch the mtd anymore */
304 put_disk(old->blkcore_priv); 439 old->mtd = NULL;
305 440
441 mutex_unlock(&old->lock);
442 blktrans_dev_put(old);
306 return 0; 443 return 0;
307} 444}
308 445
@@ -335,7 +472,8 @@ static struct mtd_notifier blktrans_notifier = {
335 472
336int register_mtd_blktrans(struct mtd_blktrans_ops *tr) 473int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
337{ 474{
338 int ret, i; 475 struct mtd_info *mtd;
476 int ret;
339 477
340 /* Register the notifier if/when the first device type is 478 /* Register the notifier if/when the first device type is
341 registered, to prevent the link/init ordering from fucking 479 registered, to prevent the link/init ordering from fucking
@@ -343,9 +481,6 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
343 if (!blktrans_notifier.list.next) 481 if (!blktrans_notifier.list.next)
344 register_mtd_user(&blktrans_notifier); 482 register_mtd_user(&blktrans_notifier);
345 483
346 tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
347 if (!tr->blkcore_priv)
348 return -ENOMEM;
349 484
350 mutex_lock(&mtd_table_mutex); 485 mutex_lock(&mtd_table_mutex);
351 486
@@ -353,49 +488,20 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
353 if (ret) { 488 if (ret) {
354 printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n", 489 printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
355 tr->name, tr->major, ret); 490 tr->name, tr->major, ret);
356 kfree(tr->blkcore_priv);
357 mutex_unlock(&mtd_table_mutex); 491 mutex_unlock(&mtd_table_mutex);
358 return ret; 492 return ret;
359 } 493 }
360 spin_lock_init(&tr->blkcore_priv->queue_lock);
361
362 tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
363 if (!tr->blkcore_priv->rq) {
364 unregister_blkdev(tr->major, tr->name);
365 kfree(tr->blkcore_priv);
366 mutex_unlock(&mtd_table_mutex);
367 return -ENOMEM;
368 }
369
370 tr->blkcore_priv->rq->queuedata = tr;
371 blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
372 if (tr->discard)
373 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
374 tr->blkcore_priv->rq);
375 494
376 tr->blkshift = ffs(tr->blksize) - 1; 495 tr->blkshift = ffs(tr->blksize) - 1;
377 496
378 tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
379 "%sd", tr->name);
380 if (IS_ERR(tr->blkcore_priv->thread)) {
381 ret = PTR_ERR(tr->blkcore_priv->thread);
382 blk_cleanup_queue(tr->blkcore_priv->rq);
383 unregister_blkdev(tr->major, tr->name);
384 kfree(tr->blkcore_priv);
385 mutex_unlock(&mtd_table_mutex);
386 return ret;
387 }
388
389 INIT_LIST_HEAD(&tr->devs); 497 INIT_LIST_HEAD(&tr->devs);
390 list_add(&tr->list, &blktrans_majors); 498 list_add(&tr->list, &blktrans_majors);
391 499
392 for (i=0; i<MAX_MTD_DEVICES; i++) { 500 mtd_for_each_device(mtd)
393 if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT) 501 if (mtd->type != MTD_ABSENT)
394 tr->add_mtd(tr, mtd_table[i]); 502 tr->add_mtd(tr, mtd);
395 }
396 503
397 mutex_unlock(&mtd_table_mutex); 504 mutex_unlock(&mtd_table_mutex);
398
399 return 0; 505 return 0;
400} 506}
401 507
@@ -405,22 +511,15 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
405 511
406 mutex_lock(&mtd_table_mutex); 512 mutex_lock(&mtd_table_mutex);
407 513
408 /* Clean up the kernel thread */
409 kthread_stop(tr->blkcore_priv->thread);
410
411 /* Remove it from the list of active majors */ 514 /* Remove it from the list of active majors */
412 list_del(&tr->list); 515 list_del(&tr->list);
413 516
414 list_for_each_entry_safe(dev, next, &tr->devs, list) 517 list_for_each_entry_safe(dev, next, &tr->devs, list)
415 tr->remove_dev(dev); 518 tr->remove_dev(dev);
416 519
417 blk_cleanup_queue(tr->blkcore_priv->rq);
418 unregister_blkdev(tr->major, tr->name); 520 unregister_blkdev(tr->major, tr->name);
419
420 mutex_unlock(&mtd_table_mutex); 521 mutex_unlock(&mtd_table_mutex);
421 522
422 kfree(tr->blkcore_priv);
423
424 BUG_ON(!list_empty(&tr->devs)); 523 BUG_ON(!list_empty(&tr->devs));
425 return 0; 524 return 0;
426} 525}
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 9f41b1a853c1..e6edbec609fd 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -19,15 +19,15 @@
19#include <linux/mutex.h> 19#include <linux/mutex.h>
20 20
21 21
22static struct mtdblk_dev { 22struct mtdblk_dev {
23 struct mtd_info *mtd; 23 struct mtd_blktrans_dev mbd;
24 int count; 24 int count;
25 struct mutex cache_mutex; 25 struct mutex cache_mutex;
26 unsigned char *cache_data; 26 unsigned char *cache_data;
27 unsigned long cache_offset; 27 unsigned long cache_offset;
28 unsigned int cache_size; 28 unsigned int cache_size;
29 enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state; 29 enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
30} *mtdblks[MAX_MTD_DEVICES]; 30};
31 31
32static struct mutex mtdblks_lock; 32static struct mutex mtdblks_lock;
33 33
@@ -98,7 +98,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
98 98
99static int write_cached_data (struct mtdblk_dev *mtdblk) 99static int write_cached_data (struct mtdblk_dev *mtdblk)
100{ 100{
101 struct mtd_info *mtd = mtdblk->mtd; 101 struct mtd_info *mtd = mtdblk->mbd.mtd;
102 int ret; 102 int ret;
103 103
104 if (mtdblk->cache_state != STATE_DIRTY) 104 if (mtdblk->cache_state != STATE_DIRTY)
@@ -128,7 +128,7 @@ static int write_cached_data (struct mtdblk_dev *mtdblk)
128static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos, 128static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
129 int len, const char *buf) 129 int len, const char *buf)
130{ 130{
131 struct mtd_info *mtd = mtdblk->mtd; 131 struct mtd_info *mtd = mtdblk->mbd.mtd;
132 unsigned int sect_size = mtdblk->cache_size; 132 unsigned int sect_size = mtdblk->cache_size;
133 size_t retlen; 133 size_t retlen;
134 int ret; 134 int ret;
@@ -198,7 +198,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
198static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos, 198static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
199 int len, char *buf) 199 int len, char *buf)
200{ 200{
201 struct mtd_info *mtd = mtdblk->mtd; 201 struct mtd_info *mtd = mtdblk->mbd.mtd;
202 unsigned int sect_size = mtdblk->cache_size; 202 unsigned int sect_size = mtdblk->cache_size;
203 size_t retlen; 203 size_t retlen;
204 int ret; 204 int ret;
@@ -244,16 +244,16 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
244static int mtdblock_readsect(struct mtd_blktrans_dev *dev, 244static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
245 unsigned long block, char *buf) 245 unsigned long block, char *buf)
246{ 246{
247 struct mtdblk_dev *mtdblk = mtdblks[dev->devnum]; 247 struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
248 return do_cached_read(mtdblk, block<<9, 512, buf); 248 return do_cached_read(mtdblk, block<<9, 512, buf);
249} 249}
250 250
251static int mtdblock_writesect(struct mtd_blktrans_dev *dev, 251static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
252 unsigned long block, char *buf) 252 unsigned long block, char *buf)
253{ 253{
254 struct mtdblk_dev *mtdblk = mtdblks[dev->devnum]; 254 struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
255 if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) { 255 if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
256 mtdblk->cache_data = vmalloc(mtdblk->mtd->erasesize); 256 mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
257 if (!mtdblk->cache_data) 257 if (!mtdblk->cache_data)
258 return -EINTR; 258 return -EINTR;
259 /* -EINTR is not really correct, but it is the best match 259 /* -EINTR is not really correct, but it is the best match
@@ -266,37 +266,26 @@ static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
266 266
267static int mtdblock_open(struct mtd_blktrans_dev *mbd) 267static int mtdblock_open(struct mtd_blktrans_dev *mbd)
268{ 268{
269 struct mtdblk_dev *mtdblk; 269 struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
270 struct mtd_info *mtd = mbd->mtd;
271 int dev = mbd->devnum;
272 270
273 DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n"); 271 DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");
274 272
275 mutex_lock(&mtdblks_lock); 273 mutex_lock(&mtdblks_lock);
276 if (mtdblks[dev]) { 274 if (mtdblk->count) {
277 mtdblks[dev]->count++; 275 mtdblk->count++;
278 mutex_unlock(&mtdblks_lock); 276 mutex_unlock(&mtdblks_lock);
279 return 0; 277 return 0;
280 } 278 }
281 279
282 /* OK, it's not open. Create cache info for it */ 280 /* OK, it's not open. Create cache info for it */
283 mtdblk = kzalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
284 if (!mtdblk) {
285 mutex_unlock(&mtdblks_lock);
286 return -ENOMEM;
287 }
288
289 mtdblk->count = 1; 281 mtdblk->count = 1;
290 mtdblk->mtd = mtd;
291
292 mutex_init(&mtdblk->cache_mutex); 282 mutex_init(&mtdblk->cache_mutex);
293 mtdblk->cache_state = STATE_EMPTY; 283 mtdblk->cache_state = STATE_EMPTY;
294 if ( !(mtdblk->mtd->flags & MTD_NO_ERASE) && mtdblk->mtd->erasesize) { 284 if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) {
295 mtdblk->cache_size = mtdblk->mtd->erasesize; 285 mtdblk->cache_size = mbd->mtd->erasesize;
296 mtdblk->cache_data = NULL; 286 mtdblk->cache_data = NULL;
297 } 287 }
298 288
299 mtdblks[dev] = mtdblk;
300 mutex_unlock(&mtdblks_lock); 289 mutex_unlock(&mtdblks_lock);
301 290
302 DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); 291 DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
@@ -306,8 +295,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
306 295
307static int mtdblock_release(struct mtd_blktrans_dev *mbd) 296static int mtdblock_release(struct mtd_blktrans_dev *mbd)
308{ 297{
309 int dev = mbd->devnum; 298 struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
310 struct mtdblk_dev *mtdblk = mtdblks[dev];
311 299
312 DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n"); 300 DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");
313 301
@@ -318,12 +306,10 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
318 mutex_unlock(&mtdblk->cache_mutex); 306 mutex_unlock(&mtdblk->cache_mutex);
319 307
320 if (!--mtdblk->count) { 308 if (!--mtdblk->count) {
321 /* It was the last usage. Free the device */ 309 /* It was the last usage. Free the cache */
322 mtdblks[dev] = NULL; 310 if (mbd->mtd->sync)
323 if (mtdblk->mtd->sync) 311 mbd->mtd->sync(mbd->mtd);
324 mtdblk->mtd->sync(mtdblk->mtd);
325 vfree(mtdblk->cache_data); 312 vfree(mtdblk->cache_data);
326 kfree(mtdblk);
327 } 313 }
328 314
329 mutex_unlock(&mtdblks_lock); 315 mutex_unlock(&mtdblks_lock);
@@ -335,40 +321,40 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
335 321
336static int mtdblock_flush(struct mtd_blktrans_dev *dev) 322static int mtdblock_flush(struct mtd_blktrans_dev *dev)
337{ 323{
338 struct mtdblk_dev *mtdblk = mtdblks[dev->devnum]; 324 struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
339 325
340 mutex_lock(&mtdblk->cache_mutex); 326 mutex_lock(&mtdblk->cache_mutex);
341 write_cached_data(mtdblk); 327 write_cached_data(mtdblk);
342 mutex_unlock(&mtdblk->cache_mutex); 328 mutex_unlock(&mtdblk->cache_mutex);
343 329
344 if (mtdblk->mtd->sync) 330 if (dev->mtd->sync)
345 mtdblk->mtd->sync(mtdblk->mtd); 331 dev->mtd->sync(dev->mtd);
346 return 0; 332 return 0;
347} 333}
348 334
349static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) 335static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
350{ 336{
351 struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL); 337 struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
352 338
353 if (!dev) 339 if (!dev)
354 return; 340 return;
355 341
356 dev->mtd = mtd; 342 dev->mbd.mtd = mtd;
357 dev->devnum = mtd->index; 343 dev->mbd.devnum = mtd->index;
358 344
359 dev->size = mtd->size >> 9; 345 dev->mbd.size = mtd->size >> 9;
360 dev->tr = tr; 346 dev->mbd.tr = tr;
361 347
362 if (!(mtd->flags & MTD_WRITEABLE)) 348 if (!(mtd->flags & MTD_WRITEABLE))
363 dev->readonly = 1; 349 dev->mbd.readonly = 1;
364 350
365 add_mtd_blktrans_dev(dev); 351 if (add_mtd_blktrans_dev(&dev->mbd))
352 kfree(dev);
366} 353}
367 354
368static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev) 355static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
369{ 356{
370 del_mtd_blktrans_dev(dev); 357 del_mtd_blktrans_dev(dev);
371 kfree(dev);
372} 358}
373 359
374static struct mtd_blktrans_ops mtdblock_tr = { 360static struct mtd_blktrans_ops mtdblock_tr = {
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index 852165f8b1c3..d0d3f79f9d03 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -43,13 +43,13 @@ static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
43 dev->tr = tr; 43 dev->tr = tr;
44 dev->readonly = 1; 44 dev->readonly = 1;
45 45
46 add_mtd_blktrans_dev(dev); 46 if (add_mtd_blktrans_dev(dev))
47 kfree(dev);
47} 48}
48 49
49static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev) 50static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
50{ 51{
51 del_mtd_blktrans_dev(dev); 52 del_mtd_blktrans_dev(dev);
52 kfree(dev);
53} 53}
54 54
55static struct mtd_blktrans_ops mtdblock_tr = { 55static struct mtd_blktrans_ops mtdblock_tr = {
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 5b081cb84351..c355491d1326 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -67,9 +67,6 @@ static int mtd_open(struct inode *inode, struct file *file)
67 67
68 DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n"); 68 DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");
69 69
70 if (devnum >= MAX_MTD_DEVICES)
71 return -ENODEV;
72
73 /* You can't open the RO devices RW */ 70 /* You can't open the RO devices RW */
74 if ((file->f_mode & FMODE_WRITE) && (minor & 1)) 71 if ((file->f_mode & FMODE_WRITE) && (minor & 1))
75 return -EACCES; 72 return -EACCES;
@@ -373,7 +370,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
373 if (!mtd->write_oob) 370 if (!mtd->write_oob)
374 ret = -EOPNOTSUPP; 371 ret = -EOPNOTSUPP;
375 else 372 else
376 ret = access_ok(VERIFY_READ, ptr, length) ? 0 : EFAULT; 373 ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
377 374
378 if (ret) 375 if (ret)
379 return ret; 376 return ret;
@@ -482,7 +479,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
482 { 479 {
483 uint32_t ur_idx; 480 uint32_t ur_idx;
484 struct mtd_erase_region_info *kr; 481 struct mtd_erase_region_info *kr;
485 struct region_info_user *ur = (struct region_info_user *) argp; 482 struct region_info_user __user *ur = argp;
486 483
487 if (get_user(ur_idx, &(ur->regionindex))) 484 if (get_user(ur_idx, &(ur->regionindex)))
488 return -EFAULT; 485 return -EFAULT;
@@ -958,7 +955,8 @@ static int __init init_mtdchar(void)
958{ 955{
959 int status; 956 int status;
960 957
961 status = register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops); 958 status = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
959 "mtd", &mtd_fops);
962 if (status < 0) { 960 if (status < 0) {
963 printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n", 961 printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
964 MTD_CHAR_MAJOR); 962 MTD_CHAR_MAJOR);
@@ -969,7 +967,7 @@ static int __init init_mtdchar(void)
969 967
970static void __exit cleanup_mtdchar(void) 968static void __exit cleanup_mtdchar(void)
971{ 969{
972 unregister_chrdev(MTD_CHAR_MAJOR, "mtd"); 970 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
973} 971}
974 972
975module_init(init_mtdchar); 973module_init(init_mtdchar);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index b177e750efc3..3ae06c8935b5 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/mtd/compatmac.h> 20#include <linux/mtd/compatmac.h>
21#include <linux/proc_fs.h> 21#include <linux/proc_fs.h>
22#include <linux/idr.h>
22#include <linux/backing-dev.h> 23#include <linux/backing-dev.h>
23 24
24#include <linux/mtd/mtd.h> 25#include <linux/mtd/mtd.h>
@@ -63,13 +64,18 @@ static struct class mtd_class = {
63 .resume = mtd_cls_resume, 64 .resume = mtd_cls_resume,
64}; 65};
65 66
67static DEFINE_IDR(mtd_idr);
68
66/* These are exported solely for the purpose of mtd_blkdevs.c. You 69/* These are exported solely for the purpose of mtd_blkdevs.c. You
67 should not use them for _anything_ else */ 70 should not use them for _anything_ else */
68DEFINE_MUTEX(mtd_table_mutex); 71DEFINE_MUTEX(mtd_table_mutex);
69struct mtd_info *mtd_table[MAX_MTD_DEVICES];
70
71EXPORT_SYMBOL_GPL(mtd_table_mutex); 72EXPORT_SYMBOL_GPL(mtd_table_mutex);
72EXPORT_SYMBOL_GPL(mtd_table); 73
74struct mtd_info *__mtd_next_device(int i)
75{
76 return idr_get_next(&mtd_idr, &i);
77}
78EXPORT_SYMBOL_GPL(__mtd_next_device);
73 79
74static LIST_HEAD(mtd_notifiers); 80static LIST_HEAD(mtd_notifiers);
75 81
@@ -265,13 +271,13 @@ static struct device_type mtd_devtype = {
265 * Add a device to the list of MTD devices present in the system, and 271 * Add a device to the list of MTD devices present in the system, and
266 * notify each currently active MTD 'user' of its arrival. Returns 272 * notify each currently active MTD 'user' of its arrival. Returns
267 * zero on success or 1 on failure, which currently will only happen 273 * zero on success or 1 on failure, which currently will only happen
268 * if the number of present devices exceeds MAX_MTD_DEVICES (i.e. 16) 274 * if there is insufficient memory or a sysfs error.
269 * or there's a sysfs error.
270 */ 275 */
271 276
272int add_mtd_device(struct mtd_info *mtd) 277int add_mtd_device(struct mtd_info *mtd)
273{ 278{
274 int i; 279 struct mtd_notifier *not;
280 int i, error;
275 281
276 if (!mtd->backing_dev_info) { 282 if (!mtd->backing_dev_info) {
277 switch (mtd->type) { 283 switch (mtd->type) {
@@ -290,70 +296,73 @@ int add_mtd_device(struct mtd_info *mtd)
290 BUG_ON(mtd->writesize == 0); 296 BUG_ON(mtd->writesize == 0);
291 mutex_lock(&mtd_table_mutex); 297 mutex_lock(&mtd_table_mutex);
292 298
293 for (i=0; i < MAX_MTD_DEVICES; i++) 299 do {
294 if (!mtd_table[i]) { 300 if (!idr_pre_get(&mtd_idr, GFP_KERNEL))
295 struct mtd_notifier *not; 301 goto fail_locked;
296 302 error = idr_get_new(&mtd_idr, mtd, &i);
297 mtd_table[i] = mtd; 303 } while (error == -EAGAIN);
298 mtd->index = i;
299 mtd->usecount = 0;
300
301 if (is_power_of_2(mtd->erasesize))
302 mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
303 else
304 mtd->erasesize_shift = 0;
305
306 if (is_power_of_2(mtd->writesize))
307 mtd->writesize_shift = ffs(mtd->writesize) - 1;
308 else
309 mtd->writesize_shift = 0;
310
311 mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
312 mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
313
314 /* Some chips always power up locked. Unlock them now */
315 if ((mtd->flags & MTD_WRITEABLE)
316 && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
317 if (mtd->unlock(mtd, 0, mtd->size))
318 printk(KERN_WARNING
319 "%s: unlock failed, "
320 "writes may not work\n",
321 mtd->name);
322 }
323 304
324 /* Caller should have set dev.parent to match the 305 if (error)
325 * physical device. 306 goto fail_locked;
326 */
327 mtd->dev.type = &mtd_devtype;
328 mtd->dev.class = &mtd_class;
329 mtd->dev.devt = MTD_DEVT(i);
330 dev_set_name(&mtd->dev, "mtd%d", i);
331 dev_set_drvdata(&mtd->dev, mtd);
332 if (device_register(&mtd->dev) != 0) {
333 mtd_table[i] = NULL;
334 break;
335 }
336 307
337 if (MTD_DEVT(i)) 308 mtd->index = i;
338 device_create(&mtd_class, mtd->dev.parent, 309 mtd->usecount = 0;
339 MTD_DEVT(i) + 1, 310
340 NULL, "mtd%dro", i); 311 if (is_power_of_2(mtd->erasesize))
341 312 mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
342 DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name); 313 else
343 /* No need to get a refcount on the module containing 314 mtd->erasesize_shift = 0;
344 the notifier, since we hold the mtd_table_mutex */ 315
345 list_for_each_entry(not, &mtd_notifiers, list) 316 if (is_power_of_2(mtd->writesize))
346 not->add(mtd); 317 mtd->writesize_shift = ffs(mtd->writesize) - 1;
347 318 else
348 mutex_unlock(&mtd_table_mutex); 319 mtd->writesize_shift = 0;
349 /* We _know_ we aren't being removed, because 320
350 our caller is still holding us here. So none 321 mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
351 of this try_ nonsense, and no bitching about it 322 mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
352 either. :) */ 323
353 __module_get(THIS_MODULE); 324 /* Some chips always power up locked. Unlock them now */
354 return 0; 325 if ((mtd->flags & MTD_WRITEABLE)
355 } 326 && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
327 if (mtd->unlock(mtd, 0, mtd->size))
328 printk(KERN_WARNING
329 "%s: unlock failed, writes may not work\n",
330 mtd->name);
331 }
332
333 /* Caller should have set dev.parent to match the
334 * physical device.
335 */
336 mtd->dev.type = &mtd_devtype;
337 mtd->dev.class = &mtd_class;
338 mtd->dev.devt = MTD_DEVT(i);
339 dev_set_name(&mtd->dev, "mtd%d", i);
340 dev_set_drvdata(&mtd->dev, mtd);
341 if (device_register(&mtd->dev) != 0)
342 goto fail_added;
343
344 if (MTD_DEVT(i))
345 device_create(&mtd_class, mtd->dev.parent,
346 MTD_DEVT(i) + 1,
347 NULL, "mtd%dro", i);
348
349 DEBUG(0, "mtd: Giving out device %d to %s\n", i, mtd->name);
350 /* No need to get a refcount on the module containing
351 the notifier, since we hold the mtd_table_mutex */
352 list_for_each_entry(not, &mtd_notifiers, list)
353 not->add(mtd);
354
355 mutex_unlock(&mtd_table_mutex);
356 /* We _know_ we aren't being removed, because
357 our caller is still holding us here. So none
358 of this try_ nonsense, and no bitching about it
359 either. :) */
360 __module_get(THIS_MODULE);
361 return 0;
356 362
363fail_added:
364 idr_remove(&mtd_idr, i);
365fail_locked:
357 mutex_unlock(&mtd_table_mutex); 366 mutex_unlock(&mtd_table_mutex);
358 return 1; 367 return 1;
359} 368}
@@ -371,31 +380,34 @@ int add_mtd_device(struct mtd_info *mtd)
371int del_mtd_device (struct mtd_info *mtd) 380int del_mtd_device (struct mtd_info *mtd)
372{ 381{
373 int ret; 382 int ret;
383 struct mtd_notifier *not;
374 384
375 mutex_lock(&mtd_table_mutex); 385 mutex_lock(&mtd_table_mutex);
376 386
377 if (mtd_table[mtd->index] != mtd) { 387 if (idr_find(&mtd_idr, mtd->index) != mtd) {
378 ret = -ENODEV; 388 ret = -ENODEV;
379 } else if (mtd->usecount) { 389 goto out_error;
390 }
391
392 /* No need to get a refcount on the module containing
393 the notifier, since we hold the mtd_table_mutex */
394 list_for_each_entry(not, &mtd_notifiers, list)
395 not->remove(mtd);
396
397 if (mtd->usecount) {
380 printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n", 398 printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
381 mtd->index, mtd->name, mtd->usecount); 399 mtd->index, mtd->name, mtd->usecount);
382 ret = -EBUSY; 400 ret = -EBUSY;
383 } else { 401 } else {
384 struct mtd_notifier *not;
385
386 device_unregister(&mtd->dev); 402 device_unregister(&mtd->dev);
387 403
388 /* No need to get a refcount on the module containing 404 idr_remove(&mtd_idr, mtd->index);
389 the notifier, since we hold the mtd_table_mutex */
390 list_for_each_entry(not, &mtd_notifiers, list)
391 not->remove(mtd);
392
393 mtd_table[mtd->index] = NULL;
394 405
395 module_put(THIS_MODULE); 406 module_put(THIS_MODULE);
396 ret = 0; 407 ret = 0;
397 } 408 }
398 409
410out_error:
399 mutex_unlock(&mtd_table_mutex); 411 mutex_unlock(&mtd_table_mutex);
400 return ret; 412 return ret;
401} 413}
@@ -411,7 +423,7 @@ int del_mtd_device (struct mtd_info *mtd)
411 423
412void register_mtd_user (struct mtd_notifier *new) 424void register_mtd_user (struct mtd_notifier *new)
413{ 425{
414 int i; 426 struct mtd_info *mtd;
415 427
416 mutex_lock(&mtd_table_mutex); 428 mutex_lock(&mtd_table_mutex);
417 429
@@ -419,9 +431,8 @@ void register_mtd_user (struct mtd_notifier *new)
419 431
420 __module_get(THIS_MODULE); 432 __module_get(THIS_MODULE);
421 433
422 for (i=0; i< MAX_MTD_DEVICES; i++) 434 mtd_for_each_device(mtd)
423 if (mtd_table[i]) 435 new->add(mtd);
424 new->add(mtd_table[i]);
425 436
426 mutex_unlock(&mtd_table_mutex); 437 mutex_unlock(&mtd_table_mutex);
427} 438}
@@ -438,15 +449,14 @@ void register_mtd_user (struct mtd_notifier *new)
438 449
439int unregister_mtd_user (struct mtd_notifier *old) 450int unregister_mtd_user (struct mtd_notifier *old)
440{ 451{
441 int i; 452 struct mtd_info *mtd;
442 453
443 mutex_lock(&mtd_table_mutex); 454 mutex_lock(&mtd_table_mutex);
444 455
445 module_put(THIS_MODULE); 456 module_put(THIS_MODULE);
446 457
447 for (i=0; i< MAX_MTD_DEVICES; i++) 458 mtd_for_each_device(mtd)
448 if (mtd_table[i]) 459 old->remove(mtd);
449 old->remove(mtd_table[i]);
450 460
451 list_del(&old->list); 461 list_del(&old->list);
452 mutex_unlock(&mtd_table_mutex); 462 mutex_unlock(&mtd_table_mutex);
@@ -468,42 +478,56 @@ int unregister_mtd_user (struct mtd_notifier *old)
468 478
469struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num) 479struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
470{ 480{
471 struct mtd_info *ret = NULL; 481 struct mtd_info *ret = NULL, *other;
472 int i, err = -ENODEV; 482 int err = -ENODEV;
473 483
474 mutex_lock(&mtd_table_mutex); 484 mutex_lock(&mtd_table_mutex);
475 485
476 if (num == -1) { 486 if (num == -1) {
477 for (i=0; i< MAX_MTD_DEVICES; i++) 487 mtd_for_each_device(other) {
478 if (mtd_table[i] == mtd) 488 if (other == mtd) {
479 ret = mtd_table[i]; 489 ret = mtd;
480 } else if (num >= 0 && num < MAX_MTD_DEVICES) { 490 break;
481 ret = mtd_table[num]; 491 }
492 }
493 } else if (num >= 0) {
494 ret = idr_find(&mtd_idr, num);
482 if (mtd && mtd != ret) 495 if (mtd && mtd != ret)
483 ret = NULL; 496 ret = NULL;
484 } 497 }
485 498
486 if (!ret) 499 if (!ret) {
487 goto out_unlock; 500 ret = ERR_PTR(err);
488 501 goto out;
489 if (!try_module_get(ret->owner))
490 goto out_unlock;
491
492 if (ret->get_device) {
493 err = ret->get_device(ret);
494 if (err)
495 goto out_put;
496 } 502 }
497 503
498 ret->usecount++; 504 err = __get_mtd_device(ret);
505 if (err)
506 ret = ERR_PTR(err);
507out:
499 mutex_unlock(&mtd_table_mutex); 508 mutex_unlock(&mtd_table_mutex);
500 return ret; 509 return ret;
510}
501 511
502out_put: 512
503 module_put(ret->owner); 513int __get_mtd_device(struct mtd_info *mtd)
504out_unlock: 514{
505 mutex_unlock(&mtd_table_mutex); 515 int err;
506 return ERR_PTR(err); 516
517 if (!try_module_get(mtd->owner))
518 return -ENODEV;
519
520 if (mtd->get_device) {
521
522 err = mtd->get_device(mtd);
523
524 if (err) {
525 module_put(mtd->owner);
526 return err;
527 }
528 }
529 mtd->usecount++;
530 return 0;
507} 531}
508 532
509/** 533/**
@@ -517,14 +541,14 @@ out_unlock:
517 541
518struct mtd_info *get_mtd_device_nm(const char *name) 542struct mtd_info *get_mtd_device_nm(const char *name)
519{ 543{
520 int i, err = -ENODEV; 544 int err = -ENODEV;
521 struct mtd_info *mtd = NULL; 545 struct mtd_info *mtd = NULL, *other;
522 546
523 mutex_lock(&mtd_table_mutex); 547 mutex_lock(&mtd_table_mutex);
524 548
525 for (i = 0; i < MAX_MTD_DEVICES; i++) { 549 mtd_for_each_device(other) {
526 if (mtd_table[i] && !strcmp(name, mtd_table[i]->name)) { 550 if (!strcmp(name, other->name)) {
527 mtd = mtd_table[i]; 551 mtd = other;
528 break; 552 break;
529 } 553 }
530 } 554 }
@@ -554,14 +578,19 @@ out_unlock:
554 578
555void put_mtd_device(struct mtd_info *mtd) 579void put_mtd_device(struct mtd_info *mtd)
556{ 580{
557 int c;
558
559 mutex_lock(&mtd_table_mutex); 581 mutex_lock(&mtd_table_mutex);
560 c = --mtd->usecount; 582 __put_mtd_device(mtd);
583 mutex_unlock(&mtd_table_mutex);
584
585}
586
587void __put_mtd_device(struct mtd_info *mtd)
588{
589 --mtd->usecount;
590 BUG_ON(mtd->usecount < 0);
591
561 if (mtd->put_device) 592 if (mtd->put_device)
562 mtd->put_device(mtd); 593 mtd->put_device(mtd);
563 mutex_unlock(&mtd_table_mutex);
564 BUG_ON(c < 0);
565 594
566 module_put(mtd->owner); 595 module_put(mtd->owner);
567} 596}
@@ -599,7 +628,9 @@ EXPORT_SYMBOL_GPL(add_mtd_device);
599EXPORT_SYMBOL_GPL(del_mtd_device); 628EXPORT_SYMBOL_GPL(del_mtd_device);
600EXPORT_SYMBOL_GPL(get_mtd_device); 629EXPORT_SYMBOL_GPL(get_mtd_device);
601EXPORT_SYMBOL_GPL(get_mtd_device_nm); 630EXPORT_SYMBOL_GPL(get_mtd_device_nm);
631EXPORT_SYMBOL_GPL(__get_mtd_device);
602EXPORT_SYMBOL_GPL(put_mtd_device); 632EXPORT_SYMBOL_GPL(put_mtd_device);
633EXPORT_SYMBOL_GPL(__put_mtd_device);
603EXPORT_SYMBOL_GPL(register_mtd_user); 634EXPORT_SYMBOL_GPL(register_mtd_user);
604EXPORT_SYMBOL_GPL(unregister_mtd_user); 635EXPORT_SYMBOL_GPL(unregister_mtd_user);
605EXPORT_SYMBOL_GPL(default_mtd_writev); 636EXPORT_SYMBOL_GPL(default_mtd_writev);
@@ -611,14 +642,9 @@ EXPORT_SYMBOL_GPL(default_mtd_writev);
611 642
612static struct proc_dir_entry *proc_mtd; 643static struct proc_dir_entry *proc_mtd;
613 644
614static inline int mtd_proc_info (char *buf, int i) 645static inline int mtd_proc_info(char *buf, struct mtd_info *this)
615{ 646{
616 struct mtd_info *this = mtd_table[i]; 647 return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", this->index,
617
618 if (!this)
619 return 0;
620
621 return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", i,
622 (unsigned long long)this->size, 648 (unsigned long long)this->size,
623 this->erasesize, this->name); 649 this->erasesize, this->name);
624} 650}
@@ -626,15 +652,15 @@ static inline int mtd_proc_info (char *buf, int i)
626static int mtd_read_proc (char *page, char **start, off_t off, int count, 652static int mtd_read_proc (char *page, char **start, off_t off, int count,
627 int *eof, void *data_unused) 653 int *eof, void *data_unused)
628{ 654{
629 int len, l, i; 655 struct mtd_info *mtd;
656 int len, l;
630 off_t begin = 0; 657 off_t begin = 0;
631 658
632 mutex_lock(&mtd_table_mutex); 659 mutex_lock(&mtd_table_mutex);
633 660
634 len = sprintf(page, "dev: size erasesize name\n"); 661 len = sprintf(page, "dev: size erasesize name\n");
635 for (i=0; i< MAX_MTD_DEVICES; i++) { 662 mtd_for_each_device(mtd) {
636 663 l = mtd_proc_info(page + len, mtd);
637 l = mtd_proc_info(page + len, i);
638 len += l; 664 len += l;
639 if (len+begin > off+count) 665 if (len+begin > off+count)
640 goto done; 666 goto done;
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index a33251f4b872..6a64fdebc898 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -8,4 +8,9 @@
8 should not use them for _anything_ else */ 8 should not use them for _anything_ else */
9 9
10extern struct mutex mtd_table_mutex; 10extern struct mutex mtd_table_mutex;
11extern struct mtd_info *mtd_table[MAX_MTD_DEVICES]; 11extern struct mtd_info *__mtd_next_device(int i);
12
13#define mtd_for_each_device(mtd) \
14 for ((mtd) = __mtd_next_device(0); \
15 (mtd) != NULL; \
16 (mtd) = __mtd_next_device(mtd->index + 1))
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 92e12df0917f..328313c3dccb 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -429,11 +429,6 @@ static int __init mtdoops_init(void)
429 mtd_index = simple_strtoul(mtddev, &endp, 0); 429 mtd_index = simple_strtoul(mtddev, &endp, 0);
430 if (*endp == '\0') 430 if (*endp == '\0')
431 cxt->mtd_index = mtd_index; 431 cxt->mtd_index = mtd_index;
432 if (cxt->mtd_index > MAX_MTD_DEVICES) {
433 printk(KERN_ERR "mtdoops: invalid mtd device number (%u) given\n",
434 mtd_index);
435 return -EINVAL;
436 }
437 432
438 cxt->oops_buf = vmalloc(record_size); 433 cxt->oops_buf = vmalloc(record_size);
439 if (!cxt->oops_buf) { 434 if (!cxt->oops_buf) {
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 7c003191fca4..bd9a443ccf69 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -152,18 +152,12 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags,
152 DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n", 152 DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n",
153 dev_name + 4); 153 dev_name + 4);
154 154
155 for (mtdnr = 0; mtdnr < MAX_MTD_DEVICES; mtdnr++) { 155 mtd = get_mtd_device_nm(dev_name + 4);
156 mtd = get_mtd_device(NULL, mtdnr); 156 if (!IS_ERR(mtd))
157 if (!IS_ERR(mtd)) { 157 return get_sb_mtd_aux(
158 if (!strcmp(mtd->name, dev_name + 4)) 158 fs_type, flags,
159 return get_sb_mtd_aux( 159 dev_name, data, mtd,
160 fs_type, flags, 160 fill_super, mnt);
161 dev_name, data, mtd,
162 fill_super, mnt);
163
164 put_mtd_device(mtd);
165 }
166 }
167 161
168 printk(KERN_NOTICE "MTD:" 162 printk(KERN_NOTICE "MTD:"
169 " MTD device with name \"%s\" not found.\n", 163 " MTD device with name \"%s\" not found.\n",
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 42e5ea49e975..8f402d46a362 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -2,11 +2,23 @@ menuconfig MTD_NAND
2 tristate "NAND Device Support" 2 tristate "NAND Device Support"
3 depends on MTD 3 depends on MTD
4 select MTD_NAND_IDS 4 select MTD_NAND_IDS
5 select MTD_NAND_ECC
5 help 6 help
6 This enables support for accessing all type of NAND flash 7 This enables support for accessing all type of NAND flash
7 devices. For further information see 8 devices. For further information see
8 <http://www.linux-mtd.infradead.org/doc/nand.html>. 9 <http://www.linux-mtd.infradead.org/doc/nand.html>.
9 10
11config MTD_NAND_ECC
12 tristate
13
14config MTD_NAND_ECC_SMC
15 bool "NAND ECC Smart Media byte order"
16 depends on MTD_NAND_ECC
17 default n
18 help
19 Software ECC according to the Smart Media Specification.
20 The original Linux implementation had byte 0 and 1 swapped.
21
10if MTD_NAND 22if MTD_NAND
11 23
12config MTD_NAND_VERIFY_WRITE 24config MTD_NAND_VERIFY_WRITE
@@ -18,12 +30,9 @@ config MTD_NAND_VERIFY_WRITE
18 device thinks the write was successful, a bit could have been 30 device thinks the write was successful, a bit could have been
19 flipped accidentally due to device wear or something else. 31 flipped accidentally due to device wear or something else.
20 32
21config MTD_NAND_ECC_SMC 33config MTD_SM_COMMON
22 bool "NAND ECC Smart Media byte order" 34 tristate
23 default n 35 default n
24 help
25 Software ECC according to the Smart Media Specification.
26 The original Linux implementation had byte 0 and 1 swapped.
27 36
28config MTD_NAND_MUSEUM_IDS 37config MTD_NAND_MUSEUM_IDS
29 bool "Enable chip ids for obsolete ancient NAND devices" 38 bool "Enable chip ids for obsolete ancient NAND devices"
@@ -95,15 +104,21 @@ config MTD_NAND_OMAP_PREFETCH_DMA
95 or in DMA interrupt mode. 104 or in DMA interrupt mode.
96 Say y for DMA mode or MPU mode will be used 105 Say y for DMA mode or MPU mode will be used
97 106
98config MTD_NAND_TS7250
99 tristate "NAND Flash device on TS-7250 board"
100 depends on MACH_TS72XX
101 help
102 Support for NAND flash on Technologic Systems TS-7250 platform.
103
104config MTD_NAND_IDS 107config MTD_NAND_IDS
105 tristate 108 tristate
106 109
110config MTD_NAND_RICOH
111 tristate "Ricoh xD card reader"
112 default n
113 depends on PCI
114 select MTD_SM_COMMON
115 help
116 Enable support for Ricoh R5C852 xD card reader
117 You also need to enable ether
118 NAND SSFDC (SmartMedia) read only translation layer' or new
119 expermental, readwrite
120 'SmartMedia/xD new translation layer'
121
107config MTD_NAND_AU1550 122config MTD_NAND_AU1550
108 tristate "Au1550/1200 NAND support" 123 tristate "Au1550/1200 NAND support"
109 depends on SOC_AU1200 || SOC_AU1550 124 depends on SOC_AU1200 || SOC_AU1550
@@ -358,8 +373,6 @@ config MTD_NAND_ATMEL_ECC_NONE
358 373
359 If unsure, say N 374 If unsure, say N
360 375
361 endchoice
362
363endchoice 376endchoice
364 377
365config MTD_NAND_PXA3xx 378config MTD_NAND_PXA3xx
@@ -442,6 +455,13 @@ config MTD_NAND_FSL_UPM
442 Enables support for NAND Flash chips wired onto Freescale PowerPC 455 Enables support for NAND Flash chips wired onto Freescale PowerPC
443 processor localbus with User-Programmable Machine support. 456 processor localbus with User-Programmable Machine support.
444 457
458config MTD_NAND_MPC5121_NFC
459 tristate "MPC5121 built-in NAND Flash Controller support"
460 depends on PPC_MPC512x
461 help
462 This enables the driver for the NAND flash controller on the
463 MPC5121 SoC.
464
445config MTD_NAND_MXC 465config MTD_NAND_MXC
446 tristate "MXC NAND support" 466 tristate "MXC NAND support"
447 depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3 467 depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3
@@ -481,11 +501,11 @@ config MTD_NAND_SOCRATES
481 help 501 help
482 Enables support for NAND Flash chips wired onto Socrates board. 502 Enables support for NAND Flash chips wired onto Socrates board.
483 503
484config MTD_NAND_W90P910 504config MTD_NAND_NUC900
485 tristate "Support for NAND on w90p910 evaluation board." 505 tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
486 depends on ARCH_W90X900 && MTD_PARTITIONS 506 depends on ARCH_W90X900 && MTD_PARTITIONS
487 help 507 help
488 This enables the driver for the NAND Flash on evaluation board based 508 This enables the driver for the NAND Flash on evaluation board based
489 on w90p910. 509 on w90p910 / NUC9xx.
490 510
491endif # MTD_NAND 511endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 1407bd144015..04bccf9d7b53 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -2,8 +2,10 @@
2# linux/drivers/nand/Makefile 2# linux/drivers/nand/Makefile
3# 3#
4 4
5obj-$(CONFIG_MTD_NAND) += nand.o nand_ecc.o 5obj-$(CONFIG_MTD_NAND) += nand.o
6obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o
6obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o 7obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
8obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
7 9
8obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o 10obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
9obj-$(CONFIG_MTD_NAND_SPIA) += spia.o 11obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
@@ -19,7 +21,6 @@ obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
19obj-$(CONFIG_MTD_NAND_H1900) += h1910.o 21obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
20obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o 22obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
21obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o 23obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
22obj-$(CONFIG_MTD_NAND_TS7250) += ts7250.o
23obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o 24obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
24obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o 25obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
25obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o 26obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
@@ -39,8 +40,10 @@ obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
39obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o 40obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
40obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o 41obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
41obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o 42obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
42obj-$(CONFIG_MTD_NAND_W90P910) += w90p910_nand.o 43obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o
43obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o 44obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
44obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o 45obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o
46obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
47obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
45 48
46nand-objs := nand_base.o nand_bbt.o 49nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 2d6773281fd9..8691e0482ed2 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -49,7 +49,7 @@
49 49
50#define TIMEOUT HZ 50#define TIMEOUT HZ
51 51
52static struct usb_device_id alauda_table [] = { 52static const struct usb_device_id alauda_table[] = {
53 { USB_DEVICE(0x0584, 0x0008) }, /* Fujifilm DPC-R1 */ 53 { USB_DEVICE(0x0584, 0x0008) }, /* Fujifilm DPC-R1 */
54 { USB_DEVICE(0x07b4, 0x010a) }, /* Olympus MAUSB-10 */ 54 { USB_DEVICE(0x07b4, 0x010a) }, /* Olympus MAUSB-10 */
55 { } 55 { }
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 524e6c9e0672..04d30887ca7f 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -474,7 +474,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
474 } 474 }
475 475
476 /* first scan to find the device and get the page size */ 476 /* first scan to find the device and get the page size */
477 if (nand_scan_ident(mtd, 1)) { 477 if (nand_scan_ident(mtd, 1, NULL)) {
478 res = -ENXIO; 478 res = -ENXIO;
479 goto err_scan_ident; 479 goto err_scan_ident;
480 } 480 }
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 43d46e424040..3ffe05db4923 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -451,7 +451,7 @@ static int __init au1xxx_nand_init(void)
451 u32 nand_phys; 451 u32 nand_phys;
452 452
453 /* Allocate memory for MTD device structure and private data */ 453 /* Allocate memory for MTD device structure and private data */
454 au1550_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); 454 au1550_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
455 if (!au1550_mtd) { 455 if (!au1550_mtd) {
456 printk("Unable to allocate NAND MTD dev structure.\n"); 456 printk("Unable to allocate NAND MTD dev structure.\n");
457 return -ENOMEM; 457 return -ENOMEM;
@@ -460,10 +460,6 @@ static int __init au1xxx_nand_init(void)
460 /* Get pointer to private data */ 460 /* Get pointer to private data */
461 this = (struct nand_chip *)(&au1550_mtd[1]); 461 this = (struct nand_chip *)(&au1550_mtd[1]);
462 462
463 /* Initialize structures */
464 memset(au1550_mtd, 0, sizeof(struct mtd_info));
465 memset(this, 0, sizeof(struct nand_chip));
466
467 /* Link the private data with the MTD structure */ 463 /* Link the private data with the MTD structure */
468 au1550_mtd->priv = this; 464 au1550_mtd->priv = this;
469 au1550_mtd->owner = THIS_MODULE; 465 au1550_mtd->owner = THIS_MODULE;
@@ -544,7 +540,7 @@ static int __init au1xxx_nand_init(void)
544 } 540 }
545 nand_phys = (mem_staddr << 4) & 0xFFFC0000; 541 nand_phys = (mem_staddr << 4) & 0xFFFC0000;
546 542
547 p_nand = (void __iomem *)ioremap(nand_phys, 0x1000); 543 p_nand = ioremap(nand_phys, 0x1000);
548 544
549 /* make controller and MTD agree */ 545 /* make controller and MTD agree */
550 if (NAND_CS == 0) 546 if (NAND_CS == 0)
@@ -589,7 +585,7 @@ static int __init au1xxx_nand_init(void)
589 return 0; 585 return 0;
590 586
591 outio: 587 outio:
592 iounmap((void *)p_nand); 588 iounmap(p_nand);
593 589
594 outmem: 590 outmem:
595 kfree(au1550_mtd); 591 kfree(au1550_mtd);
@@ -610,7 +606,7 @@ static void __exit au1550_cleanup(void)
610 kfree(au1550_mtd); 606 kfree(au1550_mtd);
611 607
612 /* Unmap */ 608 /* Unmap */
613 iounmap((void *)p_nand); 609 iounmap(p_nand);
614} 610}
615 611
616module_exit(au1550_cleanup); 612module_exit(au1550_cleanup);
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index c997f98eeb3d..dfe262c726fb 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -13,7 +13,6 @@
13*****************************************************************************/ 13*****************************************************************************/
14 14
15/* ---- Include Files ---------------------------------------------------- */ 15/* ---- Include Files ---------------------------------------------------- */
16#include <linux/version.h>
17#include <linux/module.h> 16#include <linux/module.h>
18#include <linux/types.h> 17#include <linux/types.h>
19#include <linux/init.h> 18#include <linux/init.h>
@@ -447,7 +446,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
447 * layout we'll be using. 446 * layout we'll be using.
448 */ 447 */
449 448
450 err = nand_scan_ident(board_mtd, 1); 449 err = nand_scan_ident(board_mtd, 1, NULL);
451 if (err) { 450 if (err) {
452 printk(KERN_ERR "nand_scan failed: %d\n", err); 451 printk(KERN_ERR "nand_scan failed: %d\n", err);
453 iounmap(bcm_umi_io_base); 452 iounmap(bcm_umi_io_base);
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 8506e7e606fd..2974995e194d 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -68,6 +68,27 @@
68#define DRV_AUTHOR "Bryan Wu <bryan.wu@analog.com>" 68#define DRV_AUTHOR "Bryan Wu <bryan.wu@analog.com>"
69#define DRV_DESC "BF5xx on-chip NAND FLash Controller Driver" 69#define DRV_DESC "BF5xx on-chip NAND FLash Controller Driver"
70 70
71/* NFC_STAT Masks */
72#define NBUSY 0x01 /* Not Busy */
73#define WB_FULL 0x02 /* Write Buffer Full */
74#define PG_WR_STAT 0x04 /* Page Write Pending */
75#define PG_RD_STAT 0x08 /* Page Read Pending */
76#define WB_EMPTY 0x10 /* Write Buffer Empty */
77
78/* NFC_IRQSTAT Masks */
79#define NBUSYIRQ 0x01 /* Not Busy IRQ */
80#define WB_OVF 0x02 /* Write Buffer Overflow */
81#define WB_EDGE 0x04 /* Write Buffer Edge Detect */
82#define RD_RDY 0x08 /* Read Data Ready */
83#define WR_DONE 0x10 /* Page Write Done */
84
85/* NFC_RST Masks */
86#define ECC_RST 0x01 /* ECC (and NFC counters) Reset */
87
88/* NFC_PGCTL Masks */
89#define PG_RD_START 0x01 /* Page Read Start */
90#define PG_WR_START 0x02 /* Page Write Start */
91
71#ifdef CONFIG_MTD_NAND_BF5XX_HWECC 92#ifdef CONFIG_MTD_NAND_BF5XX_HWECC
72static int hardware_ecc = 1; 93static int hardware_ecc = 1;
73#else 94#else
@@ -487,7 +508,7 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
487 * transferred to generate the correct ECC register 508 * transferred to generate the correct ECC register
488 * values. 509 * values.
489 */ 510 */
490 bfin_write_NFC_RST(0x1); 511 bfin_write_NFC_RST(ECC_RST);
491 SSYNC(); 512 SSYNC();
492 513
493 disable_dma(CH_NFC); 514 disable_dma(CH_NFC);
@@ -497,7 +518,7 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
497 set_dma_config(CH_NFC, 0x0); 518 set_dma_config(CH_NFC, 0x0);
498 set_dma_start_addr(CH_NFC, (unsigned long) buf); 519 set_dma_start_addr(CH_NFC, (unsigned long) buf);
499 520
500/* The DMAs have different size on BF52x and BF54x */ 521 /* The DMAs have different size on BF52x and BF54x */
501#ifdef CONFIG_BF52x 522#ifdef CONFIG_BF52x
502 set_dma_x_count(CH_NFC, (page_size >> 1)); 523 set_dma_x_count(CH_NFC, (page_size >> 1));
503 set_dma_x_modify(CH_NFC, 2); 524 set_dma_x_modify(CH_NFC, 2);
@@ -517,9 +538,9 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
517 538
518 /* Start PAGE read/write operation */ 539 /* Start PAGE read/write operation */
519 if (is_read) 540 if (is_read)
520 bfin_write_NFC_PGCTL(0x1); 541 bfin_write_NFC_PGCTL(PG_RD_START);
521 else 542 else
522 bfin_write_NFC_PGCTL(0x2); 543 bfin_write_NFC_PGCTL(PG_WR_START);
523 wait_for_completion(&info->dma_completion); 544 wait_for_completion(&info->dma_completion);
524} 545}
525 546
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index e5a9f9ccea60..db1dfc5a1b11 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -762,7 +762,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
762 cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK)); 762 cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK));
763 763
764 /* Scan to find existence of the device */ 764 /* Scan to find existence of the device */
765 if (nand_scan_ident(mtd, 2)) { 765 if (nand_scan_ident(mtd, 2, NULL)) {
766 err = -ENXIO; 766 err = -ENXIO;
767 goto out_irq; 767 goto out_irq;
768 } 768 }
@@ -849,7 +849,7 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev)
849 kfree(mtd); 849 kfree(mtd);
850} 850}
851 851
852static struct pci_device_id cafe_nand_tbl[] = { 852static const struct pci_device_id cafe_nand_tbl[] = {
853 { PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_88ALP01_NAND, 853 { PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_88ALP01_NAND,
854 PCI_ANY_ID, PCI_ANY_ID }, 854 PCI_ANY_ID, PCI_ANY_ID },
855 { } 855 { }
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 76e2dc8e62f7..9c9d893affeb 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -567,8 +567,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
567 goto err_nomem; 567 goto err_nomem;
568 } 568 }
569 569
570 vaddr = ioremap(res1->start, res1->end - res1->start); 570 vaddr = ioremap(res1->start, resource_size(res1));
571 base = ioremap(res2->start, res2->end - res2->start); 571 base = ioremap(res2->start, resource_size(res2));
572 if (!vaddr || !base) { 572 if (!vaddr || !base) {
573 dev_err(&pdev->dev, "ioremap failed\n"); 573 dev_err(&pdev->dev, "ioremap failed\n");
574 ret = -EINVAL; 574 ret = -EINVAL;
@@ -691,7 +691,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
691 spin_unlock_irq(&davinci_nand_lock); 691 spin_unlock_irq(&davinci_nand_lock);
692 692
693 /* Scan to find existence of the device(s) */ 693 /* Scan to find existence of the device(s) */
694 ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1); 694 ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1, NULL);
695 if (ret < 0) { 695 if (ret < 0) {
696 dev_dbg(&pdev->dev, "no NAND chip(s) found\n"); 696 dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
697 goto err_scan; 697 goto err_scan;
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index ae30fb6eed97..3f38fb8e6666 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -874,7 +874,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
874 priv->ctrl = ctrl; 874 priv->ctrl = ctrl;
875 priv->dev = ctrl->dev; 875 priv->dev = ctrl->dev;
876 876
877 priv->vbase = ioremap(res.start, res.end - res.start + 1); 877 priv->vbase = ioremap(res.start, resource_size(&res));
878 if (!priv->vbase) { 878 if (!priv->vbase) {
879 dev_err(ctrl->dev, "failed to map chip region\n"); 879 dev_err(ctrl->dev, "failed to map chip region\n");
880 ret = -ENOMEM; 880 ret = -ENOMEM;
@@ -891,7 +891,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
891 if (ret) 891 if (ret)
892 goto err; 892 goto err;
893 893
894 ret = nand_scan_ident(&priv->mtd, 1); 894 ret = nand_scan_ident(&priv->mtd, 1, NULL);
895 if (ret) 895 if (ret)
896 goto err; 896 goto err;
897 897
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 4b96296af321..2d215ccb564d 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -49,7 +49,10 @@ struct fsl_upm_nand {
49 uint32_t wait_flags; 49 uint32_t wait_flags;
50}; 50};
51 51
52#define to_fsl_upm_nand(mtd) container_of(mtd, struct fsl_upm_nand, mtd) 52static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo)
53{
54 return container_of(mtdinfo, struct fsl_upm_nand, mtd);
55}
53 56
54static int fun_chip_ready(struct mtd_info *mtd) 57static int fun_chip_ready(struct mtd_info *mtd)
55{ 58{
@@ -303,7 +306,7 @@ static int __devinit fun_probe(struct of_device *ofdev,
303 FSL_UPM_WAIT_WRITE_BYTE; 306 FSL_UPM_WAIT_WRITE_BYTE;
304 307
305 fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start, 308 fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
306 io_res.end - io_res.start + 1); 309 resource_size(&io_res));
307 if (!fun->io_base) { 310 if (!fun->io_base) {
308 ret = -ENOMEM; 311 ret = -ENOMEM;
309 goto err2; 312 goto err2;
@@ -350,7 +353,7 @@ static int __devexit fun_remove(struct of_device *ofdev)
350 return 0; 353 return 0;
351} 354}
352 355
353static struct of_device_id of_fun_match[] = { 356static const struct of_device_id of_fun_match[] = {
354 { .compatible = "fsl,upm-nand" }, 357 { .compatible = "fsl,upm-nand" },
355 {}, 358 {},
356}; 359};
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 8f902e75aa85..0cde618bcc1e 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -181,11 +181,11 @@ static int __devexit gpio_nand_remove(struct platform_device *dev)
181 res = platform_get_resource(dev, IORESOURCE_MEM, 1); 181 res = platform_get_resource(dev, IORESOURCE_MEM, 1);
182 iounmap(gpiomtd->io_sync); 182 iounmap(gpiomtd->io_sync);
183 if (res) 183 if (res)
184 release_mem_region(res->start, res->end - res->start + 1); 184 release_mem_region(res->start, resource_size(res));
185 185
186 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 186 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
187 iounmap(gpiomtd->nand_chip.IO_ADDR_R); 187 iounmap(gpiomtd->nand_chip.IO_ADDR_R);
188 release_mem_region(res->start, res->end - res->start + 1); 188 release_mem_region(res->start, resource_size(res));
189 189
190 if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) 190 if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
191 gpio_set_value(gpiomtd->plat.gpio_nwp, 0); 191 gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
@@ -208,14 +208,14 @@ static void __iomem *request_and_remap(struct resource *res, size_t size,
208{ 208{
209 void __iomem *ptr; 209 void __iomem *ptr;
210 210
211 if (!request_mem_region(res->start, res->end - res->start + 1, name)) { 211 if (!request_mem_region(res->start, resource_size(res), name)) {
212 *err = -EBUSY; 212 *err = -EBUSY;
213 return NULL; 213 return NULL;
214 } 214 }
215 215
216 ptr = ioremap(res->start, size); 216 ptr = ioremap(res->start, size);
217 if (!ptr) { 217 if (!ptr) {
218 release_mem_region(res->start, res->end - res->start + 1); 218 release_mem_region(res->start, resource_size(res));
219 *err = -ENOMEM; 219 *err = -ENOMEM;
220 } 220 }
221 return ptr; 221 return ptr;
@@ -338,10 +338,10 @@ err_nwp:
338err_nce: 338err_nce:
339 iounmap(gpiomtd->io_sync); 339 iounmap(gpiomtd->io_sync);
340 if (res1) 340 if (res1)
341 release_mem_region(res1->start, res1->end - res1->start + 1); 341 release_mem_region(res1->start, resource_size(res1));
342err_sync: 342err_sync:
343 iounmap(gpiomtd->nand_chip.IO_ADDR_R); 343 iounmap(gpiomtd->nand_chip.IO_ADDR_R);
344 release_mem_region(res0->start, res0->end - res0->start + 1); 344 release_mem_region(res0->start, resource_size(res0));
345err_map: 345err_map:
346 kfree(gpiomtd); 346 kfree(gpiomtd);
347 return ret; 347 return ret;
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
new file mode 100644
index 000000000000..f713b15fa454
--- /dev/null
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -0,0 +1,916 @@
1/*
2 * Copyright 2004-2008 Freescale Semiconductor, Inc.
3 * Copyright 2009 Semihalf.
4 *
5 * Approved as OSADL project by a majority of OSADL members and funded
6 * by OSADL membership fees in 2009; for details see www.osadl.org.
7 *
8 * Based on original driver from Freescale Semiconductor
9 * written by John Rigby <jrigby@freescale.com> on basis
10 * of drivers/mtd/nand/mxc_nand.c. Reworked and extended
11 * Piotr Ziecik <kosmo@semihalf.com>.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version 2
16 * of the License, or (at your option) any later version.
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
25 * MA 02110-1301, USA.
26 */
27
28#include <linux/module.h>
29#include <linux/clk.h>
30#include <linux/delay.h>
31#include <linux/init.h>
32#include <linux/interrupt.h>
33#include <linux/io.h>
34#include <linux/mtd/mtd.h>
35#include <linux/mtd/nand.h>
36#include <linux/mtd/partitions.h>
37#include <linux/of_device.h>
38#include <linux/of_platform.h>
39
40#include <asm/mpc5121.h>
41
42/* Addresses for NFC MAIN RAM BUFFER areas */
43#define NFC_MAIN_AREA(n) ((n) * 0x200)
44
45/* Addresses for NFC SPARE BUFFER areas */
46#define NFC_SPARE_BUFFERS 8
47#define NFC_SPARE_LEN 0x40
48#define NFC_SPARE_AREA(n) (0x1000 + ((n) * NFC_SPARE_LEN))
49
50/* MPC5121 NFC registers */
51#define NFC_BUF_ADDR 0x1E04
52#define NFC_FLASH_ADDR 0x1E06
53#define NFC_FLASH_CMD 0x1E08
54#define NFC_CONFIG 0x1E0A
55#define NFC_ECC_STATUS1 0x1E0C
56#define NFC_ECC_STATUS2 0x1E0E
57#define NFC_SPAS 0x1E10
58#define NFC_WRPROT 0x1E12
59#define NFC_NF_WRPRST 0x1E18
60#define NFC_CONFIG1 0x1E1A
61#define NFC_CONFIG2 0x1E1C
62#define NFC_UNLOCKSTART_BLK0 0x1E20
63#define NFC_UNLOCKEND_BLK0 0x1E22
64#define NFC_UNLOCKSTART_BLK1 0x1E24
65#define NFC_UNLOCKEND_BLK1 0x1E26
66#define NFC_UNLOCKSTART_BLK2 0x1E28
67#define NFC_UNLOCKEND_BLK2 0x1E2A
68#define NFC_UNLOCKSTART_BLK3 0x1E2C
69#define NFC_UNLOCKEND_BLK3 0x1E2E
70
71/* Bit Definitions: NFC_BUF_ADDR */
72#define NFC_RBA_MASK (7 << 0)
73#define NFC_ACTIVE_CS_SHIFT 5
74#define NFC_ACTIVE_CS_MASK (3 << NFC_ACTIVE_CS_SHIFT)
75
76/* Bit Definitions: NFC_CONFIG */
77#define NFC_BLS_UNLOCKED (1 << 1)
78
79/* Bit Definitions: NFC_CONFIG1 */
80#define NFC_ECC_4BIT (1 << 0)
81#define NFC_FULL_PAGE_DMA (1 << 1)
82#define NFC_SPARE_ONLY (1 << 2)
83#define NFC_ECC_ENABLE (1 << 3)
84#define NFC_INT_MASK (1 << 4)
85#define NFC_BIG_ENDIAN (1 << 5)
86#define NFC_RESET (1 << 6)
87#define NFC_CE (1 << 7)
88#define NFC_ONE_CYCLE (1 << 8)
89#define NFC_PPB_32 (0 << 9)
90#define NFC_PPB_64 (1 << 9)
91#define NFC_PPB_128 (2 << 9)
92#define NFC_PPB_256 (3 << 9)
93#define NFC_PPB_MASK (3 << 9)
94#define NFC_FULL_PAGE_INT (1 << 11)
95
96/* Bit Definitions: NFC_CONFIG2 */
97#define NFC_COMMAND (1 << 0)
98#define NFC_ADDRESS (1 << 1)
99#define NFC_INPUT (1 << 2)
100#define NFC_OUTPUT (1 << 3)
101#define NFC_ID (1 << 4)
102#define NFC_STATUS (1 << 5)
103#define NFC_CMD_FAIL (1 << 15)
104#define NFC_INT (1 << 15)
105
106/* Bit Definitions: NFC_WRPROT */
107#define NFC_WPC_LOCK_TIGHT (1 << 0)
108#define NFC_WPC_LOCK (1 << 1)
109#define NFC_WPC_UNLOCK (1 << 2)
110
111#define DRV_NAME "mpc5121_nfc"
112
113/* Timeouts */
114#define NFC_RESET_TIMEOUT 1000 /* 1 ms */
115#define NFC_TIMEOUT (HZ / 10) /* 1/10 s */
116
117struct mpc5121_nfc_prv {
118 struct mtd_info mtd;
119 struct nand_chip chip;
120 int irq;
121 void __iomem *regs;
122 struct clk *clk;
123 wait_queue_head_t irq_waitq;
124 uint column;
125 int spareonly;
126 void __iomem *csreg;
127 struct device *dev;
128};
129
130static void mpc5121_nfc_done(struct mtd_info *mtd);
131
132#ifdef CONFIG_MTD_PARTITIONS
133static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL };
134#endif
135
136/* Read NFC register */
137static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
138{
139 struct nand_chip *chip = mtd->priv;
140 struct mpc5121_nfc_prv *prv = chip->priv;
141
142 return in_be16(prv->regs + reg);
143}
144
145/* Write NFC register */
146static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val)
147{
148 struct nand_chip *chip = mtd->priv;
149 struct mpc5121_nfc_prv *prv = chip->priv;
150
151 out_be16(prv->regs + reg, val);
152}
153
154/* Set bits in NFC register */
155static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits)
156{
157 nfc_write(mtd, reg, nfc_read(mtd, reg) | bits);
158}
159
160/* Clear bits in NFC register */
161static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits)
162{
163 nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits);
164}
165
166/* Invoke address cycle */
167static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr)
168{
169 nfc_write(mtd, NFC_FLASH_ADDR, addr);
170 nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS);
171 mpc5121_nfc_done(mtd);
172}
173
174/* Invoke command cycle */
175static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd)
176{
177 nfc_write(mtd, NFC_FLASH_CMD, cmd);
178 nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND);
179 mpc5121_nfc_done(mtd);
180}
181
182/* Send data from NFC buffers to NAND flash */
183static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd)
184{
185 nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
186 nfc_write(mtd, NFC_CONFIG2, NFC_INPUT);
187 mpc5121_nfc_done(mtd);
188}
189
190/* Receive data from NAND flash */
191static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd)
192{
193 nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
194 nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT);
195 mpc5121_nfc_done(mtd);
196}
197
198/* Receive ID from NAND flash */
199static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd)
200{
201 nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
202 nfc_write(mtd, NFC_CONFIG2, NFC_ID);
203 mpc5121_nfc_done(mtd);
204}
205
206/* Receive status from NAND flash */
207static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd)
208{
209 nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
210 nfc_write(mtd, NFC_CONFIG2, NFC_STATUS);
211 mpc5121_nfc_done(mtd);
212}
213
214/* NFC interrupt handler */
215static irqreturn_t mpc5121_nfc_irq(int irq, void *data)
216{
217 struct mtd_info *mtd = data;
218 struct nand_chip *chip = mtd->priv;
219 struct mpc5121_nfc_prv *prv = chip->priv;
220
221 nfc_set(mtd, NFC_CONFIG1, NFC_INT_MASK);
222 wake_up(&prv->irq_waitq);
223
224 return IRQ_HANDLED;
225}
226
227/* Wait for operation complete */
228static void mpc5121_nfc_done(struct mtd_info *mtd)
229{
230 struct nand_chip *chip = mtd->priv;
231 struct mpc5121_nfc_prv *prv = chip->priv;
232 int rv;
233
234 if ((nfc_read(mtd, NFC_CONFIG2) & NFC_INT) == 0) {
235 nfc_clear(mtd, NFC_CONFIG1, NFC_INT_MASK);
236 rv = wait_event_timeout(prv->irq_waitq,
237 (nfc_read(mtd, NFC_CONFIG2) & NFC_INT), NFC_TIMEOUT);
238
239 if (!rv)
240 dev_warn(prv->dev,
241 "Timeout while waiting for interrupt.\n");
242 }
243
244 nfc_clear(mtd, NFC_CONFIG2, NFC_INT);
245}
246
247/* Do address cycle(s) */
248static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page)
249{
250 struct nand_chip *chip = mtd->priv;
251 u32 pagemask = chip->pagemask;
252
253 if (column != -1) {
254 mpc5121_nfc_send_addr(mtd, column);
255 if (mtd->writesize > 512)
256 mpc5121_nfc_send_addr(mtd, column >> 8);
257 }
258
259 if (page != -1) {
260 do {
261 mpc5121_nfc_send_addr(mtd, page & 0xFF);
262 page >>= 8;
263 pagemask >>= 8;
264 } while (pagemask);
265 }
266}
267
268/* Control chip select signals */
269static void mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip)
270{
271 if (chip < 0) {
272 nfc_clear(mtd, NFC_CONFIG1, NFC_CE);
273 return;
274 }
275
276 nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK);
277 nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) &
278 NFC_ACTIVE_CS_MASK);
279 nfc_set(mtd, NFC_CONFIG1, NFC_CE);
280}
281
282/* Init external chip select logic on ADS5121 board */
283static int ads5121_chipselect_init(struct mtd_info *mtd)
284{
285 struct nand_chip *chip = mtd->priv;
286 struct mpc5121_nfc_prv *prv = chip->priv;
287 struct device_node *dn;
288
289 dn = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld");
290 if (dn) {
291 prv->csreg = of_iomap(dn, 0);
292 of_node_put(dn);
293 if (!prv->csreg)
294 return -ENOMEM;
295
296 /* CPLD Register 9 controls NAND /CE Lines */
297 prv->csreg += 9;
298 return 0;
299 }
300
301 return -EINVAL;
302}
303
304/* Control chips select signal on ADS5121 board */
305static void ads5121_select_chip(struct mtd_info *mtd, int chip)
306{
307 struct nand_chip *nand = mtd->priv;
308 struct mpc5121_nfc_prv *prv = nand->priv;
309 u8 v;
310
311 v = in_8(prv->csreg);
312 v |= 0x0F;
313
314 if (chip >= 0) {
315 mpc5121_nfc_select_chip(mtd, 0);
316 v &= ~(1 << chip);
317 } else
318 mpc5121_nfc_select_chip(mtd, -1);
319
320 out_8(prv->csreg, v);
321}
322
323/* Read NAND Ready/Busy signal */
324static int mpc5121_nfc_dev_ready(struct mtd_info *mtd)
325{
326 /*
327 * NFC handles ready/busy signal internally. Therefore, this function
328 * always returns status as ready.
329 */
330 return 1;
331}
332
333/* Write command to NAND flash */
334static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command,
335 int column, int page)
336{
337 struct nand_chip *chip = mtd->priv;
338 struct mpc5121_nfc_prv *prv = chip->priv;
339
340 prv->column = (column >= 0) ? column : 0;
341 prv->spareonly = 0;
342
343 switch (command) {
344 case NAND_CMD_PAGEPROG:
345 mpc5121_nfc_send_prog_page(mtd);
346 break;
347 /*
348 * NFC does not support sub-page reads and writes,
349 * so emulate them using full page transfers.
350 */
351 case NAND_CMD_READ0:
352 column = 0;
353 break;
354
355 case NAND_CMD_READ1:
356 prv->column += 256;
357 command = NAND_CMD_READ0;
358 column = 0;
359 break;
360
361 case NAND_CMD_READOOB:
362 prv->spareonly = 1;
363 command = NAND_CMD_READ0;
364 column = 0;
365 break;
366
367 case NAND_CMD_SEQIN:
368 mpc5121_nfc_command(mtd, NAND_CMD_READ0, column, page);
369 column = 0;
370 break;
371
372 case NAND_CMD_ERASE1:
373 case NAND_CMD_ERASE2:
374 case NAND_CMD_READID:
375 case NAND_CMD_STATUS:
376 break;
377
378 default:
379 return;
380 }
381
382 mpc5121_nfc_send_cmd(mtd, command);
383 mpc5121_nfc_addr_cycle(mtd, column, page);
384
385 switch (command) {
386 case NAND_CMD_READ0:
387 if (mtd->writesize > 512)
388 mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART);
389 mpc5121_nfc_send_read_page(mtd);
390 break;
391
392 case NAND_CMD_READID:
393 mpc5121_nfc_send_read_id(mtd);
394 break;
395
396 case NAND_CMD_STATUS:
397 mpc5121_nfc_send_read_status(mtd);
398 if (chip->options & NAND_BUSWIDTH_16)
399 prv->column = 1;
400 else
401 prv->column = 0;
402 break;
403 }
404}
405
406/* Copy data from/to NFC spare buffers. */
407static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset,
408 u8 *buffer, uint size, int wr)
409{
410 struct nand_chip *nand = mtd->priv;
411 struct mpc5121_nfc_prv *prv = nand->priv;
412 uint o, s, sbsize, blksize;
413
414 /*
415 * NAND spare area is available through NFC spare buffers.
416 * The NFC divides spare area into (page_size / 512) chunks.
417 * Each chunk is placed into separate spare memory area, using
418 * first (spare_size / num_of_chunks) bytes of the buffer.
419 *
420 * For NAND device in which the spare area is not divided fully
421 * by the number of chunks, number of used bytes in each spare
422 * buffer is rounded down to the nearest even number of bytes,
423 * and all remaining bytes are added to the last used spare area.
424 *
425 * For more information read section 26.6.10 of MPC5121e
426 * Microcontroller Reference Manual, Rev. 3.
427 */
428
429 /* Calculate number of valid bytes in each spare buffer */
430 sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1;
431
432 while (size) {
433 /* Calculate spare buffer number */
434 s = offset / sbsize;
435 if (s > NFC_SPARE_BUFFERS - 1)
436 s = NFC_SPARE_BUFFERS - 1;
437
438 /*
439 * Calculate offset to requested data block in selected spare
440 * buffer and its size.
441 */
442 o = offset - (s * sbsize);
443 blksize = min(sbsize - o, size);
444
445 if (wr)
446 memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o,
447 buffer, blksize);
448 else
449 memcpy_fromio(buffer,
450 prv->regs + NFC_SPARE_AREA(s) + o, blksize);
451
452 buffer += blksize;
453 offset += blksize;
454 size -= blksize;
455 };
456}
457
458/* Copy data from/to NFC main and spare buffers */
459static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len,
460 int wr)
461{
462 struct nand_chip *chip = mtd->priv;
463 struct mpc5121_nfc_prv *prv = chip->priv;
464 uint c = prv->column;
465 uint l;
466
467 /* Handle spare area access */
468 if (prv->spareonly || c >= mtd->writesize) {
469 /* Calculate offset from beginning of spare area */
470 if (c >= mtd->writesize)
471 c -= mtd->writesize;
472
473 prv->column += len;
474 mpc5121_nfc_copy_spare(mtd, c, buf, len, wr);
475 return;
476 }
477
478 /*
479 * Handle main area access - limit copy length to prevent
480 * crossing main/spare boundary.
481 */
482 l = min((uint)len, mtd->writesize - c);
483 prv->column += l;
484
485 if (wr)
486 memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l);
487 else
488 memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l);
489
490 /* Handle crossing main/spare boundary */
491 if (l != len) {
492 buf += l;
493 len -= l;
494 mpc5121_nfc_buf_copy(mtd, buf, len, wr);
495 }
496}
497
498/* Read data from NFC buffers */
499static void mpc5121_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len)
500{
501 mpc5121_nfc_buf_copy(mtd, buf, len, 0);
502}
503
504/* Write data to NFC buffers */
505static void mpc5121_nfc_write_buf(struct mtd_info *mtd,
506 const u_char *buf, int len)
507{
508 mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1);
509}
510
511/* Compare buffer with NAND flash */
512static int mpc5121_nfc_verify_buf(struct mtd_info *mtd,
513 const u_char *buf, int len)
514{
515 u_char tmp[256];
516 uint bsize;
517
518 while (len) {
519 bsize = min(len, 256);
520 mpc5121_nfc_read_buf(mtd, tmp, bsize);
521
522 if (memcmp(buf, tmp, bsize))
523 return 1;
524
525 buf += bsize;
526 len -= bsize;
527 }
528
529 return 0;
530}
531
532/* Read byte from NFC buffers */
533static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd)
534{
535 u8 tmp;
536
537 mpc5121_nfc_read_buf(mtd, &tmp, sizeof(tmp));
538
539 return tmp;
540}
541
542/* Read word from NFC buffers */
543static u16 mpc5121_nfc_read_word(struct mtd_info *mtd)
544{
545 u16 tmp;
546
547 mpc5121_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp));
548
549 return tmp;
550}
551
552/*
553 * Read NFC configuration from Reset Config Word
554 *
555 * NFC is configured during reset in basis of information stored
556 * in Reset Config Word. There is no other way to set NAND block
557 * size, spare size and bus width.
558 */
559static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
560{
561 struct nand_chip *chip = mtd->priv;
562 struct mpc5121_nfc_prv *prv = chip->priv;
563 struct mpc512x_reset_module *rm;
564 struct device_node *rmnode;
565 uint rcw_pagesize = 0;
566 uint rcw_sparesize = 0;
567 uint rcw_width;
568 uint rcwh;
569 uint romloc, ps;
570
571 rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset");
572 if (!rmnode) {
573 dev_err(prv->dev, "Missing 'fsl,mpc5121-reset' "
574 "node in device tree!\n");
575 return -ENODEV;
576 }
577
578 rm = of_iomap(rmnode, 0);
579 if (!rm) {
580 dev_err(prv->dev, "Error mapping reset module node!\n");
581 return -EBUSY;
582 }
583
584 rcwh = in_be32(&rm->rcwhr);
585
586 /* Bit 6: NFC bus width */
587 rcw_width = ((rcwh >> 6) & 0x1) ? 2 : 1;
588
589 /* Bit 7: NFC Page/Spare size */
590 ps = (rcwh >> 7) & 0x1;
591
592 /* Bits [22:21]: ROM Location */
593 romloc = (rcwh >> 21) & 0x3;
594
595 /* Decode RCW bits */
596 switch ((ps << 2) | romloc) {
597 case 0x00:
598 case 0x01:
599 rcw_pagesize = 512;
600 rcw_sparesize = 16;
601 break;
602 case 0x02:
603 case 0x03:
604 rcw_pagesize = 4096;
605 rcw_sparesize = 128;
606 break;
607 case 0x04:
608 case 0x05:
609 rcw_pagesize = 2048;
610 rcw_sparesize = 64;
611 break;
612 case 0x06:
613 case 0x07:
614 rcw_pagesize = 4096;
615 rcw_sparesize = 218;
616 break;
617 }
618
619 mtd->writesize = rcw_pagesize;
620 mtd->oobsize = rcw_sparesize;
621 if (rcw_width == 2)
622 chip->options |= NAND_BUSWIDTH_16;
623
624 dev_notice(prv->dev, "Configured for "
625 "%u-bit NAND, page size %u "
626 "with %u spare.\n",
627 rcw_width * 8, rcw_pagesize,
628 rcw_sparesize);
629 iounmap(rm);
630 of_node_put(rmnode);
631 return 0;
632}
633
634/* Free driver resources */
635static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
636{
637 struct nand_chip *chip = mtd->priv;
638 struct mpc5121_nfc_prv *prv = chip->priv;
639
640 if (prv->clk) {
641 clk_disable(prv->clk);
642 clk_put(prv->clk);
643 }
644
645 if (prv->csreg)
646 iounmap(prv->csreg);
647}
648
649static int __devinit mpc5121_nfc_probe(struct of_device *op,
650 const struct of_device_id *match)
651{
652 struct device_node *rootnode, *dn = op->node;
653 struct device *dev = &op->dev;
654 struct mpc5121_nfc_prv *prv;
655 struct resource res;
656 struct mtd_info *mtd;
657#ifdef CONFIG_MTD_PARTITIONS
658 struct mtd_partition *parts;
659#endif
660 struct nand_chip *chip;
661 unsigned long regs_paddr, regs_size;
662 const uint *chips_no;
663 int resettime = 0;
664 int retval = 0;
665 int rev, len;
666
667 /*
668 * Check SoC revision. This driver supports only NFC
669 * in MPC5121 revision 2 and MPC5123 revision 3.
670 */
671 rev = (mfspr(SPRN_SVR) >> 4) & 0xF;
672 if ((rev != 2) && (rev != 3)) {
673 dev_err(dev, "SoC revision %u is not supported!\n", rev);
674 return -ENXIO;
675 }
676
677 prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL);
678 if (!prv) {
679 dev_err(dev, "Memory exhausted!\n");
680 return -ENOMEM;
681 }
682
683 mtd = &prv->mtd;
684 chip = &prv->chip;
685
686 mtd->priv = chip;
687 chip->priv = prv;
688 prv->dev = dev;
689
690 /* Read NFC configuration from Reset Config Word */
691 retval = mpc5121_nfc_read_hw_config(mtd);
692 if (retval) {
693 dev_err(dev, "Unable to read NFC config!\n");
694 return retval;
695 }
696
697 prv->irq = irq_of_parse_and_map(dn, 0);
698 if (prv->irq == NO_IRQ) {
699 dev_err(dev, "Error mapping IRQ!\n");
700 return -EINVAL;
701 }
702
703 retval = of_address_to_resource(dn, 0, &res);
704 if (retval) {
705 dev_err(dev, "Error parsing memory region!\n");
706 return retval;
707 }
708
709 chips_no = of_get_property(dn, "chips", &len);
710 if (!chips_no || len != sizeof(*chips_no)) {
711 dev_err(dev, "Invalid/missing 'chips' property!\n");
712 return -EINVAL;
713 }
714
715 regs_paddr = res.start;
716 regs_size = res.end - res.start + 1;
717
718 if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) {
719 dev_err(dev, "Error requesting memory region!\n");
720 return -EBUSY;
721 }
722
723 prv->regs = devm_ioremap(dev, regs_paddr, regs_size);
724 if (!prv->regs) {
725 dev_err(dev, "Error mapping memory region!\n");
726 return -ENOMEM;
727 }
728
729 mtd->name = "MPC5121 NAND";
730 chip->dev_ready = mpc5121_nfc_dev_ready;
731 chip->cmdfunc = mpc5121_nfc_command;
732 chip->read_byte = mpc5121_nfc_read_byte;
733 chip->read_word = mpc5121_nfc_read_word;
734 chip->read_buf = mpc5121_nfc_read_buf;
735 chip->write_buf = mpc5121_nfc_write_buf;
736 chip->verify_buf = mpc5121_nfc_verify_buf;
737 chip->select_chip = mpc5121_nfc_select_chip;
738 chip->options = NAND_NO_AUTOINCR | NAND_USE_FLASH_BBT;
739 chip->ecc.mode = NAND_ECC_SOFT;
740
741 /* Support external chip-select logic on ADS5121 board */
742 rootnode = of_find_node_by_path("/");
743 if (of_device_is_compatible(rootnode, "fsl,mpc5121ads")) {
744 retval = ads5121_chipselect_init(mtd);
745 if (retval) {
746 dev_err(dev, "Chipselect init error!\n");
747 of_node_put(rootnode);
748 return retval;
749 }
750
751 chip->select_chip = ads5121_select_chip;
752 }
753 of_node_put(rootnode);
754
755 /* Enable NFC clock */
756 prv->clk = clk_get(dev, "nfc_clk");
757 if (!prv->clk) {
758 dev_err(dev, "Unable to acquire NFC clock!\n");
759 retval = -ENODEV;
760 goto error;
761 }
762
763 clk_enable(prv->clk);
764
765 /* Reset NAND Flash controller */
766 nfc_set(mtd, NFC_CONFIG1, NFC_RESET);
767 while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) {
768 if (resettime++ >= NFC_RESET_TIMEOUT) {
769 dev_err(dev, "Timeout while resetting NFC!\n");
770 retval = -EINVAL;
771 goto error;
772 }
773
774 udelay(1);
775 }
776
777 /* Enable write to NFC memory */
778 nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED);
779
780 /* Enable write to all NAND pages */
781 nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000);
782 nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF);
783 nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK);
784
785 /*
786 * Setup NFC:
787 * - Big Endian transfers,
788 * - Interrupt after full page read/write.
789 */
790 nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK |
791 NFC_FULL_PAGE_INT);
792
793 /* Set spare area size */
794 nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1);
795
796 init_waitqueue_head(&prv->irq_waitq);
797 retval = devm_request_irq(dev, prv->irq, &mpc5121_nfc_irq, 0, DRV_NAME,
798 mtd);
799 if (retval) {
800 dev_err(dev, "Error requesting IRQ!\n");
801 goto error;
802 }
803
804 /* Detect NAND chips */
805 if (nand_scan(mtd, *chips_no)) {
806 dev_err(dev, "NAND Flash not found !\n");
807 devm_free_irq(dev, prv->irq, mtd);
808 retval = -ENXIO;
809 goto error;
810 }
811
812 /* Set erase block size */
813 switch (mtd->erasesize / mtd->writesize) {
814 case 32:
815 nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32);
816 break;
817
818 case 64:
819 nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64);
820 break;
821
822 case 128:
823 nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128);
824 break;
825
826 case 256:
827 nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256);
828 break;
829
830 default:
831 dev_err(dev, "Unsupported NAND flash!\n");
832 devm_free_irq(dev, prv->irq, mtd);
833 retval = -ENXIO;
834 goto error;
835 }
836
837 dev_set_drvdata(dev, mtd);
838
839 /* Register device in MTD */
840#ifdef CONFIG_MTD_PARTITIONS
841 retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0);
842#ifdef CONFIG_MTD_OF_PARTS
843 if (retval == 0)
844 retval = of_mtd_parse_partitions(dev, dn, &parts);
845#endif
846 if (retval < 0) {
847 dev_err(dev, "Error parsing MTD partitions!\n");
848 devm_free_irq(dev, prv->irq, mtd);
849 retval = -EINVAL;
850 goto error;
851 }
852
853 if (retval > 0)
854 retval = add_mtd_partitions(mtd, parts, retval);
855 else
856#endif
857 retval = add_mtd_device(mtd);
858
859 if (retval) {
860 dev_err(dev, "Error adding MTD device!\n");
861 devm_free_irq(dev, prv->irq, mtd);
862 goto error;
863 }
864
865 return 0;
866error:
867 mpc5121_nfc_free(dev, mtd);
868 return retval;
869}
870
871static int __devexit mpc5121_nfc_remove(struct of_device *op)
872{
873 struct device *dev = &op->dev;
874 struct mtd_info *mtd = dev_get_drvdata(dev);
875 struct nand_chip *chip = mtd->priv;
876 struct mpc5121_nfc_prv *prv = chip->priv;
877
878 nand_release(mtd);
879 devm_free_irq(dev, prv->irq, mtd);
880 mpc5121_nfc_free(dev, mtd);
881
882 return 0;
883}
884
/* Device-tree match table: binds this driver to "fsl,mpc5121-nfc" nodes */
static struct of_device_id mpc5121_nfc_match[] __devinitdata = {
	{ .compatible = "fsl,mpc5121-nfc", },
	{},
};
889
/* OF platform driver glue: probe/remove entry points and match table */
static struct of_platform_driver mpc5121_nfc_driver = {
	.match_table = mpc5121_nfc_match,
	.probe = mpc5121_nfc_probe,
	.remove = __devexit_p(mpc5121_nfc_remove),
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
};
899
/* Module entry point: register the OF platform driver */
static int __init mpc5121_nfc_init(void)
{
	return of_register_platform_driver(&mpc5121_nfc_driver);
}

module_init(mpc5121_nfc_init);
906
/* Module exit point: unregister the OF platform driver */
static void __exit mpc5121_nfc_cleanup(void)
{
	of_unregister_platform_driver(&mpc5121_nfc_driver);
}

module_exit(mpc5121_nfc_cleanup);
913
914MODULE_AUTHOR("Freescale Semiconductor, Inc.");
915MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
916MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index b2900d8406d3..2ba3be1f4937 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -638,6 +638,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
638 638
639 case NAND_CMD_ERASE1: 639 case NAND_CMD_ERASE1:
640 case NAND_CMD_ERASE2: 640 case NAND_CMD_ERASE2:
641 case NAND_CMD_RESET:
641 send_cmd(host, command, false); 642 send_cmd(host, command, false);
642 mxc_do_addr_cycle(mtd, column, page_addr); 643 mxc_do_addr_cycle(mtd, column, page_addr);
643 644
@@ -818,7 +819,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
818 } 819 }
819 820
820 /* first scan to find the device and get the page size */ 821 /* first scan to find the device and get the page size */
821 if (nand_scan_ident(mtd, 1)) { 822 if (nand_scan_ident(mtd, 1, NULL)) {
822 err = -ENXIO; 823 err = -ENXIO;
823 goto escan; 824 goto escan;
824 } 825 }
@@ -886,11 +887,14 @@ static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state)
886 int ret = 0; 887 int ret = 0;
887 888
888 DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n"); 889 DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n");
889 if (mtd) { 890
890 ret = mtd->suspend(mtd); 891 ret = mtd->suspend(mtd);
891 /* Disable the NFC clock */ 892
892 clk_disable(host->clk); 893 /*
893 } 894 * nand_suspend locks the device for exclusive access, so
895 * the clock must already be off.
896 */
897 BUG_ON(!ret && host->clk_act);
894 898
895 return ret; 899 return ret;
896} 900}
@@ -904,11 +908,7 @@ static int mxcnd_resume(struct platform_device *pdev)
904 908
905 DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n"); 909 DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n");
906 910
907 if (mtd) { 911 mtd->resume(mtd);
908 /* Enable the NFC clock */
909 clk_enable(host->clk);
910 mtd->resume(mtd);
911 }
912 912
913 return ret; 913 return ret;
914} 914}
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 8f2958fe2148..b9dc65c7253c 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -108,6 +108,35 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
108 */ 108 */
109DEFINE_LED_TRIGGER(nand_led_trigger); 109DEFINE_LED_TRIGGER(nand_led_trigger);
110 110
111static int check_offs_len(struct mtd_info *mtd,
112 loff_t ofs, uint64_t len)
113{
114 struct nand_chip *chip = mtd->priv;
115 int ret = 0;
116
117 /* Start address must align on block boundary */
118 if (ofs & ((1 << chip->phys_erase_shift) - 1)) {
119 DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__);
120 ret = -EINVAL;
121 }
122
123 /* Length must align on block boundary */
124 if (len & ((1 << chip->phys_erase_shift) - 1)) {
125 DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n",
126 __func__);
127 ret = -EINVAL;
128 }
129
130 /* Do not allow past end of device */
131 if (ofs + len > mtd->size) {
132 DEBUG(MTD_DEBUG_LEVEL0, "%s: Past end of device\n",
133 __func__);
134 ret = -EINVAL;
135 }
136
137 return ret;
138}
139
111/** 140/**
112 * nand_release_device - [GENERIC] release chip 141 * nand_release_device - [GENERIC] release chip
113 * @mtd: MTD device structure 142 * @mtd: MTD device structure
@@ -335,14 +364,18 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
335 bad = cpu_to_le16(chip->read_word(mtd)); 364 bad = cpu_to_le16(chip->read_word(mtd));
336 if (chip->badblockpos & 0x1) 365 if (chip->badblockpos & 0x1)
337 bad >>= 8; 366 bad >>= 8;
338 if ((bad & 0xFF) != 0xff) 367 else
339 res = 1; 368 bad &= 0xFF;
340 } else { 369 } else {
341 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page); 370 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page);
342 if (chip->read_byte(mtd) != 0xff) 371 bad = chip->read_byte(mtd);
343 res = 1;
344 } 372 }
345 373
374 if (likely(chip->badblockbits == 8))
375 res = bad != 0xFF;
376 else
377 res = hweight8(bad) < chip->badblockbits;
378
346 if (getchip) 379 if (getchip)
347 nand_release_device(mtd); 380 nand_release_device(mtd);
348 381
@@ -401,6 +434,11 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
401static int nand_check_wp(struct mtd_info *mtd) 434static int nand_check_wp(struct mtd_info *mtd)
402{ 435{
403 struct nand_chip *chip = mtd->priv; 436 struct nand_chip *chip = mtd->priv;
437
438 /* broken xD cards report WP despite being writable */
439 if (chip->options & NAND_BROKEN_XD)
440 return 0;
441
404 /* Check the WP bit */ 442 /* Check the WP bit */
405 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); 443 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
406 return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1; 444 return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
@@ -744,9 +782,6 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
744 chip->state = FL_PM_SUSPENDED; 782 chip->state = FL_PM_SUSPENDED;
745 spin_unlock(lock); 783 spin_unlock(lock);
746 return 0; 784 return 0;
747 } else {
748 spin_unlock(lock);
749 return -EAGAIN;
750 } 785 }
751 } 786 }
752 set_current_state(TASK_UNINTERRUPTIBLE); 787 set_current_state(TASK_UNINTERRUPTIBLE);
@@ -835,6 +870,168 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
835} 870}
836 871
837/** 872/**
873 * __nand_unlock - [REPLACABLE] unlocks specified locked blockes
874 *
875 * @param mtd - mtd info
876 * @param ofs - offset to start unlock from
877 * @param len - length to unlock
878 * @invert - when = 0, unlock the range of blocks within the lower and
879 * upper boundary address
880 * whne = 1, unlock the range of blocks outside the boundaries
881 * of the lower and upper boundary address
882 *
883 * @return - unlock status
884 */
885static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
886 uint64_t len, int invert)
887{
888 int ret = 0;
889 int status, page;
890 struct nand_chip *chip = mtd->priv;
891
892 /* Submit address of first page to unlock */
893 page = ofs >> chip->page_shift;
894 chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
895
896 /* Submit address of last page to unlock */
897 page = (ofs + len) >> chip->page_shift;
898 chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
899 (page | invert) & chip->pagemask);
900
901 /* Call wait ready function */
902 status = chip->waitfunc(mtd, chip);
903 udelay(1000);
904 /* See if device thinks it succeeded */
905 if (status & 0x01) {
906 DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
907 __func__, status);
908 ret = -EIO;
909 }
910
911 return ret;
912}
913
914/**
915 * nand_unlock - [REPLACABLE] unlocks specified locked blockes
916 *
917 * @param mtd - mtd info
918 * @param ofs - offset to start unlock from
919 * @param len - length to unlock
920 *
921 * @return - unlock status
922 */
923int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
924{
925 int ret = 0;
926 int chipnr;
927 struct nand_chip *chip = mtd->priv;
928
929 DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
930 __func__, (unsigned long long)ofs, len);
931
932 if (check_offs_len(mtd, ofs, len))
933 ret = -EINVAL;
934
935 /* Align to last block address if size addresses end of the device */
936 if (ofs + len == mtd->size)
937 len -= mtd->erasesize;
938
939 nand_get_device(chip, mtd, FL_UNLOCKING);
940
941 /* Shift to get chip number */
942 chipnr = ofs >> chip->chip_shift;
943
944 chip->select_chip(mtd, chipnr);
945
946 /* Check, if it is write protected */
947 if (nand_check_wp(mtd)) {
948 DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
949 __func__);
950 ret = -EIO;
951 goto out;
952 }
953
954 ret = __nand_unlock(mtd, ofs, len, 0);
955
956out:
957 /* de-select the NAND device */
958 chip->select_chip(mtd, -1);
959
960 nand_release_device(mtd);
961
962 return ret;
963}
964
965/**
966 * nand_lock - [REPLACABLE] locks all blockes present in the device
967 *
968 * @param mtd - mtd info
969 * @param ofs - offset to start unlock from
970 * @param len - length to unlock
971 *
972 * @return - lock status
973 *
974 * This feature is not support in many NAND parts. 'Micron' NAND parts
975 * do have this feature, but it allows only to lock all blocks not for
976 * specified range for block.
977 *
978 * Implementing 'lock' feature by making use of 'unlock', for now.
979 */
980int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
981{
982 int ret = 0;
983 int chipnr, status, page;
984 struct nand_chip *chip = mtd->priv;
985
986 DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
987 __func__, (unsigned long long)ofs, len);
988
989 if (check_offs_len(mtd, ofs, len))
990 ret = -EINVAL;
991
992 nand_get_device(chip, mtd, FL_LOCKING);
993
994 /* Shift to get chip number */
995 chipnr = ofs >> chip->chip_shift;
996
997 chip->select_chip(mtd, chipnr);
998
999 /* Check, if it is write protected */
1000 if (nand_check_wp(mtd)) {
1001 DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
1002 __func__);
1003 status = MTD_ERASE_FAILED;
1004 ret = -EIO;
1005 goto out;
1006 }
1007
1008 /* Submit address of first page to lock */
1009 page = ofs >> chip->page_shift;
1010 chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
1011
1012 /* Call wait ready function */
1013 status = chip->waitfunc(mtd, chip);
1014 udelay(1000);
1015 /* See if device thinks it succeeded */
1016 if (status & 0x01) {
1017 DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
1018 __func__, status);
1019 ret = -EIO;
1020 goto out;
1021 }
1022
1023 ret = __nand_unlock(mtd, ofs, len, 0x1);
1024
1025out:
1026 /* de-select the NAND device */
1027 chip->select_chip(mtd, -1);
1028
1029 nand_release_device(mtd);
1030
1031 return ret;
1032}
1033
1034/**
838 * nand_read_page_raw - [Intern] read raw page data without ecc 1035 * nand_read_page_raw - [Intern] read raw page data without ecc
839 * @mtd: mtd info structure 1036 * @mtd: mtd info structure
840 * @chip: nand chip info structure 1037 * @chip: nand chip info structure
@@ -1232,6 +1429,9 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1232 int ret = 0; 1429 int ret = 0;
1233 uint32_t readlen = ops->len; 1430 uint32_t readlen = ops->len;
1234 uint32_t oobreadlen = ops->ooblen; 1431 uint32_t oobreadlen = ops->ooblen;
1432 uint32_t max_oobsize = ops->mode == MTD_OOB_AUTO ?
1433 mtd->oobavail : mtd->oobsize;
1434
1235 uint8_t *bufpoi, *oob, *buf; 1435 uint8_t *bufpoi, *oob, *buf;
1236 1436
1237 stats = mtd->ecc_stats; 1437 stats = mtd->ecc_stats;
@@ -1282,18 +1482,14 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1282 buf += bytes; 1482 buf += bytes;
1283 1483
1284 if (unlikely(oob)) { 1484 if (unlikely(oob)) {
1285 /* Raw mode does data:oob:data:oob */ 1485
1286 if (ops->mode != MTD_OOB_RAW) { 1486 int toread = min(oobreadlen, max_oobsize);
1287 int toread = min(oobreadlen, 1487
1288 chip->ecc.layout->oobavail); 1488 if (toread) {
1289 if (toread) { 1489 oob = nand_transfer_oob(chip,
1290 oob = nand_transfer_oob(chip, 1490 oob, ops, toread);
1291 oob, ops, toread); 1491 oobreadlen -= toread;
1292 oobreadlen -= toread; 1492 }
1293 }
1294 } else
1295 buf = nand_transfer_oob(chip,
1296 buf, ops, mtd->oobsize);
1297 } 1493 }
1298 1494
1299 if (!(chip->options & NAND_NO_READRDY)) { 1495 if (!(chip->options & NAND_NO_READRDY)) {
@@ -1880,11 +2076,9 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1880 * @oob: oob data buffer 2076 * @oob: oob data buffer
1881 * @ops: oob ops structure 2077 * @ops: oob ops structure
1882 */ 2078 */
1883static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, 2079static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
1884 struct mtd_oob_ops *ops) 2080 struct mtd_oob_ops *ops)
1885{ 2081{
1886 size_t len = ops->ooblen;
1887
1888 switch(ops->mode) { 2082 switch(ops->mode) {
1889 2083
1890 case MTD_OOB_PLACE: 2084 case MTD_OOB_PLACE:
@@ -1939,6 +2133,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
1939 int chipnr, realpage, page, blockmask, column; 2133 int chipnr, realpage, page, blockmask, column;
1940 struct nand_chip *chip = mtd->priv; 2134 struct nand_chip *chip = mtd->priv;
1941 uint32_t writelen = ops->len; 2135 uint32_t writelen = ops->len;
2136
2137 uint32_t oobwritelen = ops->ooblen;
2138 uint32_t oobmaxlen = ops->mode == MTD_OOB_AUTO ?
2139 mtd->oobavail : mtd->oobsize;
2140
1942 uint8_t *oob = ops->oobbuf; 2141 uint8_t *oob = ops->oobbuf;
1943 uint8_t *buf = ops->datbuf; 2142 uint8_t *buf = ops->datbuf;
1944 int ret, subpage; 2143 int ret, subpage;
@@ -1980,6 +2179,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
1980 if (likely(!oob)) 2179 if (likely(!oob))
1981 memset(chip->oob_poi, 0xff, mtd->oobsize); 2180 memset(chip->oob_poi, 0xff, mtd->oobsize);
1982 2181
2182 /* Don't allow multipage oob writes with offset */
2183 if (ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
2184 return -EINVAL;
2185
1983 while(1) { 2186 while(1) {
1984 int bytes = mtd->writesize; 2187 int bytes = mtd->writesize;
1985 int cached = writelen > bytes && page != blockmask; 2188 int cached = writelen > bytes && page != blockmask;
@@ -1995,8 +2198,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
1995 wbuf = chip->buffers->databuf; 2198 wbuf = chip->buffers->databuf;
1996 } 2199 }
1997 2200
1998 if (unlikely(oob)) 2201 if (unlikely(oob)) {
1999 oob = nand_fill_oob(chip, oob, ops); 2202 size_t len = min(oobwritelen, oobmaxlen);
2203 oob = nand_fill_oob(chip, oob, len, ops);
2204 oobwritelen -= len;
2205 }
2000 2206
2001 ret = chip->write_page(mtd, chip, wbuf, page, cached, 2207 ret = chip->write_page(mtd, chip, wbuf, page, cached,
2002 (ops->mode == MTD_OOB_RAW)); 2208 (ops->mode == MTD_OOB_RAW));
@@ -2170,7 +2376,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2170 chip->pagebuf = -1; 2376 chip->pagebuf = -1;
2171 2377
2172 memset(chip->oob_poi, 0xff, mtd->oobsize); 2378 memset(chip->oob_poi, 0xff, mtd->oobsize);
2173 nand_fill_oob(chip, ops->oobbuf, ops); 2379 nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
2174 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask); 2380 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
2175 memset(chip->oob_poi, 0xff, mtd->oobsize); 2381 memset(chip->oob_poi, 0xff, mtd->oobsize);
2176 2382
@@ -2293,25 +2499,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2293 __func__, (unsigned long long)instr->addr, 2499 __func__, (unsigned long long)instr->addr,
2294 (unsigned long long)instr->len); 2500 (unsigned long long)instr->len);
2295 2501
2296 /* Start address must align on block boundary */ 2502 if (check_offs_len(mtd, instr->addr, instr->len))
2297 if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) {
2298 DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__);
2299 return -EINVAL; 2503 return -EINVAL;
2300 }
2301
2302 /* Length must align on block boundary */
2303 if (instr->len & ((1 << chip->phys_erase_shift) - 1)) {
2304 DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n",
2305 __func__);
2306 return -EINVAL;
2307 }
2308
2309 /* Do not allow erase past end of device */
2310 if ((instr->len + instr->addr) > mtd->size) {
2311 DEBUG(MTD_DEBUG_LEVEL0, "%s: Erase past end of device\n",
2312 __func__);
2313 return -EINVAL;
2314 }
2315 2504
2316 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; 2505 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
2317 2506
@@ -2582,10 +2771,10 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
2582 */ 2771 */
2583static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, 2772static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2584 struct nand_chip *chip, 2773 struct nand_chip *chip,
2585 int busw, int *maf_id) 2774 int busw, int *maf_id,
2775 struct nand_flash_dev *type)
2586{ 2776{
2587 struct nand_flash_dev *type = NULL; 2777 int dev_id, maf_idx;
2588 int i, dev_id, maf_idx;
2589 int tmp_id, tmp_manf; 2778 int tmp_id, tmp_manf;
2590 2779
2591 /* Select the device */ 2780 /* Select the device */
@@ -2624,15 +2813,14 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2624 return ERR_PTR(-ENODEV); 2813 return ERR_PTR(-ENODEV);
2625 } 2814 }
2626 2815
2627 /* Lookup the flash id */
2628 for (i = 0; nand_flash_ids[i].name != NULL; i++) {
2629 if (dev_id == nand_flash_ids[i].id) {
2630 type = &nand_flash_ids[i];
2631 break;
2632 }
2633 }
2634
2635 if (!type) 2816 if (!type)
2817 type = nand_flash_ids;
2818
2819 for (; type->name != NULL; type++)
2820 if (dev_id == type->id)
2821 break;
2822
2823 if (!type->name)
2636 return ERR_PTR(-ENODEV); 2824 return ERR_PTR(-ENODEV);
2637 2825
2638 if (!mtd->name) 2826 if (!mtd->name)
@@ -2704,6 +2892,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2704 /* Set the bad block position */ 2892 /* Set the bad block position */
2705 chip->badblockpos = mtd->writesize > 512 ? 2893 chip->badblockpos = mtd->writesize > 512 ?
2706 NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS; 2894 NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
2895 chip->badblockbits = 8;
2707 2896
2708 /* Get chip options, preserve non chip based options */ 2897 /* Get chip options, preserve non chip based options */
2709 chip->options &= ~NAND_CHIPOPTIONS_MSK; 2898 chip->options &= ~NAND_CHIPOPTIONS_MSK;
@@ -2741,13 +2930,15 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2741 * nand_scan_ident - [NAND Interface] Scan for the NAND device 2930 * nand_scan_ident - [NAND Interface] Scan for the NAND device
2742 * @mtd: MTD device structure 2931 * @mtd: MTD device structure
2743 * @maxchips: Number of chips to scan for 2932 * @maxchips: Number of chips to scan for
2933 * @table: Alternative NAND ID table
2744 * 2934 *
2745 * This is the first phase of the normal nand_scan() function. It 2935 * This is the first phase of the normal nand_scan() function. It
2746 * reads the flash ID and sets up MTD fields accordingly. 2936 * reads the flash ID and sets up MTD fields accordingly.
2747 * 2937 *
2748 * The mtd->owner field must be set to the module of the caller. 2938 * The mtd->owner field must be set to the module of the caller.
2749 */ 2939 */
2750int nand_scan_ident(struct mtd_info *mtd, int maxchips) 2940int nand_scan_ident(struct mtd_info *mtd, int maxchips,
2941 struct nand_flash_dev *table)
2751{ 2942{
2752 int i, busw, nand_maf_id; 2943 int i, busw, nand_maf_id;
2753 struct nand_chip *chip = mtd->priv; 2944 struct nand_chip *chip = mtd->priv;
@@ -2759,7 +2950,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips)
2759 nand_set_defaults(chip, busw); 2950 nand_set_defaults(chip, busw);
2760 2951
2761 /* Read the flash type */ 2952 /* Read the flash type */
2762 type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id); 2953 type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id, table);
2763 2954
2764 if (IS_ERR(type)) { 2955 if (IS_ERR(type)) {
2765 if (!(chip->options & NAND_SCAN_SILENT_NODEV)) 2956 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
@@ -2989,7 +3180,8 @@ int nand_scan_tail(struct mtd_info *mtd)
2989 3180
2990 /* Fill in remaining MTD driver data */ 3181 /* Fill in remaining MTD driver data */
2991 mtd->type = MTD_NANDFLASH; 3182 mtd->type = MTD_NANDFLASH;
2992 mtd->flags = MTD_CAP_NANDFLASH; 3183 mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
3184 MTD_CAP_NANDFLASH;
2993 mtd->erase = nand_erase; 3185 mtd->erase = nand_erase;
2994 mtd->point = NULL; 3186 mtd->point = NULL;
2995 mtd->unpoint = NULL; 3187 mtd->unpoint = NULL;
@@ -3050,7 +3242,7 @@ int nand_scan(struct mtd_info *mtd, int maxchips)
3050 BUG(); 3242 BUG();
3051 } 3243 }
3052 3244
3053 ret = nand_scan_ident(mtd, maxchips); 3245 ret = nand_scan_ident(mtd, maxchips, NULL);
3054 if (!ret) 3246 if (!ret)
3055 ret = nand_scan_tail(mtd); 3247 ret = nand_scan_tail(mtd);
3056 return ret; 3248 return ret;
@@ -3077,6 +3269,8 @@ void nand_release(struct mtd_info *mtd)
3077 kfree(chip->buffers); 3269 kfree(chip->buffers);
3078} 3270}
3079 3271
3272EXPORT_SYMBOL_GPL(nand_lock);
3273EXPORT_SYMBOL_GPL(nand_unlock);
3080EXPORT_SYMBOL_GPL(nand_scan); 3274EXPORT_SYMBOL_GPL(nand_scan);
3081EXPORT_SYMBOL_GPL(nand_scan_ident); 3275EXPORT_SYMBOL_GPL(nand_scan_ident);
3082EXPORT_SYMBOL_GPL(nand_scan_tail); 3276EXPORT_SYMBOL_GPL(nand_scan_tail);
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 55c23e5cd210..387c45c366fe 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -237,15 +237,33 @@ static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
237 size_t len) 237 size_t len)
238{ 238{
239 struct mtd_oob_ops ops; 239 struct mtd_oob_ops ops;
240 int res;
240 241
241 ops.mode = MTD_OOB_RAW; 242 ops.mode = MTD_OOB_RAW;
242 ops.ooboffs = 0; 243 ops.ooboffs = 0;
243 ops.ooblen = mtd->oobsize; 244 ops.ooblen = mtd->oobsize;
244 ops.oobbuf = buf;
245 ops.datbuf = buf;
246 ops.len = len;
247 245
248 return mtd->read_oob(mtd, offs, &ops); 246
247 while (len > 0) {
248 if (len <= mtd->writesize) {
249 ops.oobbuf = buf + len;
250 ops.datbuf = buf;
251 ops.len = len;
252 return mtd->read_oob(mtd, offs, &ops);
253 } else {
254 ops.oobbuf = buf + mtd->writesize;
255 ops.datbuf = buf;
256 ops.len = mtd->writesize;
257 res = mtd->read_oob(mtd, offs, &ops);
258
259 if (res)
260 return res;
261 }
262
263 buf += mtd->oobsize + mtd->writesize;
264 len -= mtd->writesize;
265 }
266 return 0;
249} 267}
250 268
251/* 269/*
diff --git a/drivers/mtd/nand/nand_bcm_umi.h b/drivers/mtd/nand/nand_bcm_umi.h
index 7cec2cd97854..198b304d6f72 100644
--- a/drivers/mtd/nand/nand_bcm_umi.h
+++ b/drivers/mtd/nand/nand_bcm_umi.h
@@ -167,18 +167,27 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
167 int numToRead = 16; /* There are 16 bytes per sector in the OOB */ 167 int numToRead = 16; /* There are 16 bytes per sector in the OOB */
168 168
169 /* ECC is already paused when this function is called */ 169 /* ECC is already paused when this function is called */
170 if (pageSize != NAND_DATA_ACCESS_SIZE) {
171 /* skip BI */
172#if defined(__KERNEL__) && !defined(STANDALONE)
173 *oobp++ = REG_NAND_DATA8;
174#else
175 REG_NAND_DATA8;
176#endif
177 numToRead--;
178 }
170 179
171 if (pageSize == NAND_DATA_ACCESS_SIZE) { 180 while (numToRead > numEccBytes) {
172 while (numToRead > numEccBytes) { 181 /* skip free oob region */
173 /* skip free oob region */
174#if defined(__KERNEL__) && !defined(STANDALONE) 182#if defined(__KERNEL__) && !defined(STANDALONE)
175 *oobp++ = REG_NAND_DATA8; 183 *oobp++ = REG_NAND_DATA8;
176#else 184#else
177 REG_NAND_DATA8; 185 REG_NAND_DATA8;
178#endif 186#endif
179 numToRead--; 187 numToRead--;
180 } 188 }
181 189
190 if (pageSize == NAND_DATA_ACCESS_SIZE) {
182 /* read ECC bytes before BI */ 191 /* read ECC bytes before BI */
183 nand_bcm_umi_bch_resume_read_ecc_calc(); 192 nand_bcm_umi_bch_resume_read_ecc_calc();
184 193
@@ -190,6 +199,7 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
190#else 199#else
191 eccCalc[eccPos++] = REG_NAND_DATA8; 200 eccCalc[eccPos++] = REG_NAND_DATA8;
192#endif 201#endif
202 numToRead--;
193 } 203 }
194 204
195 nand_bcm_umi_bch_pause_read_ecc_calc(); 205 nand_bcm_umi_bch_pause_read_ecc_calc();
@@ -204,49 +214,18 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
204 numToRead--; 214 numToRead--;
205 } 215 }
206 216
207 /* read ECC bytes */ 217 }
208 nand_bcm_umi_bch_resume_read_ecc_calc(); 218 /* read ECC bytes */
209 while (numToRead) { 219 nand_bcm_umi_bch_resume_read_ecc_calc();
210#if defined(__KERNEL__) && !defined(STANDALONE) 220 while (numToRead) {
211 *oobp = REG_NAND_DATA8;
212 eccCalc[eccPos++] = *oobp;
213 oobp++;
214#else
215 eccCalc[eccPos++] = REG_NAND_DATA8;
216#endif
217 numToRead--;
218 }
219 } else {
220 /* skip BI */
221#if defined(__KERNEL__) && !defined(STANDALONE) 221#if defined(__KERNEL__) && !defined(STANDALONE)
222 *oobp++ = REG_NAND_DATA8; 222 *oobp = REG_NAND_DATA8;
223 eccCalc[eccPos++] = *oobp;
224 oobp++;
223#else 225#else
224 REG_NAND_DATA8; 226 eccCalc[eccPos++] = REG_NAND_DATA8;
225#endif 227#endif
226 numToRead--; 228 numToRead--;
227
228 while (numToRead > numEccBytes) {
229 /* skip free oob region */
230#if defined(__KERNEL__) && !defined(STANDALONE)
231 *oobp++ = REG_NAND_DATA8;
232#else
233 REG_NAND_DATA8;
234#endif
235 numToRead--;
236 }
237
238 /* read ECC bytes */
239 nand_bcm_umi_bch_resume_read_ecc_calc();
240 while (numToRead) {
241#if defined(__KERNEL__) && !defined(STANDALONE)
242 *oobp = REG_NAND_DATA8;
243 eccCalc[eccPos++] = *oobp;
244 oobp++;
245#else
246 eccCalc[eccPos++] = REG_NAND_DATA8;
247#endif
248 numToRead--;
249 }
250 } 229 }
251} 230}
252 231
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 7281000fef2d..261337efe0ee 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -80,6 +80,9 @@
80#ifndef CONFIG_NANDSIM_DBG 80#ifndef CONFIG_NANDSIM_DBG
81#define CONFIG_NANDSIM_DBG 0 81#define CONFIG_NANDSIM_DBG 0
82#endif 82#endif
83#ifndef CONFIG_NANDSIM_MAX_PARTS
84#define CONFIG_NANDSIM_MAX_PARTS 32
85#endif
83 86
84static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE; 87static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE;
85static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE; 88static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE;
@@ -94,7 +97,7 @@ static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
94static uint do_delays = CONFIG_NANDSIM_DO_DELAYS; 97static uint do_delays = CONFIG_NANDSIM_DO_DELAYS;
95static uint log = CONFIG_NANDSIM_LOG; 98static uint log = CONFIG_NANDSIM_LOG;
96static uint dbg = CONFIG_NANDSIM_DBG; 99static uint dbg = CONFIG_NANDSIM_DBG;
97static unsigned long parts[MAX_MTD_DEVICES]; 100static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS];
98static unsigned int parts_num; 101static unsigned int parts_num;
99static char *badblocks = NULL; 102static char *badblocks = NULL;
100static char *weakblocks = NULL; 103static char *weakblocks = NULL;
@@ -135,8 +138,8 @@ MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read I
135MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)"); 138MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
136MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds"); 139MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds");
137MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)"); 140MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
138MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanodeconds)"); 141MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)");
139MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanodeconds)"); 142MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)");
140MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)"); 143MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
141MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero"); 144MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
142MODULE_PARM_DESC(log, "Perform logging if not zero"); 145MODULE_PARM_DESC(log, "Perform logging if not zero");
@@ -288,7 +291,7 @@ union ns_mem {
288 * The structure which describes all the internal simulator data. 291 * The structure which describes all the internal simulator data.
289 */ 292 */
290struct nandsim { 293struct nandsim {
291 struct mtd_partition partitions[MAX_MTD_DEVICES]; 294 struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
292 unsigned int nbparts; 295 unsigned int nbparts;
293 296
294 uint busw; /* flash chip bus width (8 or 16) */ 297 uint busw; /* flash chip bus width (8 or 16) */
@@ -312,7 +315,7 @@ struct nandsim {
312 union ns_mem buf; 315 union ns_mem buf;
313 316
314 /* NAND flash "geometry" */ 317 /* NAND flash "geometry" */
315 struct nandsin_geometry { 318 struct {
316 uint64_t totsz; /* total flash size, bytes */ 319 uint64_t totsz; /* total flash size, bytes */
317 uint32_t secsz; /* flash sector (erase block) size, bytes */ 320 uint32_t secsz; /* flash sector (erase block) size, bytes */
318 uint pgsz; /* NAND flash page size, bytes */ 321 uint pgsz; /* NAND flash page size, bytes */
@@ -331,7 +334,7 @@ struct nandsim {
331 } geom; 334 } geom;
332 335
333 /* NAND flash internal registers */ 336 /* NAND flash internal registers */
334 struct nandsim_regs { 337 struct {
335 unsigned command; /* the command register */ 338 unsigned command; /* the command register */
336 u_char status; /* the status register */ 339 u_char status; /* the status register */
337 uint row; /* the page number */ 340 uint row; /* the page number */
@@ -342,7 +345,7 @@ struct nandsim {
342 } regs; 345 } regs;
343 346
344 /* NAND flash lines state */ 347 /* NAND flash lines state */
345 struct ns_lines_status { 348 struct {
346 int ce; /* chip Enable */ 349 int ce; /* chip Enable */
347 int cle; /* command Latch Enable */ 350 int cle; /* command Latch Enable */
348 int ale; /* address Latch Enable */ 351 int ale; /* address Latch Enable */
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index 1f6f741af5da..8c0b69375224 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -105,21 +105,21 @@ static int nomadik_nand_probe(struct platform_device *pdev)
105 ret = -EIO; 105 ret = -EIO;
106 goto err_unmap; 106 goto err_unmap;
107 } 107 }
108 host->addr_va = ioremap(res->start, res->end - res->start + 1); 108 host->addr_va = ioremap(res->start, resource_size(res));
109 109
110 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data"); 110 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
111 if (!res) { 111 if (!res) {
112 ret = -EIO; 112 ret = -EIO;
113 goto err_unmap; 113 goto err_unmap;
114 } 114 }
115 host->data_va = ioremap(res->start, res->end - res->start + 1); 115 host->data_va = ioremap(res->start, resource_size(res));
116 116
117 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd"); 117 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
118 if (!res) { 118 if (!res) {
119 ret = -EIO; 119 ret = -EIO;
120 goto err_unmap; 120 goto err_unmap;
121 } 121 }
122 host->cmd_va = ioremap(res->start, res->end - res->start + 1); 122 host->cmd_va = ioremap(res->start, resource_size(res));
123 123
124 if (!host->addr_va || !host->data_va || !host->cmd_va) { 124 if (!host->addr_va || !host->data_va || !host->cmd_va) {
125 ret = -ENOMEM; 125 ret = -ENOMEM;
diff --git a/drivers/mtd/nand/w90p910_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 7680e731348a..6eddf7361ed7 100644
--- a/drivers/mtd/nand/w90p910_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2009 Nuvoton technology corporation. 2 * Copyright © 2009 Nuvoton technology corporation.
3 * 3 *
4 * Wan ZongShun <mcuos.com@gmail.com> 4 * Wan ZongShun <mcuos.com@gmail.com>
5 * 5 *
@@ -55,7 +55,7 @@
55#define write_addr_reg(dev, val) \ 55#define write_addr_reg(dev, val) \
56 __raw_writel((val), (dev)->reg + REG_SMADDR) 56 __raw_writel((val), (dev)->reg + REG_SMADDR)
57 57
58struct w90p910_nand { 58struct nuc900_nand {
59 struct mtd_info mtd; 59 struct mtd_info mtd;
60 struct nand_chip chip; 60 struct nand_chip chip;
61 void __iomem *reg; 61 void __iomem *reg;
@@ -76,49 +76,49 @@ static const struct mtd_partition partitions[] = {
76 } 76 }
77}; 77};
78 78
79static unsigned char w90p910_nand_read_byte(struct mtd_info *mtd) 79static unsigned char nuc900_nand_read_byte(struct mtd_info *mtd)
80{ 80{
81 unsigned char ret; 81 unsigned char ret;
82 struct w90p910_nand *nand; 82 struct nuc900_nand *nand;
83 83
84 nand = container_of(mtd, struct w90p910_nand, mtd); 84 nand = container_of(mtd, struct nuc900_nand, mtd);
85 85
86 ret = (unsigned char)read_data_reg(nand); 86 ret = (unsigned char)read_data_reg(nand);
87 87
88 return ret; 88 return ret;
89} 89}
90 90
91static void w90p910_nand_read_buf(struct mtd_info *mtd, 91static void nuc900_nand_read_buf(struct mtd_info *mtd,
92 unsigned char *buf, int len) 92 unsigned char *buf, int len)
93{ 93{
94 int i; 94 int i;
95 struct w90p910_nand *nand; 95 struct nuc900_nand *nand;
96 96
97 nand = container_of(mtd, struct w90p910_nand, mtd); 97 nand = container_of(mtd, struct nuc900_nand, mtd);
98 98
99 for (i = 0; i < len; i++) 99 for (i = 0; i < len; i++)
100 buf[i] = (unsigned char)read_data_reg(nand); 100 buf[i] = (unsigned char)read_data_reg(nand);
101} 101}
102 102
103static void w90p910_nand_write_buf(struct mtd_info *mtd, 103static void nuc900_nand_write_buf(struct mtd_info *mtd,
104 const unsigned char *buf, int len) 104 const unsigned char *buf, int len)
105{ 105{
106 int i; 106 int i;
107 struct w90p910_nand *nand; 107 struct nuc900_nand *nand;
108 108
109 nand = container_of(mtd, struct w90p910_nand, mtd); 109 nand = container_of(mtd, struct nuc900_nand, mtd);
110 110
111 for (i = 0; i < len; i++) 111 for (i = 0; i < len; i++)
112 write_data_reg(nand, buf[i]); 112 write_data_reg(nand, buf[i]);
113} 113}
114 114
115static int w90p910_verify_buf(struct mtd_info *mtd, 115static int nuc900_verify_buf(struct mtd_info *mtd,
116 const unsigned char *buf, int len) 116 const unsigned char *buf, int len)
117{ 117{
118 int i; 118 int i;
119 struct w90p910_nand *nand; 119 struct nuc900_nand *nand;
120 120
121 nand = container_of(mtd, struct w90p910_nand, mtd); 121 nand = container_of(mtd, struct nuc900_nand, mtd);
122 122
123 for (i = 0; i < len; i++) { 123 for (i = 0; i < len; i++) {
124 if (buf[i] != (unsigned char)read_data_reg(nand)) 124 if (buf[i] != (unsigned char)read_data_reg(nand))
@@ -128,7 +128,7 @@ static int w90p910_verify_buf(struct mtd_info *mtd,
128 return 0; 128 return 0;
129} 129}
130 130
131static int w90p910_check_rb(struct w90p910_nand *nand) 131static int nuc900_check_rb(struct nuc900_nand *nand)
132{ 132{
133 unsigned int val; 133 unsigned int val;
134 spin_lock(&nand->lock); 134 spin_lock(&nand->lock);
@@ -139,24 +139,24 @@ static int w90p910_check_rb(struct w90p910_nand *nand)
139 return val; 139 return val;
140} 140}
141 141
142static int w90p910_nand_devready(struct mtd_info *mtd) 142static int nuc900_nand_devready(struct mtd_info *mtd)
143{ 143{
144 struct w90p910_nand *nand; 144 struct nuc900_nand *nand;
145 int ready; 145 int ready;
146 146
147 nand = container_of(mtd, struct w90p910_nand, mtd); 147 nand = container_of(mtd, struct nuc900_nand, mtd);
148 148
149 ready = (w90p910_check_rb(nand)) ? 1 : 0; 149 ready = (nuc900_check_rb(nand)) ? 1 : 0;
150 return ready; 150 return ready;
151} 151}
152 152
153static void w90p910_nand_command_lp(struct mtd_info *mtd, 153static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
154 unsigned int command, int column, int page_addr) 154 int column, int page_addr)
155{ 155{
156 register struct nand_chip *chip = mtd->priv; 156 register struct nand_chip *chip = mtd->priv;
157 struct w90p910_nand *nand; 157 struct nuc900_nand *nand;
158 158
159 nand = container_of(mtd, struct w90p910_nand, mtd); 159 nand = container_of(mtd, struct nuc900_nand, mtd);
160 160
161 if (command == NAND_CMD_READOOB) { 161 if (command == NAND_CMD_READOOB) {
162 column += mtd->writesize; 162 column += mtd->writesize;
@@ -212,7 +212,7 @@ static void w90p910_nand_command_lp(struct mtd_info *mtd,
212 write_cmd_reg(nand, NAND_CMD_STATUS); 212 write_cmd_reg(nand, NAND_CMD_STATUS);
213 write_cmd_reg(nand, command); 213 write_cmd_reg(nand, command);
214 214
215 while (!w90p910_check_rb(nand)) 215 while (!nuc900_check_rb(nand))
216 ; 216 ;
217 217
218 return; 218 return;
@@ -241,7 +241,7 @@ static void w90p910_nand_command_lp(struct mtd_info *mtd,
241} 241}
242 242
243 243
244static void w90p910_nand_enable(struct w90p910_nand *nand) 244static void nuc900_nand_enable(struct nuc900_nand *nand)
245{ 245{
246 unsigned int val; 246 unsigned int val;
247 spin_lock(&nand->lock); 247 spin_lock(&nand->lock);
@@ -262,37 +262,37 @@ static void w90p910_nand_enable(struct w90p910_nand *nand)
262 spin_unlock(&nand->lock); 262 spin_unlock(&nand->lock);
263} 263}
264 264
265static int __devinit w90p910_nand_probe(struct platform_device *pdev) 265static int __devinit nuc900_nand_probe(struct platform_device *pdev)
266{ 266{
267 struct w90p910_nand *w90p910_nand; 267 struct nuc900_nand *nuc900_nand;
268 struct nand_chip *chip; 268 struct nand_chip *chip;
269 int retval; 269 int retval;
270 struct resource *res; 270 struct resource *res;
271 271
272 retval = 0; 272 retval = 0;
273 273
274 w90p910_nand = kzalloc(sizeof(struct w90p910_nand), GFP_KERNEL); 274 nuc900_nand = kzalloc(sizeof(struct nuc900_nand), GFP_KERNEL);
275 if (!w90p910_nand) 275 if (!nuc900_nand)
276 return -ENOMEM; 276 return -ENOMEM;
277 chip = &(w90p910_nand->chip); 277 chip = &(nuc900_nand->chip);
278 278
279 w90p910_nand->mtd.priv = chip; 279 nuc900_nand->mtd.priv = chip;
280 w90p910_nand->mtd.owner = THIS_MODULE; 280 nuc900_nand->mtd.owner = THIS_MODULE;
281 spin_lock_init(&w90p910_nand->lock); 281 spin_lock_init(&nuc900_nand->lock);
282 282
283 w90p910_nand->clk = clk_get(&pdev->dev, NULL); 283 nuc900_nand->clk = clk_get(&pdev->dev, NULL);
284 if (IS_ERR(w90p910_nand->clk)) { 284 if (IS_ERR(nuc900_nand->clk)) {
285 retval = -ENOENT; 285 retval = -ENOENT;
286 goto fail1; 286 goto fail1;
287 } 287 }
288 clk_enable(w90p910_nand->clk); 288 clk_enable(nuc900_nand->clk);
289 289
290 chip->cmdfunc = w90p910_nand_command_lp; 290 chip->cmdfunc = nuc900_nand_command_lp;
291 chip->dev_ready = w90p910_nand_devready; 291 chip->dev_ready = nuc900_nand_devready;
292 chip->read_byte = w90p910_nand_read_byte; 292 chip->read_byte = nuc900_nand_read_byte;
293 chip->write_buf = w90p910_nand_write_buf; 293 chip->write_buf = nuc900_nand_write_buf;
294 chip->read_buf = w90p910_nand_read_buf; 294 chip->read_buf = nuc900_nand_read_buf;
295 chip->verify_buf = w90p910_verify_buf; 295 chip->verify_buf = nuc900_verify_buf;
296 chip->chip_delay = 50; 296 chip->chip_delay = 50;
297 chip->options = 0; 297 chip->options = 0;
298 chip->ecc.mode = NAND_ECC_SOFT; 298 chip->ecc.mode = NAND_ECC_SOFT;
@@ -308,75 +308,75 @@ static int __devinit w90p910_nand_probe(struct platform_device *pdev)
308 goto fail1; 308 goto fail1;
309 } 309 }
310 310
311 w90p910_nand->reg = ioremap(res->start, resource_size(res)); 311 nuc900_nand->reg = ioremap(res->start, resource_size(res));
312 if (!w90p910_nand->reg) { 312 if (!nuc900_nand->reg) {
313 retval = -ENOMEM; 313 retval = -ENOMEM;
314 goto fail2; 314 goto fail2;
315 } 315 }
316 316
317 w90p910_nand_enable(w90p910_nand); 317 nuc900_nand_enable(nuc900_nand);
318 318
319 if (nand_scan(&(w90p910_nand->mtd), 1)) { 319 if (nand_scan(&(nuc900_nand->mtd), 1)) {
320 retval = -ENXIO; 320 retval = -ENXIO;
321 goto fail3; 321 goto fail3;
322 } 322 }
323 323
324 add_mtd_partitions(&(w90p910_nand->mtd), partitions, 324 add_mtd_partitions(&(nuc900_nand->mtd), partitions,
325 ARRAY_SIZE(partitions)); 325 ARRAY_SIZE(partitions));
326 326
327 platform_set_drvdata(pdev, w90p910_nand); 327 platform_set_drvdata(pdev, nuc900_nand);
328 328
329 return retval; 329 return retval;
330 330
331fail3: iounmap(w90p910_nand->reg); 331fail3: iounmap(nuc900_nand->reg);
332fail2: release_mem_region(res->start, resource_size(res)); 332fail2: release_mem_region(res->start, resource_size(res));
333fail1: kfree(w90p910_nand); 333fail1: kfree(nuc900_nand);
334 return retval; 334 return retval;
335} 335}
336 336
337static int __devexit w90p910_nand_remove(struct platform_device *pdev) 337static int __devexit nuc900_nand_remove(struct platform_device *pdev)
338{ 338{
339 struct w90p910_nand *w90p910_nand = platform_get_drvdata(pdev); 339 struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
340 struct resource *res; 340 struct resource *res;
341 341
342 iounmap(w90p910_nand->reg); 342 iounmap(nuc900_nand->reg);
343 343
344 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 344 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
345 release_mem_region(res->start, resource_size(res)); 345 release_mem_region(res->start, resource_size(res));
346 346
347 clk_disable(w90p910_nand->clk); 347 clk_disable(nuc900_nand->clk);
348 clk_put(w90p910_nand->clk); 348 clk_put(nuc900_nand->clk);
349 349
350 kfree(w90p910_nand); 350 kfree(nuc900_nand);
351 351
352 platform_set_drvdata(pdev, NULL); 352 platform_set_drvdata(pdev, NULL);
353 353
354 return 0; 354 return 0;
355} 355}
356 356
357static struct platform_driver w90p910_nand_driver = { 357static struct platform_driver nuc900_nand_driver = {
358 .probe = w90p910_nand_probe, 358 .probe = nuc900_nand_probe,
359 .remove = __devexit_p(w90p910_nand_remove), 359 .remove = __devexit_p(nuc900_nand_remove),
360 .driver = { 360 .driver = {
361 .name = "w90p910-fmi", 361 .name = "nuc900-fmi",
362 .owner = THIS_MODULE, 362 .owner = THIS_MODULE,
363 }, 363 },
364}; 364};
365 365
366static int __init w90p910_nand_init(void) 366static int __init nuc900_nand_init(void)
367{ 367{
368 return platform_driver_register(&w90p910_nand_driver); 368 return platform_driver_register(&nuc900_nand_driver);
369} 369}
370 370
371static void __exit w90p910_nand_exit(void) 371static void __exit nuc900_nand_exit(void)
372{ 372{
373 platform_driver_unregister(&w90p910_nand_driver); 373 platform_driver_unregister(&nuc900_nand_driver);
374} 374}
375 375
376module_init(w90p910_nand_init); 376module_init(nuc900_nand_init);
377module_exit(w90p910_nand_exit); 377module_exit(nuc900_nand_exit);
378 378
379MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); 379MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
380MODULE_DESCRIPTION("w90p910 nand driver!"); 380MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!");
381MODULE_LICENSE("GPL"); 381MODULE_LICENSE("GPL");
382MODULE_ALIAS("platform:w90p910-fmi"); 382MODULE_ALIAS("platform:nuc900-fmi");
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 7545568fce47..ee87325c7712 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -292,11 +292,14 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
292 u32 *p = (u32 *)buf; 292 u32 *p = (u32 *)buf;
293 293
294 /* take care of subpage reads */ 294 /* take care of subpage reads */
295 for (; len % 4 != 0; ) { 295 if (len % 4) {
296 *buf++ = __raw_readb(info->nand.IO_ADDR_R); 296 if (info->nand.options & NAND_BUSWIDTH_16)
297 len--; 297 omap_read_buf16(mtd, buf, len % 4);
298 else
299 omap_read_buf8(mtd, buf, len % 4);
300 p = (u32 *) (buf + len % 4);
301 len -= len % 4;
298 } 302 }
299 p = (u32 *) buf;
300 303
301 /* configure and start prefetch transfer */ 304 /* configure and start prefetch transfer */
302 ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0); 305 ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
@@ -502,7 +505,7 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
502 omap_write_buf_pref(mtd, buf, len); 505 omap_write_buf_pref(mtd, buf, len);
503 else 506 else
504 /* start transfer in DMA mode */ 507 /* start transfer in DMA mode */
505 omap_nand_dma_transfer(mtd, buf, len, 0x1); 508 omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
506} 509}
507 510
508/** 511/**
@@ -1028,7 +1031,8 @@ out_free_info:
1028static int omap_nand_remove(struct platform_device *pdev) 1031static int omap_nand_remove(struct platform_device *pdev)
1029{ 1032{
1030 struct mtd_info *mtd = platform_get_drvdata(pdev); 1033 struct mtd_info *mtd = platform_get_drvdata(pdev);
1031 struct omap_nand_info *info = mtd->priv; 1034 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1035 mtd);
1032 1036
1033 platform_set_drvdata(pdev, NULL); 1037 platform_set_drvdata(pdev, NULL);
1034 if (use_dma) 1038 if (use_dma)
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index d60fc5719fef..f4444fe960a1 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -80,6 +80,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
80 struct mtd_info *mtd; 80 struct mtd_info *mtd;
81 struct nand_chip *nc; 81 struct nand_chip *nc;
82 struct orion_nand_data *board; 82 struct orion_nand_data *board;
83 struct resource *res;
83 void __iomem *io_base; 84 void __iomem *io_base;
84 int ret = 0; 85 int ret = 0;
85#ifdef CONFIG_MTD_PARTITIONS 86#ifdef CONFIG_MTD_PARTITIONS
@@ -95,8 +96,13 @@ static int __init orion_nand_probe(struct platform_device *pdev)
95 } 96 }
96 mtd = (struct mtd_info *)(nc + 1); 97 mtd = (struct mtd_info *)(nc + 1);
97 98
98 io_base = ioremap(pdev->resource[0].start, 99 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
99 pdev->resource[0].end - pdev->resource[0].start + 1); 100 if (!res) {
101 ret = -ENODEV;
102 goto no_res;
103 }
104
105 io_base = ioremap(res->start, resource_size(res));
100 if (!io_base) { 106 if (!io_base) {
101 printk(KERN_ERR "orion_nand: ioremap failed\n"); 107 printk(KERN_ERR "orion_nand: ioremap failed\n");
102 ret = -EIO; 108 ret = -EIO;
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index a8b9376cf324..090a05c12cbe 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -209,7 +209,7 @@ static int __devexit pasemi_nand_remove(struct of_device *ofdev)
209 return 0; 209 return 0;
210} 210}
211 211
212static struct of_device_id pasemi_nand_match[] = 212static const struct of_device_id pasemi_nand_match[] =
213{ 213{
214 { 214 {
215 .compatible = "pasemi,localbus-nand", 215 .compatible = "pasemi,localbus-nand",
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 5d55152162cf..e02fa4f0e3c9 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1320,6 +1320,17 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1320 goto fail_free_irq; 1320 goto fail_free_irq;
1321 } 1321 }
1322 1322
1323 if (mtd_has_cmdlinepart()) {
1324 static const char *probes[] = { "cmdlinepart", NULL };
1325 struct mtd_partition *parts;
1326 int nr_parts;
1327
1328 nr_parts = parse_mtd_partitions(mtd, probes, &parts, 0);
1329
1330 if (nr_parts)
1331 return add_mtd_partitions(mtd, parts, nr_parts);
1332 }
1333
1323 return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); 1334 return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
1324 1335
1325fail_free_irq: 1336fail_free_irq:
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
new file mode 100644
index 000000000000..96bfbd8e8fdb
--- /dev/null
+++ b/drivers/mtd/nand/r852.c
@@ -0,0 +1,1139 @@
1/*
2 * Copyright © 2009 - Maxim Levitsky
3 * driver for Ricoh xD readers
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/jiffies.h>
13#include <linux/workqueue.h>
14#include <linux/interrupt.h>
15#include <linux/pci.h>
16#include <linux/pci_ids.h>
17#include <linux/delay.h>
18#include <asm/byteorder.h>
19#include <linux/sched.h>
20#include "sm_common.h"
21#include "r852.h"
22
23
24static int r852_enable_dma = 1;
25module_param(r852_enable_dma, bool, S_IRUGO);
26MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");
27
28static int debug;
29module_param(debug, int, S_IRUGO | S_IWUSR);
30MODULE_PARM_DESC(debug, "Debug level (0-2)");
31
32/* read register */
33static inline uint8_t r852_read_reg(struct r852_device *dev, int address)
34{
35 uint8_t reg = readb(dev->mmio + address);
36 return reg;
37}
38
39/* write register */
40static inline void r852_write_reg(struct r852_device *dev,
41 int address, uint8_t value)
42{
43 writeb(value, dev->mmio + address);
44 mmiowb();
45}
46
47
48/* read dword sized register */
49static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address)
50{
51 uint32_t reg = le32_to_cpu(readl(dev->mmio + address));
52 return reg;
53}
54
55/* write dword sized register */
56static inline void r852_write_reg_dword(struct r852_device *dev,
57 int address, uint32_t value)
58{
59 writel(cpu_to_le32(value), dev->mmio + address);
60 mmiowb();
61}
62
63/* returns pointer to our private structure */
64static inline struct r852_device *r852_get_dev(struct mtd_info *mtd)
65{
66 struct nand_chip *chip = (struct nand_chip *)mtd->priv;
67 return (struct r852_device *)chip->priv;
68}
69
70
71/* check if controller supports dma */
72static void r852_dma_test(struct r852_device *dev)
73{
74 dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) &
75 (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2);
76
77 if (!dev->dma_usable)
78 message("Non dma capable device detected, dma disabled");
79
80 if (!r852_enable_dma) {
81 message("disabling dma on user request");
82 dev->dma_usable = 0;
83 }
84}
85
86/*
87 * Enable dma. Enables ether first or second stage of the DMA,
88 * Expects dev->dma_dir and dev->dma_state be set
89 */
90static void r852_dma_enable(struct r852_device *dev)
91{
92 uint8_t dma_reg, dma_irq_reg;
93
94 /* Set up dma settings */
95 dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS);
96 dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY);
97
98 if (dev->dma_dir)
99 dma_reg |= R852_DMA_READ;
100
101 if (dev->dma_state == DMA_INTERNAL) {
102 dma_reg |= R852_DMA_INTERNAL;
103 /* Precaution to make sure HW doesn't write */
104 /* to random kernel memory */
105 r852_write_reg_dword(dev, R852_DMA_ADDR,
106 cpu_to_le32(dev->phys_bounce_buffer));
107 } else {
108 dma_reg |= R852_DMA_MEMORY;
109 r852_write_reg_dword(dev, R852_DMA_ADDR,
110 cpu_to_le32(dev->phys_dma_addr));
111 }
112
113 /* Precaution: make sure write reached the device */
114 r852_read_reg_dword(dev, R852_DMA_ADDR);
115
116 r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg);
117
118 /* Set dma irq */
119 dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
120 r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
121 dma_irq_reg |
122 R852_DMA_IRQ_INTERNAL |
123 R852_DMA_IRQ_ERROR |
124 R852_DMA_IRQ_MEMORY);
125}
126
127/*
128 * Disable dma, called from the interrupt handler, which specifies
129 * success of the operation via 'error' argument
130 */
131static void r852_dma_done(struct r852_device *dev, int error)
132{
133 WARN_ON(dev->dma_stage == 0);
134
135 r852_write_reg_dword(dev, R852_DMA_IRQ_STA,
136 r852_read_reg_dword(dev, R852_DMA_IRQ_STA));
137
138 r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0);
139 r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0);
140
141 /* Precaution to make sure HW doesn't write to random kernel memory */
142 r852_write_reg_dword(dev, R852_DMA_ADDR,
143 cpu_to_le32(dev->phys_bounce_buffer));
144 r852_read_reg_dword(dev, R852_DMA_ADDR);
145
146 dev->dma_error = error;
147 dev->dma_stage = 0;
148
149 if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
150 pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN,
151 dev->dma_dir ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
152 complete(&dev->dma_done);
153}
154
155/*
156 * Wait, till dma is done, which includes both phases of it
157 */
158static int r852_dma_wait(struct r852_device *dev)
159{
160 long timeout = wait_for_completion_timeout(&dev->dma_done,
161 msecs_to_jiffies(1000));
162 if (!timeout) {
163 dbg("timeout waiting for DMA interrupt");
164 return -ETIMEDOUT;
165 }
166
167 return 0;
168}
169
170/*
171 * Read/Write one page using dma. Only pages can be read (512 bytes)
172*/
173static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
174{
175 int bounce = 0;
176 unsigned long flags;
177 int error;
178
179 dev->dma_error = 0;
180
181 /* Set dma direction */
182 dev->dma_dir = do_read;
183 dev->dma_stage = 1;
184
185 dbg_verbose("doing dma %s ", do_read ? "read" : "write");
186
187 /* Set intial dma state: for reading first fill on board buffer,
188 from device, for writes first fill the buffer from memory*/
189 dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY;
190
191 /* if incoming buffer is not page aligned, we should do bounce */
192 if ((unsigned long)buf & (R852_DMA_LEN-1))
193 bounce = 1;
194
195 if (!bounce) {
196 dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf,
197 R852_DMA_LEN,
198 (do_read ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
199
200 if (pci_dma_mapping_error(dev->pci_dev, dev->phys_dma_addr))
201 bounce = 1;
202 }
203
204 if (bounce) {
205 dbg_verbose("dma: using bounce buffer");
206 dev->phys_dma_addr = dev->phys_bounce_buffer;
207 if (!do_read)
208 memcpy(dev->bounce_buffer, buf, R852_DMA_LEN);
209 }
210
211 /* Enable DMA */
212 spin_lock_irqsave(&dev->irqlock, flags);
213 r852_dma_enable(dev);
214 spin_unlock_irqrestore(&dev->irqlock, flags);
215
216 /* Wait till complete */
217 error = r852_dma_wait(dev);
218
219 if (error) {
220 r852_dma_done(dev, error);
221 return;
222 }
223
224 if (do_read && bounce)
225 memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN);
226}
227
228/*
229 * Program data lines of the nand chip to send data to it
230 */
231void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
232{
233 struct r852_device *dev = r852_get_dev(mtd);
234 uint32_t reg;
235
236 /* Don't allow any access to hardware if we suspect card removal */
237 if (dev->card_unstable)
238 return;
239
240 /* Special case for whole sector read */
241 if (len == R852_DMA_LEN && dev->dma_usable) {
242 r852_do_dma(dev, (uint8_t *)buf, 0);
243 return;
244 }
245
246 /* write DWORD chinks - faster */
247 while (len) {
248 reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
249 r852_write_reg_dword(dev, R852_DATALINE, reg);
250 buf += 4;
251 len -= 4;
252
253 }
254
255 /* write rest */
256 while (len)
257 r852_write_reg(dev, R852_DATALINE, *buf++);
258}
259
260/*
261 * Read data lines of the nand chip to retrieve data
262 */
263void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
264{
265 struct r852_device *dev = r852_get_dev(mtd);
266 uint32_t reg;
267
268 if (dev->card_unstable) {
269 /* since we can't signal error here, at least, return
270 predictable buffer */
271 memset(buf, 0, len);
272 return;
273 }
274
275 /* special case for whole sector read */
276 if (len == R852_DMA_LEN && dev->dma_usable) {
277 r852_do_dma(dev, buf, 1);
278 return;
279 }
280
281 /* read in dword sized chunks */
282 while (len >= 4) {
283
284 reg = r852_read_reg_dword(dev, R852_DATALINE);
285 *buf++ = reg & 0xFF;
286 *buf++ = (reg >> 8) & 0xFF;
287 *buf++ = (reg >> 16) & 0xFF;
288 *buf++ = (reg >> 24) & 0xFF;
289 len -= 4;
290 }
291
292 /* read the reset by bytes */
293 while (len--)
294 *buf++ = r852_read_reg(dev, R852_DATALINE);
295}
296
297/*
298 * Read one byte from nand chip
299 */
300static uint8_t r852_read_byte(struct mtd_info *mtd)
301{
302 struct r852_device *dev = r852_get_dev(mtd);
303
304 /* Same problem as in r852_read_buf.... */
305 if (dev->card_unstable)
306 return 0;
307
308 return r852_read_reg(dev, R852_DATALINE);
309}
310
311
312/*
313 * Readback the buffer to verify it
314 */
315int r852_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
316{
317 struct r852_device *dev = r852_get_dev(mtd);
318
319 /* We can't be sure about anything here... */
320 if (dev->card_unstable)
321 return -1;
322
323 /* This will never happen, unless you wired up a nand chip
324 with > 512 bytes page size to the reader */
325 if (len > SM_SECTOR_SIZE)
326 return 0;
327
328 r852_read_buf(mtd, dev->tmp_buffer, len);
329 return memcmp(buf, dev->tmp_buffer, len);
330}
331
332/*
333 * Control several chip lines & send commands
334 */
335void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
336{
337 struct r852_device *dev = r852_get_dev(mtd);
338
339 if (dev->card_unstable)
340 return;
341
342 if (ctrl & NAND_CTRL_CHANGE) {
343
344 dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND |
345 R852_CTL_ON | R852_CTL_CARDENABLE);
346
347 if (ctrl & NAND_ALE)
348 dev->ctlreg |= R852_CTL_DATA;
349
350 if (ctrl & NAND_CLE)
351 dev->ctlreg |= R852_CTL_COMMAND;
352
353 if (ctrl & NAND_NCE)
354 dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON);
355 else
356 dev->ctlreg &= ~R852_CTL_WRITE;
357
358 /* when write is stareted, enable write access */
359 if (dat == NAND_CMD_ERASE1)
360 dev->ctlreg |= R852_CTL_WRITE;
361
362 r852_write_reg(dev, R852_CTL, dev->ctlreg);
363 }
364
365 /* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need
366 to set write mode */
367 if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) {
368 dev->ctlreg |= R852_CTL_WRITE;
369 r852_write_reg(dev, R852_CTL, dev->ctlreg);
370 }
371
372 if (dat != NAND_CMD_NONE)
373 r852_write_reg(dev, R852_DATALINE, dat);
374}
375
376/*
377 * Wait till card is ready.
378 * based on nand_wait, but returns errors on DMA error
379 */
int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct r852_device *dev = (struct r852_device *)chip->priv;

	unsigned long timeout;
	int status;

	/* Erase needs a much longer timeout than ordinary operations */
	timeout = jiffies + (chip->state == FL_ERASING ?
		msecs_to_jiffies(400) : msecs_to_jiffies(20));

	/* Poll the ready line until it goes high or the timeout hits */
	while (time_before(jiffies, timeout))
		if (chip->dev_ready(mtd))
			break;

	/* Read the chip status byte, same as nand_wait does */
	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
	status = (int)chip->read_byte(mtd);

	/* Unfortunately, no way to send detailed error status... */
	if (dev->dma_error) {
		status |= NAND_STATUS_FAIL;
		dev->dma_error = 0;
	}
	return status;
}
404
405/*
406 * Check if card is ready
407 */
408
409int r852_ready(struct mtd_info *mtd)
410{
411 struct r852_device *dev = r852_get_dev(mtd);
412 return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
413}
414
415
416/*
417 * Set ECC engine mode
418*/
419
void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
{
	struct r852_device *dev = r852_get_dev(mtd);

	if (dev->card_unstable)
		return;

	switch (mode) {
	case NAND_ECC_READ:
	case NAND_ECC_WRITE:
		/* enable ecc generation/check*/
		dev->ctlreg |= R852_CTL_ECC_ENABLE;

		/* flush ecc buffer: switch reg #0 to ecc access mode,
		   do a dummy read to drain stale data, then switch back */
		r852_write_reg(dev, R852_CTL,
			dev->ctlreg | R852_CTL_ECC_ACCESS);

		r852_read_reg_dword(dev, R852_DATALINE);
		r852_write_reg(dev, R852_CTL, dev->ctlreg);
		return;

	case NAND_ECC_READSYN:
		/* disable ecc generation */
		dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
		r852_write_reg(dev, R852_CTL, dev->ctlreg);
	}
}
447
448/*
449 * Calculate ECC, only used for writes
450 */
451
int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
				uint8_t *ecc_code)
{
	struct r852_device *dev = r852_get_dev(mtd);
	struct sm_oob *oob = (struct sm_oob *)ecc_code;
	uint32_t ecc1, ecc2;

	if (dev->card_unstable)
		return 0;

	/* Stop ecc generation and switch reg #0 to ecc access mode */
	dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
	r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);

	/* Two consecutive dword reads return the generated checksums
	   (see the register description in r852.h) - order matters */
	ecc1 = r852_read_reg_dword(dev, R852_DATALINE);
	ecc2 = r852_read_reg_dword(dev, R852_DATALINE);

	/* Unpack 3 little-endian ecc bytes from each checksum into
	   their oob positions */
	oob->ecc1[0] = (ecc1) & 0xFF;
	oob->ecc1[1] = (ecc1 >> 8) & 0xFF;
	oob->ecc1[2] = (ecc1 >> 16) & 0xFF;

	oob->ecc2[0] = (ecc2) & 0xFF;
	oob->ecc2[1] = (ecc2 >> 8) & 0xFF;
	oob->ecc2[2] = (ecc2 >> 16) & 0xFF;

	/* Leave ecc access mode */
	r852_write_reg(dev, R852_CTL, dev->ctlreg);
	return 0;
}
479
480/*
481 * Correct the data using ECC, hw did almost everything for us
482 */
483
484int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
485 uint8_t *read_ecc, uint8_t *calc_ecc)
486{
487 uint16_t ecc_reg;
488 uint8_t ecc_status, err_byte;
489 int i, error = 0;
490
491 struct r852_device *dev = r852_get_dev(mtd);
492
493 if (dev->card_unstable)
494 return 0;
495
496 r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
497 ecc_reg = r852_read_reg_dword(dev, R852_DATALINE);
498 r852_write_reg(dev, R852_CTL, dev->ctlreg);
499
500 for (i = 0 ; i <= 1 ; i++) {
501
502 ecc_status = (ecc_reg >> 8) & 0xFF;
503
504 /* ecc uncorrectable error */
505 if (ecc_status & R852_ECC_FAIL) {
506 dbg("ecc: unrecoverable error, in half %d", i);
507 error = -1;
508 goto exit;
509 }
510
511 /* correctable error */
512 if (ecc_status & R852_ECC_CORRECTABLE) {
513
514 err_byte = ecc_reg & 0xFF;
515 dbg("ecc: recoverable error, "
516 "in half %d, byte %d, bit %d", i,
517 err_byte, ecc_status & R852_ECC_ERR_BIT_MSK);
518
519 dat[err_byte] ^=
520 1 << (ecc_status & R852_ECC_ERR_BIT_MSK);
521 error++;
522 }
523
524 dat += 256;
525 ecc_reg >>= 16;
526 }
527exit:
528 return error;
529}
530
531/*
532 * This is copy of nand_read_oob_std
533 * nand_read_oob_syndrome assumes we can send column address - we can't
534 */
535static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
536 int page, int sndcmd)
537{
538 if (sndcmd) {
539 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
540 sndcmd = 0;
541 }
542 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
543 return sndcmd;
544}
545
546/*
547 * Start the nand engine
548 */
549
void r852_engine_enable(struct r852_device *dev)
{
	/* The order of the CTL and HW writes depends on the current
	   hardware state, indicated by the R852_HW_UNKNOWN bit */
	if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
		r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
		r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
	} else {
		r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
		r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
	}
	/* settle delay - presumably lets the hw finish its reset;
	   the value looks empirical (TODO: confirm) */
	msleep(300);
	r852_write_reg(dev, R852_CTL, 0);
}
562
563
564/*
565 * Stop the nand engine
566 */
567
void r852_engine_disable(struct r852_device *dev)
{
	/* Disable the hardware block and hold the controller in reset */
	r852_write_reg_dword(dev, R852_HW, 0);
	r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
}
573
574/*
575 * Test if card is present
576 */
577
void r852_card_update_present(struct r852_device *dev)
{
	unsigned long flags;
	uint8_t reg;

	/* irqlock also protects card_detected, which the IRQ handler
	   updates on insert/remove interrupts */
	spin_lock_irqsave(&dev->irqlock, flags);
	reg = r852_read_reg(dev, R852_CARD_STA);
	dev->card_detected = !!(reg & R852_CARD_STA_PRESENT);
	spin_unlock_irqrestore(&dev->irqlock, flags);
}
588
589/*
590 * Update card detection IRQ state according to current card state
591 * which is read in r852_card_update_present
592 */
593void r852_update_card_detect(struct r852_device *dev)
594{
595 int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
596 dev->card_unstable = 0;
597
598 card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT);
599 card_detect_reg |= R852_CARD_IRQ_GENABLE;
600
601 card_detect_reg |= dev->card_detected ?
602 R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT;
603
604 r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
605}
606
607ssize_t r852_media_type_show(struct device *sys_dev,
608 struct device_attribute *attr, char *buf)
609{
610 struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
611 struct r852_device *dev = r852_get_dev(mtd);
612 char *data = dev->sm ? "smartmedia" : "xd";
613
614 strcpy(buf, data);
615 return strlen(data);
616}
617
618DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);
619
620
621/* Detect properties of card in slot */
void r852_update_media_status(struct r852_device *dev)
{
	uint8_t reg;
	unsigned long flags;
	int readonly;

	spin_lock_irqsave(&dev->irqlock, flags);
	if (!dev->card_detected) {
		message("card removed");
		spin_unlock_irqrestore(&dev->irqlock, flags);
		return ;
	}

	/* write protect state comes from the card status register */
	readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO;
	/* SmartMedia vs xD: see the R852_DMA_CAP bit notes in r852.h */
	reg = r852_read_reg(dev, R852_DMA_CAP);
	dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT);

	message("detected %s %s card in slot",
		dev->sm ? "SmartMedia" : "xD",
		readonly ? "readonly" : "writeable");

	dev->readonly = readonly;
	spin_unlock_irqrestore(&dev->irqlock, flags);
}
646
647/*
648 * Register the nand device
649 * Called when the card is detected
650 */
651int r852_register_nand_device(struct r852_device *dev)
652{
653 dev->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
654
655 if (!dev->mtd)
656 goto error1;
657
658 WARN_ON(dev->card_registred);
659
660 dev->mtd->owner = THIS_MODULE;
661 dev->mtd->priv = dev->chip;
662 dev->mtd->dev.parent = &dev->pci_dev->dev;
663
664 if (dev->readonly)
665 dev->chip->options |= NAND_ROM;
666
667 r852_engine_enable(dev);
668
669 if (sm_register_device(dev->mtd))
670 goto error2;
671
672 if (device_create_file(&dev->mtd->dev, &dev_attr_media_type))
673 message("can't create media type sysfs attribute");
674
675 dev->card_registred = 1;
676 return 0;
677error2:
678 kfree(dev->mtd);
679error1:
680 /* Force card redetect */
681 dev->card_detected = 0;
682 return -1;
683}
684
685/*
686 * Unregister the card
687 */
688
void r852_unregister_nand_device(struct r852_device *dev)
{
	/* Nothing to do unless r852_register_nand_device succeeded */
	if (!dev->card_registred)
		return;

	device_remove_file(&dev->mtd->dev, &dev_attr_media_type);
	nand_release(dev->mtd);
	r852_engine_disable(dev);
	dev->card_registred = 0;
	/* mtd was allocated in r852_register_nand_device */
	kfree(dev->mtd);
	dev->mtd = NULL;
}
701
702/* Card state updater */
/* Card state updater - runs ~100ms after a detect IRQ (see r852_irq) */
void r852_card_detect_work(struct work_struct *work)
{
	struct r852_device *dev =
		container_of(work, struct r852_device, card_detect_work.work);

	/* Re-read the present bit now that the card had time to settle */
	r852_card_update_present(dev);
	dev->card_unstable = 0;

	/* False alarm */
	if (dev->card_detected == dev->card_registred)
		goto exit;

	/* Read media properties */
	r852_update_media_status(dev);

	/* Register the card */
	if (dev->card_detected)
		r852_register_nand_device(dev);
	else
		r852_unregister_nand_device(dev);
exit:
	/* Update detection logic */
	r852_update_card_detect(dev);
}
727
728/* Ack + disable IRQ generation */
/* Ack + disable IRQ generation */
static void r852_disable_irqs(struct r852_device *dev)
{
	uint8_t reg;
	/* mask card detect interrupts */
	reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
	r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK);

	/* mask dma interrupts.
	   NOTE(review): reg is 8 bit wide, so the upper bits of the dword
	   enable register are written back as zero - verify intentional */
	reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
	r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
			reg & ~R852_DMA_IRQ_MASK);

	/* ack anything still pending */
	r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK);
	r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK);
}
742
743/* Interrupt handler */
/* Interrupt handler */
static irqreturn_t r852_irq(int irq, void *data)
{
	struct r852_device *dev = (struct r852_device *)data;

	uint8_t card_status, dma_status;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&dev->irqlock, flags);

	/* We can receive a shared interrupt while pci is suspended;
		in that case reads will return 0xFFFFFFFF.... */
	if (dev->insuspend)
		goto out;

	/* handle card detection interrupts first */
	card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
	r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);

	if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {

		ret = IRQ_HANDLED;
		dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);

		/* we shouldn't receive any interrupts while we wait for
			the card to settle */
		WARN_ON(dev->card_unstable);

		/* disable irqs while card is unstable */
		/* this will timeout DMA if active, but better that garbage */
		r852_disable_irqs(dev);

		if (dev->card_unstable)
			goto out;

		/* let the card state settle a bit, and then do the work */
		dev->card_unstable = 1;
		queue_delayed_work(dev->card_workqueue,
			&dev->card_detect_work, msecs_to_jiffies(100));
		goto out;
	}


	/* Handle dma interrupts */
	dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
	r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);

	if (dma_status & R852_DMA_IRQ_MASK) {

		ret = IRQ_HANDLED;

		if (dma_status & R852_DMA_IRQ_ERROR) {
			dbg("recieved dma error IRQ");
			r852_dma_done(dev, -EIO);
			goto out;
		}

		/* received DMA interrupt out of nowhere? */
		WARN_ON_ONCE(dev->dma_stage == 0);

		if (dev->dma_stage == 0)
			goto out;

		/* done device access */
		if (dev->dma_state == DMA_INTERNAL &&
			(dma_status & R852_DMA_IRQ_INTERNAL)) {

			dev->dma_state = DMA_MEMORY;
			dev->dma_stage++;
		}

		/* done memory DMA */
		if (dev->dma_state == DMA_MEMORY &&
			(dma_status & R852_DMA_IRQ_MEMORY)) {
			dev->dma_state = DMA_INTERNAL;
			dev->dma_stage++;
		}

		/* Enable 2nd half of dma dance */
		if (dev->dma_stage == 2)
			r852_dma_enable(dev);

		/* Operation done */
		if (dev->dma_stage == 3)
			r852_dma_done(dev, 0);
		goto out;
	}

	/* Handle unknown interrupts */
	if (dma_status)
		dbg("bad dma IRQ status = %x", dma_status);

	if (card_status & ~R852_CARD_STA_CD)
		dbg("strange card status = %x", card_status);

out:
	spin_unlock_irqrestore(&dev->irqlock, flags);
	return ret;
}
843
844int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
845{
846 int error;
847 struct nand_chip *chip;
848 struct r852_device *dev;
849
850 /* pci initialization */
851 error = pci_enable_device(pci_dev);
852
853 if (error)
854 goto error1;
855
856 pci_set_master(pci_dev);
857
858 error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
859 if (error)
860 goto error2;
861
862 error = pci_request_regions(pci_dev, DRV_NAME);
863
864 if (error)
865 goto error3;
866
867 error = -ENOMEM;
868
869 /* init nand chip, but register it only on card insert */
870 chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
871
872 if (!chip)
873 goto error4;
874
875 /* commands */
876 chip->cmd_ctrl = r852_cmdctl;
877 chip->waitfunc = r852_wait;
878 chip->dev_ready = r852_ready;
879
880 /* I/O */
881 chip->read_byte = r852_read_byte;
882 chip->read_buf = r852_read_buf;
883 chip->write_buf = r852_write_buf;
884 chip->verify_buf = r852_verify_buf;
885
886 /* ecc */
887 chip->ecc.mode = NAND_ECC_HW_SYNDROME;
888 chip->ecc.size = R852_DMA_LEN;
889 chip->ecc.bytes = SM_OOB_SIZE;
890 chip->ecc.hwctl = r852_ecc_hwctl;
891 chip->ecc.calculate = r852_ecc_calculate;
892 chip->ecc.correct = r852_ecc_correct;
893
894 /* TODO: hack */
895 chip->ecc.read_oob = r852_read_oob;
896
897 /* init our device structure */
898 dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);
899
900 if (!dev)
901 goto error5;
902
903 chip->priv = dev;
904 dev->chip = chip;
905 dev->pci_dev = pci_dev;
906 pci_set_drvdata(pci_dev, dev);
907
908 dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN,
909 &dev->phys_bounce_buffer);
910
911 if (!dev->bounce_buffer)
912 goto error6;
913
914
915 error = -ENODEV;
916 dev->mmio = pci_ioremap_bar(pci_dev, 0);
917
918 if (!dev->mmio)
919 goto error7;
920
921 error = -ENOMEM;
922 dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
923
924 if (!dev->tmp_buffer)
925 goto error8;
926
927 init_completion(&dev->dma_done);
928
929 dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
930
931 if (!dev->card_workqueue)
932 goto error9;
933
934 INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work);
935
936 /* shutdown everything - precation */
937 r852_engine_disable(dev);
938 r852_disable_irqs(dev);
939
940 r852_dma_test(dev);
941
942 /*register irq handler*/
943 error = -ENODEV;
944 if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
945 DRV_NAME, dev))
946 goto error10;
947
948 dev->irq = pci_dev->irq;
949 spin_lock_init(&dev->irqlock);
950
951 /* kick initial present test */
952 dev->card_detected = 0;
953 r852_card_update_present(dev);
954 queue_delayed_work(dev->card_workqueue,
955 &dev->card_detect_work, 0);
956
957
958 printk(KERN_NOTICE DRV_NAME ": driver loaded succesfully\n");
959 return 0;
960
961error10:
962 destroy_workqueue(dev->card_workqueue);
963error9:
964 kfree(dev->tmp_buffer);
965error8:
966 pci_iounmap(pci_dev, dev->mmio);
967error7:
968 pci_free_consistent(pci_dev, R852_DMA_LEN,
969 dev->bounce_buffer, dev->phys_bounce_buffer);
970error6:
971 kfree(dev);
972error5:
973 kfree(chip);
974error4:
975 pci_release_regions(pci_dev);
976error3:
977error2:
978 pci_disable_device(pci_dev);
979error1:
980 return error;
981}
982
/* PCI removal: tear everything down in roughly the reverse order
   of r852_probe */
void r852_remove(struct pci_dev *pci_dev)
{
	struct r852_device *dev = pci_get_drvdata(pci_dev);

	/* Stop detect workqueue -
		we are going to unregister the device anyway*/
	cancel_delayed_work_sync(&dev->card_detect_work);
	destroy_workqueue(dev->card_workqueue);

	/* Unregister the device, this might make more IO */
	r852_unregister_nand_device(dev);

	/* Stop interrupts */
	r852_disable_irqs(dev);
	synchronize_irq(dev->irq);
	free_irq(dev->irq, dev);

	/* Cleanup */
	kfree(dev->tmp_buffer);
	pci_iounmap(pci_dev, dev->mmio);
	pci_free_consistent(pci_dev, R852_DMA_LEN,
			dev->bounce_buffer, dev->phys_bounce_buffer);

	kfree(dev->chip);
	kfree(dev);

	/* Shutdown the PCI device */
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
}
1013
/* Quiesce the hardware on system shutdown/reboot */
void r852_shutdown(struct pci_dev *pci_dev)
{
	struct r852_device *dev = pci_get_drvdata(pci_dev);

	cancel_delayed_work_sync(&dev->card_detect_work);
	r852_disable_irqs(dev);
	synchronize_irq(dev->irq);
	pci_disable_device(pci_dev);
}
1023
1024#ifdef CONFIG_PM
int r852_suspend(struct device *device)
{
	struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
	unsigned long flags;

	/* Refuse to suspend in the middle of card access */
	if (dev->ctlreg & R852_CTL_CARDENABLE)
		return -EBUSY;

	/* First make sure the detect work is gone */
	cancel_delayed_work_sync(&dev->card_detect_work);

	/* Turn off the interrupts and stop the device */
	r852_disable_irqs(dev);
	r852_engine_disable(dev);

	spin_lock_irqsave(&dev->irqlock, flags);
	dev->insuspend = 1;
	spin_unlock_irqrestore(&dev->irqlock, flags);

	/* At that point, even if interrupt handler is running, it will quit */
	/* So wait for this to happen explicitly */
	synchronize_irq(dev->irq);

	/* If card was pulled off just during the suspend, which is very
	   unlikely, we will remove it on resume, it is too late now
	   anyway... */
	dev->card_unstable = 0;

	pci_save_state(to_pci_dev(device));
	return pci_prepare_to_sleep(to_pci_dev(device));
}
1056
1057int r852_resume(struct device *device)
1058{
1059 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
1060 unsigned long flags;
1061
1062 /* Turn on the hardware */
1063 pci_back_from_sleep(to_pci_dev(device));
1064 pci_restore_state(to_pci_dev(device));
1065
1066 r852_disable_irqs(dev);
1067 r852_card_update_present(dev);
1068 r852_engine_disable(dev);
1069
1070
1071 /* Now its safe for IRQ to run */
1072 spin_lock_irqsave(&dev->irqlock, flags);
1073 dev->insuspend = 0;
1074 spin_unlock_irqrestore(&dev->irqlock, flags);
1075
1076
1077 /* If card status changed, just do the work */
1078 if (dev->card_detected != dev->card_registred) {
1079 dbg("card was %s during low power state",
1080 dev->card_detected ? "added" : "removed");
1081
1082 queue_delayed_work(dev->card_workqueue,
1083 &dev->card_detect_work, 1000);
1084 return 0;
1085 }
1086
1087 /* Otherwise, initialize the card */
1088 if (dev->card_registred) {
1089 r852_engine_enable(dev);
1090 dev->chip->select_chip(dev->mtd, 0);
1091 dev->chip->cmdfunc(dev->mtd, NAND_CMD_RESET, -1, -1);
1092 dev->chip->select_chip(dev->mtd, -1);
1093 }
1094
1095 /* Program card detection IRQ */
1096 r852_update_card_detect(dev);
1097 return 0;
1098}
1099#else
1100#define r852_suspend NULL
1101#define r852_resume NULL
1102#endif
1103
/* PCI ids this driver binds to; the list is empty-entry terminated */
static const struct pci_device_id r852_pci_id_tbl[] = {

	{ PCI_VDEVICE(RICOH, 0x0852), },
	{ },
};
1109
1110MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
1111
1112SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
1113
1114
/* PCI driver glue: probe/remove/shutdown plus power management ops */
static struct pci_driver r852_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= r852_pci_id_tbl,
	.probe		= r852_probe,
	.remove		= r852_remove,
	.shutdown	= r852_shutdown,
	.driver.pm	= &r852_pm_ops,
};
1123
/* Module entry point: register the PCI driver */
static __init int r852_module_init(void)
{
	return pci_register_driver(&r852_pci_driver);
}
1128
/* Module exit point: unregister the PCI driver */
static void __exit r852_module_exit(void)
{
	pci_unregister_driver(&r852_pci_driver);
}
1133
1134module_init(r852_module_init);
1135module_exit(r852_module_exit);
1136
1137MODULE_LICENSE("GPL");
1138MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
1139MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");
diff --git a/drivers/mtd/nand/r852.h b/drivers/mtd/nand/r852.h
new file mode 100644
index 000000000000..8096cc280c73
--- /dev/null
+++ b/drivers/mtd/nand/r852.h
@@ -0,0 +1,163 @@
1/*
2 * Copyright © 2009 - Maxim Levitsky
3 * driver for Ricoh xD readers
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/pci.h>
11#include <linux/completion.h>
12#include <linux/workqueue.h>
13#include <linux/mtd/nand.h>
14#include <linux/spinlock.h>
15
16
17/* nand interface + ecc
18 byte write/read does one cycle on nand data lines.
19 dword write/read does 4 cycles
20 if R852_CTL_ECC_ACCESS is set in R852_CTL, then dword read reads
21 results of ecc correction, if DMA read was done before.
22 If write was done two dword reads read generated ecc checksums
23*/
24#define R852_DATALINE 0x00
25
26/* control register */
27#define R852_CTL 0x04
28#define R852_CTL_COMMAND 0x01 /* send command (#CLE)*/
29#define R852_CTL_DATA 0x02 /* read/write data (#ALE)*/
30#define R852_CTL_ON 0x04 /* only seem to controls the hd led, */
31 /* but has to be set on start...*/
32#define R852_CTL_RESET 0x08 /* unknown, set only on start once*/
33#define R852_CTL_CARDENABLE 0x10 /* probably (#CE) - always set*/
34#define R852_CTL_ECC_ENABLE 0x20 /* enable ecc engine */
35#define R852_CTL_ECC_ACCESS 0x40 /* read/write ecc via reg #0*/
36#define R852_CTL_WRITE 0x80 /* set when performing writes (#WP) */
37
38/* card detection status */
39#define R852_CARD_STA 0x05
40
41#define R852_CARD_STA_CD 0x01 /* state of #CD line, same as 0x04 */
42#define R852_CARD_STA_RO 0x02 /* card is readonly */
43#define R852_CARD_STA_PRESENT 0x04 /* card is present (#CD) */
44#define R852_CARD_STA_ABSENT 0x08 /* card is absent */
45#define R852_CARD_STA_BUSY 0x80 /* card is busy - (#R/B) */
46
47/* card detection irq status & enable*/
48#define R852_CARD_IRQ_STA 0x06 /* IRQ status */
49#define R852_CARD_IRQ_ENABLE 0x07 /* IRQ enable */
50
51#define R852_CARD_IRQ_CD 0x01 /* fire when #CD lights, same as 0x04*/
52#define R852_CARD_IRQ_REMOVE 0x04 /* detect card removal */
53#define R852_CARD_IRQ_INSERT 0x08 /* detect card insert */
54#define R852_CARD_IRQ_UNK1 0x10 /* unknown */
55#define R852_CARD_IRQ_GENABLE 0x80 /* general enable */
56#define R852_CARD_IRQ_MASK 0x1D
57
58
59
60/* hardware enable */
61#define R852_HW 0x08
62#define R852_HW_ENABLED 0x01 /* hw enabled */
63#define R852_HW_UNKNOWN 0x80
64
65
66/* dma capabilities */
67#define R852_DMA_CAP 0x09
68#define R852_SMBIT 0x20 /* if set with bit #6 or bit #7, then */
69 /* hw is smartmedia */
70#define R852_DMA1 0x40 /* if set w/bit #7, dma is supported */
71#define R852_DMA2 0x80 /* if set w/bit #6, dma is supported */
72
73
74/* physical DMA address - 32 bit value*/
75#define R852_DMA_ADDR 0x0C
76
77
78/* dma settings */
79#define R852_DMA_SETTINGS 0x10
80#define R852_DMA_MEMORY 0x01 /* (memory <-> internal hw buffer) */
81#define R852_DMA_READ 0x02 /* 0 = write, 1 = read */
82#define R852_DMA_INTERNAL 0x04 /* (internal hw buffer <-> card) */
83
84/* dma IRQ status */
85#define R852_DMA_IRQ_STA 0x14
86
87/* dma IRQ enable */
88#define R852_DMA_IRQ_ENABLE 0x18
89
90#define R852_DMA_IRQ_MEMORY 0x01 /* (memory <-> internal hw buffer) */
91#define R852_DMA_IRQ_ERROR 0x02 /* error did happen */
92#define R852_DMA_IRQ_INTERNAL 0x04 /* (internal hw buffer <-> card) */
93#define R852_DMA_IRQ_MASK 0x07 /* mask of all IRQ bits */
94
95
96/* ECC syndrome format - read from reg #0 will return two copies of these for
97 each half of the page.
98 first byte is error byte location, and second, bit location + flags */
99#define R852_ECC_ERR_BIT_MSK 0x07 /* error bit location */
100#define R852_ECC_CORRECT 0x10 /* no errors - (guessed) */
101#define R852_ECC_CORRECTABLE 0x20 /* correctable error exist */
102#define R852_ECC_FAIL 0x40 /* non correctable error detected */
103
104#define R852_DMA_LEN 512
105
106#define DMA_INTERNAL 0
107#define DMA_MEMORY 1
108
struct r852_device {
	void __iomem *mmio;		/* mapped BAR 0 registers */
	struct mtd_info *mtd;		/* mtd backpointer */
	struct nand_chip *chip;		/* nand chip backpointer */
	struct pci_dev *pci_dev;	/* pci backpointer */

	/* dma area */
	dma_addr_t phys_dma_addr;	/* bus address of buffer*/
	struct completion dma_done;	/* data transfer done */

	dma_addr_t phys_bounce_buffer;	/* bus address of bounce buffer */
	uint8_t *bounce_buffer;		/* virtual address of bounce buffer */

	int dma_dir;			/* 1 = read, 0 = write */
	int dma_stage;			/* 0 - idle, 1 - first step,
					   2 - second step */

	int dma_state;			/* 0 = internal, 1 = memory */
	int dma_error;			/* dma errors */
	int dma_usable;			/* is it possible to use dma */

	/* card status area */
	struct delayed_work card_detect_work;
	struct workqueue_struct *card_workqueue;
	int card_registred;		/* card registered with mtd */
	int card_detected;		/* card detected in slot */
	int card_unstable;		/* set while a just inserted/removed
					   card is settling; hw state is not
					   trustworthy while this is set */
	int readonly;			/* card is readonly */
	int sm;				/* Is card smartmedia */

	/* interrupt handling */
	spinlock_t irqlock;		/* protects the fields the IRQ
					   handler touches */
	int irq;			/* irq num */
	int insuspend;			/* device is suspended */

	/* misc */
	void *tmp_buffer;		/* SM_SECTOR_SIZE scratch buffer */
	uint8_t ctlreg;			/* cached contents of control reg */
};
149
150#define DRV_NAME "r852"
151
152
/* Debug helpers: wrapped in do { } while (0) so a bare `if (x) dbg(...);
   else ...` does not silently bind the else to the hidden if (debug) */
#define dbg(format, ...) \
	do { \
		if (debug) \
			printk(KERN_DEBUG DRV_NAME ": " format "\n", \
				## __VA_ARGS__); \
	} while (0)

#define dbg_verbose(format, ...) \
	do { \
		if (debug > 1) \
			printk(KERN_DEBUG DRV_NAME ": " format "\n", \
				## __VA_ARGS__); \
	} while (0)


/* Always-on informational message (single statement, no guard needed) */
#define message(format, ...) \
	printk(KERN_INFO DRV_NAME ": " format "\n", ## __VA_ARGS__)
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index fa6e9c7fe511..dc02dcd0c08f 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -957,7 +957,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
957 957
958 /* currently we assume we have the one resource */ 958 /* currently we assume we have the one resource */
959 res = pdev->resource; 959 res = pdev->resource;
960 size = res->end - res->start + 1; 960 size = resource_size(res);
961 961
962 info->area = request_mem_region(res->start, size, pdev->name); 962 info->area = request_mem_region(res->start, size, pdev->name);
963 963
@@ -1013,7 +1013,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
1013 s3c2410_nand_init_chip(info, nmtd, sets); 1013 s3c2410_nand_init_chip(info, nmtd, sets);
1014 1014
1015 nmtd->scan_res = nand_scan_ident(&nmtd->mtd, 1015 nmtd->scan_res = nand_scan_ident(&nmtd->mtd,
1016 (sets) ? sets->nr_chips : 1); 1016 (sets) ? sets->nr_chips : 1,
1017 NULL);
1017 1018
1018 if (nmtd->scan_res == 0) { 1019 if (nmtd->scan_res == 0) {
1019 s3c2410_nand_update_chip(info, nmtd); 1020 s3c2410_nand_update_chip(info, nmtd);
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 34752fce0793..546c2f0eb2e8 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -855,7 +855,7 @@ static int __devinit flctl_probe(struct platform_device *pdev)
855 nand->read_word = flctl_read_word; 855 nand->read_word = flctl_read_word;
856 } 856 }
857 857
858 ret = nand_scan_ident(flctl_mtd, 1); 858 ret = nand_scan_ident(flctl_mtd, 1, NULL);
859 if (ret) 859 if (ret)
860 goto err; 860 goto err;
861 861
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
new file mode 100644
index 000000000000..aae0b9acd7ae
--- /dev/null
+++ b/drivers/mtd/nand/sm_common.c
@@ -0,0 +1,143 @@
1/*
2 * Copyright © 2009 - Maxim Levitsky
3 * Common routines & support for xD format
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <linux/kernel.h>
10#include <linux/mtd/nand.h>
11#include "sm_common.h"
12
/* ECC placement for 512 byte/page media - positions 8-10 and 13-15
   match the ecc2/ecc1 fields of struct sm_oob */
static struct nand_ecclayout nand_oob_sm = {
	.eccbytes = 6,
	.eccpos = {8, 9, 10, 13, 14, 15},
	.oobfree = {
		{.offset = 0 , .length = 4}, /* reserved */
		{.offset = 6 , .length = 2}, /* LBA1 */
		{.offset = 11, .length = 2} /* LBA2 */
	}
};
22
/* NOTE: This layout is not compatible with SmartMedia, */
/* because the 256 byte devices have a page dependent oob layout */
/* However it does preserve the bad block markers */
/* If you use smftl, it will bypass this and work correctly */
/* If you do not, then you break SmartMedia compliance anyway */
28
/* ECC placement for 256 byte/page ("small page") devices */
static struct nand_ecclayout nand_oob_sm_small = {
	.eccbytes = 3,
	.eccpos = {0, 1, 2},
	.oobfree = {
		{.offset = 3 , .length = 2}, /* reserved */
		{.offset = 6 , .length = 2}, /* LBA1 */
	}
};
37
38
39static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
40{
41 struct mtd_oob_ops ops;
42 struct sm_oob oob;
43 int ret, error = 0;
44
45 memset(&oob, -1, SM_OOB_SIZE);
46 oob.block_status = 0x0F;
47
48 /* As long as this function is called on erase block boundaries
49 it will work correctly for 256 byte nand */
50 ops.mode = MTD_OOB_PLACE;
51 ops.ooboffs = 0;
52 ops.ooblen = mtd->oobsize;
53 ops.oobbuf = (void *)&oob;
54 ops.datbuf = NULL;
55
56
57 ret = mtd->write_oob(mtd, ofs, &ops);
58 if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
59 printk(KERN_NOTICE
60 "sm_common: can't mark sector at %i as bad\n",
61 (int)ofs);
62 error = -EIO;
63 } else
64 mtd->ecc_stats.badblocks++;
65
66 return error;
67}
68
69
/* Device id table: name, id byte, page size, size (MiB),
   erase block size, options */
static struct nand_flash_dev nand_smartmedia_flash_ids[] = {

	/* SmartMedia */
	{"SmartMedia 1MiB 5V",          0x6e, 256, 1, 0x1000, 0},
	{"SmartMedia 1MiB 3,3V",        0xe8, 256, 1, 0x1000, 0},
	{"SmartMedia 1MiB 3,3V",        0xec, 256, 1, 0x1000, 0},
	{"SmartMedia 2MiB 3,3V",        0xea, 256, 2, 0x1000, 0},
	{"SmartMedia 2MiB 5V",          0x64, 256, 2, 0x1000, 0},
	{"SmartMedia 2MiB 3,3V ROM",    0x5d, 512, 2, 0x2000, NAND_ROM},
	{"SmartMedia 4MiB 3,3V",        0xe3, 512, 4, 0x2000, 0},
	{"SmartMedia 4MiB 3,3/5V",      0xe5, 512, 4, 0x2000, 0},
	{"SmartMedia 4MiB 5V",          0x6b, 512, 4, 0x2000, 0},
	{"SmartMedia 4MiB 3,3V ROM",    0xd5, 512, 4, 0x2000, NAND_ROM},
	{"SmartMedia 8MiB 3,3V",        0xe6, 512, 8, 0x2000, 0},
	{"SmartMedia 8MiB 3,3V ROM",    0xd6, 512, 8, 0x2000, NAND_ROM},

#define XD_TYPEM	(NAND_NO_AUTOINCR | NAND_BROKEN_XD)
	/* xD / SmartMedia */
	{"SmartMedia/xD 16MiB 3,3V",    0x73, 512, 16, 0x4000, 0},
	{"SmartMedia 16MiB 3,3V ROM",   0x57, 512, 16, 0x4000, NAND_ROM},
	{"SmartMedia/xD 32MiB 3,3V",    0x75, 512, 32, 0x4000, 0},
	{"SmartMedia 32MiB 3,3V ROM",   0x58, 512, 32, 0x4000, NAND_ROM},
	{"SmartMedia/xD 64MiB 3,3V",    0x76, 512, 64, 0x4000, 0},
	{"SmartMedia 64MiB 3,3V ROM",   0xd9, 512, 64, 0x4000, NAND_ROM},
	{"SmartMedia/xD 128MiB 3,3V",   0x79, 512, 128, 0x4000, 0},
	{"SmartMedia 128MiB 3,3V ROM",  0xda, 512, 128, 0x4000, NAND_ROM},
	{"SmartMedia/xD 256MiB 3,3V",   0x71, 512, 256, 0x4000, XD_TYPEM},
	{"SmartMedia 256MiB 3,3V ROM",  0x5b, 512, 256, 0x4000, NAND_ROM},

	/* xD only */
	{"xD 512MiB 3,3V",              0xDC, 512, 512, 0x4000, XD_TYPEM},
	{"xD 1GiB 3,3V",                0xD3, 512, 1024, 0x4000, XD_TYPEM},
	{"xD 2GiB 3,3V",                0xD5, 512, 2048, 0x4000, XD_TYPEM},
	{NULL,}
};
105
int sm_register_device(struct mtd_info *mtd)
{
	struct nand_chip *chip = (struct nand_chip *)mtd->priv;
	int ret;

	/* Skip the generic bbt scan - bad blocks are marked via the
	   oob block_status byte (offset 5, see sm_block_markbad) */
	chip->options |= NAND_SKIP_BBTSCAN;

	/* Scan for card properties */
	ret = nand_scan_ident(mtd, 1, nand_smartmedia_flash_ids);

	if (ret)
		return ret;

	/* Bad block marker position */
	chip->badblockpos = 0x05;
	chip->badblockbits = 7;
	chip->block_markbad = sm_block_markbad;

	/* ECC layout depends on the page size detected above */
	if (mtd->writesize == SM_SECTOR_SIZE)
		chip->ecc.layout = &nand_oob_sm;
	else if (mtd->writesize == SM_SMALL_PAGE)
		chip->ecc.layout = &nand_oob_sm_small;
	else
		return -ENODEV;

	ret = nand_scan_tail(mtd);

	if (ret)
		return ret;

	return add_mtd_device(mtd);
}
140
141MODULE_LICENSE("GPL");
142MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
143MODULE_DESCRIPTION("Common SmartMedia/xD functions");
diff --git a/drivers/mtd/nand/sm_common.h b/drivers/mtd/nand/sm_common.h
new file mode 100644
index 000000000000..18284f5fae64
--- /dev/null
+++ b/drivers/mtd/nand/sm_common.h
@@ -0,0 +1,61 @@
1/*
2 * Copyright © 2009 - Maxim Levitsky
3 * Common routines & support for SmartMedia/xD format
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <linux/bitops.h>
10#include <linux/mtd/mtd.h>
11
/* Full oob structure as written on the flash */
struct sm_oob {
	uint32_t reserved;	/* unused; reads as 0xFFFFFFFF on healthy media */
	uint8_t data_status;	/* sector valid while most bits stay set */
	uint8_t block_status;	/* block valid while most bits stay set */
	uint8_t lba_copy1[2];	/* first copy of the encoded 10-bit LBA */
	uint8_t ecc2[3];	/* ECC over the second 256 byte half */
	uint8_t lba_copy2[2];	/* redundant second copy of the LBA */
	uint8_t ecc1[3];	/* ECC over the first 256 byte half */
} __attribute__((packed));


/* one sector is always 512 bytes, but it can consist of two nand pages */
#define SM_SECTOR_SIZE		512

/* oob area is also 16 bytes, but might be from two pages */
#define SM_OOB_SIZE		16

/* This is maximum zone size, and all devices that have more than one zone
   have this size */
#define SM_MAX_ZONE_SIZE 	1024

/* support for small page nand */
#define SM_SMALL_PAGE 		256
#define SM_SMALL_OOB_SIZE	8

/* Finish NAND identification and register @mtd as a SmartMedia/xD device */
extern int sm_register_device(struct mtd_info *mtd);
40
41
42static inline int sm_sector_valid(struct sm_oob *oob)
43{
44 return hweight16(oob->data_status) >= 5;
45}
46
47static inline int sm_block_valid(struct sm_oob *oob)
48{
49 return hweight16(oob->block_status) >= 7;
50}
51
52static inline int sm_block_erased(struct sm_oob *oob)
53{
54 static const uint32_t erased_pattern[4] = {
55 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
56
57 /* First test for erased block */
58 if (!memcmp(oob, erased_pattern, sizeof(*oob)))
59 return 1;
60 return 0;
61}
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index a4519a7bd683..b37cbde6e7db 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -220,7 +220,7 @@ static int __devinit socrates_nand_probe(struct of_device *ofdev,
220 dev_set_drvdata(&ofdev->dev, host); 220 dev_set_drvdata(&ofdev->dev, host);
221 221
222 /* first scan to find the device and get the page size */ 222 /* first scan to find the device and get the page size */
223 if (nand_scan_ident(mtd, 1)) { 223 if (nand_scan_ident(mtd, 1, NULL)) {
224 res = -ENXIO; 224 res = -ENXIO;
225 goto out; 225 goto out;
226 } 226 }
@@ -290,7 +290,7 @@ static int __devexit socrates_nand_remove(struct of_device *ofdev)
290 return 0; 290 return 0;
291} 291}
292 292
293static struct of_device_id socrates_nand_match[] = 293static const struct of_device_id socrates_nand_match[] =
294{ 294{
295 { 295 {
296 .compatible = "abb,socrates-nand", 296 .compatible = "abb,socrates-nand",
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index fa28f01ae009..3041d1f7ae3f 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -319,7 +319,7 @@ static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
319 319
320static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio) 320static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
321{ 321{
322 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 322 struct mfd_cell *cell = dev_get_platdata(&dev->dev);
323 int ret; 323 int ret;
324 324
325 if (cell->enable) { 325 if (cell->enable) {
@@ -363,7 +363,7 @@ static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
363 363
364static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio) 364static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
365{ 365{
366 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 366 struct mfd_cell *cell = dev_get_platdata(&dev->dev);
367 367
368 tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE); 368 tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
369 if (cell->disable) 369 if (cell->disable)
@@ -372,7 +372,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
372 372
373static int tmio_probe(struct platform_device *dev) 373static int tmio_probe(struct platform_device *dev)
374{ 374{
375 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 375 struct mfd_cell *cell = dev_get_platdata(&dev->dev);
376 struct tmio_nand_data *data = cell->driver_data; 376 struct tmio_nand_data *data = cell->driver_data;
377 struct resource *fcr = platform_get_resource(dev, 377 struct resource *fcr = platform_get_resource(dev,
378 IORESOURCE_MEM, 0); 378 IORESOURCE_MEM, 0);
@@ -405,14 +405,14 @@ static int tmio_probe(struct platform_device *dev)
405 mtd->priv = nand_chip; 405 mtd->priv = nand_chip;
406 mtd->name = "tmio-nand"; 406 mtd->name = "tmio-nand";
407 407
408 tmio->ccr = ioremap(ccr->start, ccr->end - ccr->start + 1); 408 tmio->ccr = ioremap(ccr->start, resource_size(ccr));
409 if (!tmio->ccr) { 409 if (!tmio->ccr) {
410 retval = -EIO; 410 retval = -EIO;
411 goto err_iomap_ccr; 411 goto err_iomap_ccr;
412 } 412 }
413 413
414 tmio->fcr_base = fcr->start & 0xfffff; 414 tmio->fcr_base = fcr->start & 0xfffff;
415 tmio->fcr = ioremap(fcr->start, fcr->end - fcr->start + 1); 415 tmio->fcr = ioremap(fcr->start, resource_size(fcr));
416 if (!tmio->fcr) { 416 if (!tmio->fcr) {
417 retval = -EIO; 417 retval = -EIO;
418 goto err_iomap_fcr; 418 goto err_iomap_fcr;
@@ -516,7 +516,7 @@ static int tmio_remove(struct platform_device *dev)
516#ifdef CONFIG_PM 516#ifdef CONFIG_PM
517static int tmio_suspend(struct platform_device *dev, pm_message_t state) 517static int tmio_suspend(struct platform_device *dev, pm_message_t state)
518{ 518{
519 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 519 struct mfd_cell *cell = dev_get_platdata(&dev->dev);
520 520
521 if (cell->suspend) 521 if (cell->suspend)
522 cell->suspend(dev); 522 cell->suspend(dev);
@@ -527,7 +527,7 @@ static int tmio_suspend(struct platform_device *dev, pm_message_t state)
527 527
528static int tmio_resume(struct platform_device *dev) 528static int tmio_resume(struct platform_device *dev)
529{ 529{
530 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 530 struct mfd_cell *cell = dev_get_platdata(&dev->dev);
531 531
532 /* FIXME - is this required or merely another attack of the broken 532 /* FIXME - is this required or merely another attack of the broken
533 * SHARP platform? Looks suspicious. 533 * SHARP platform? Looks suspicious.
diff --git a/drivers/mtd/nand/ts7250.c b/drivers/mtd/nand/ts7250.c
deleted file mode 100644
index 0f5562aeedc1..000000000000
--- a/drivers/mtd/nand/ts7250.c
+++ /dev/null
@@ -1,207 +0,0 @@
1/*
2 * drivers/mtd/nand/ts7250.c
3 *
4 * Copyright (C) 2004 Technologic Systems (support@embeddedARM.com)
5 *
6 * Derived from drivers/mtd/nand/edb7312.c
7 * Copyright (C) 2004 Marius Gröger (mag@sysgo.de)
8 *
9 * Derived from drivers/mtd/nand/autcpu12.c
10 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 * Overview:
17 * This is a device driver for the NAND flash device found on the
18 * TS-7250 board which utilizes a Samsung 32 Mbyte part.
19 */
20
21#include <linux/slab.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/mtd/mtd.h>
25#include <linux/mtd/nand.h>
26#include <linux/mtd/partitions.h>
27#include <linux/io.h>
28
29#include <mach/hardware.h>
30#include <mach/ts72xx.h>
31
32#include <asm/sizes.h>
33#include <asm/mach-types.h>
34
35/*
36 * MTD structure for TS7250 board
37 */
38static struct mtd_info *ts7250_mtd = NULL;
39
#ifdef CONFIG_MTD_PARTITIONS
/* Only the kernel command line can override the static layouts below */
static const char *part_probes[] = { "cmdlinepart", NULL };

#define NUM_PARTITIONS 3

/*
 * Static partition layout for the 32 MiB flash device
 */
static struct mtd_partition partition_info32[] = {
	{
		.name		= "TS-BOOTROM",
		.offset		= 0x00000000,
		.size		= 0x00004000,	/* 16 KiB */
	}, {
		.name		= "Linux",
		.offset		= 0x00004000,
		.size		= 0x01d00000,
	}, {
		.name		= "RedBoot",
		.offset		= 0x01d04000,
		.size		= 0x002fc000,
	},
};

/*
 * Static partition layout for the 128 MiB flash device
 */
static struct mtd_partition partition_info128[] = {
	{
		.name		= "TS-BOOTROM",
		.offset		= 0x00000000,
		.size		= 0x00004000,	/* 16 KiB */
	}, {
		.name		= "Linux",
		.offset		= 0x00004000,
		.size		= 0x07d00000,
	}, {
		.name		= "RedBoot",
		.offset		= 0x07d04000,
		.size		= 0x002fc000,
	},
};
#endif
83
84
85/*
86 * hardware specific access to control-lines
87 *
88 * ctrl:
89 * NAND_NCE: bit 0 -> bit 2
90 * NAND_CLE: bit 1 -> bit 1
91 * NAND_ALE: bit 2 -> bit 0
92 */
93static void ts7250_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
94{
95 struct nand_chip *chip = mtd->priv;
96
97 if (ctrl & NAND_CTRL_CHANGE) {
98 unsigned long addr = TS72XX_NAND_CONTROL_VIRT_BASE;
99 unsigned char bits;
100
101 bits = (ctrl & NAND_NCE) << 2;
102 bits |= ctrl & NAND_CLE;
103 bits |= (ctrl & NAND_ALE) >> 2;
104
105 __raw_writeb((__raw_readb(addr) & ~0x7) | bits, addr);
106 }
107
108 if (cmd != NAND_CMD_NONE)
109 writeb(cmd, chip->IO_ADDR_W);
110}
111
/*
 * Read the device ready pin.
 * The R/nB line is wired to bit 5 of the board's busy status register.
 */
static int ts7250_device_ready(struct mtd_info *mtd)
{
	return __raw_readb(TS72XX_NAND_BUSY_VIRT_BASE) & 0x20;
}
119
120/*
121 * Main initialization routine
122 */
123static int __init ts7250_init(void)
124{
125 struct nand_chip *this;
126 const char *part_type = 0;
127 int mtd_parts_nb = 0;
128 struct mtd_partition *mtd_parts = 0;
129
130 if (!machine_is_ts72xx() || board_is_ts7200())
131 return -ENXIO;
132
133 /* Allocate memory for MTD device structure and private data */
134 ts7250_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
135 if (!ts7250_mtd) {
136 printk("Unable to allocate TS7250 NAND MTD device structure.\n");
137 return -ENOMEM;
138 }
139
140 /* Get pointer to private data */
141 this = (struct nand_chip *)(&ts7250_mtd[1]);
142
143 /* Initialize structures */
144 memset(ts7250_mtd, 0, sizeof(struct mtd_info));
145 memset(this, 0, sizeof(struct nand_chip));
146
147 /* Link the private data with the MTD structure */
148 ts7250_mtd->priv = this;
149 ts7250_mtd->owner = THIS_MODULE;
150
151 /* insert callbacks */
152 this->IO_ADDR_R = (void *)TS72XX_NAND_DATA_VIRT_BASE;
153 this->IO_ADDR_W = (void *)TS72XX_NAND_DATA_VIRT_BASE;
154 this->cmd_ctrl = ts7250_hwcontrol;
155 this->dev_ready = ts7250_device_ready;
156 this->chip_delay = 15;
157 this->ecc.mode = NAND_ECC_SOFT;
158
159 printk("Searching for NAND flash...\n");
160 /* Scan to find existence of the device */
161 if (nand_scan(ts7250_mtd, 1)) {
162 kfree(ts7250_mtd);
163 return -ENXIO;
164 }
165#ifdef CONFIG_MTD_PARTITIONS
166 ts7250_mtd->name = "ts7250-nand";
167 mtd_parts_nb = parse_mtd_partitions(ts7250_mtd, part_probes, &mtd_parts, 0);
168 if (mtd_parts_nb > 0)
169 part_type = "command line";
170 else
171 mtd_parts_nb = 0;
172#endif
173 if (mtd_parts_nb == 0) {
174 mtd_parts = partition_info32;
175 if (ts7250_mtd->size >= (128 * 0x100000))
176 mtd_parts = partition_info128;
177 mtd_parts_nb = NUM_PARTITIONS;
178 part_type = "static";
179 }
180
181 /* Register the partitions */
182 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
183 add_mtd_partitions(ts7250_mtd, mtd_parts, mtd_parts_nb);
184
185 /* Return happy */
186 return 0;
187}
188
189module_init(ts7250_init);
190
/*
 * Clean up routine
 */
static void __exit ts7250_cleanup(void)
{
	/* Unregister the device */
	/* NOTE(review): init registers via add_mtd_partitions(), so
	   del_mtd_device() alone looks asymmetric — confirm whether
	   nand_release()/del_mtd_partitions() should be used here */
	del_mtd_device(ts7250_mtd);

	/* Free the MTD device structure */
	kfree(ts7250_mtd);
}
202
203module_exit(ts7250_cleanup);
204
205MODULE_LICENSE("GPL");
206MODULE_AUTHOR("Jesse Off <joff@embeddedARM.com>");
207MODULE_DESCRIPTION("MTD map driver for Technologic Systems TS-7250 board");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 863513c3b69a..054a41c0ef4a 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -274,7 +274,7 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
274 struct nand_chip *chip = mtd->priv; 274 struct nand_chip *chip = mtd->priv;
275 int ret; 275 int ret;
276 276
277 ret = nand_scan_ident(mtd, 1); 277 ret = nand_scan_ident(mtd, 1, NULL);
278 if (!ret) { 278 if (!ret) {
279 if (mtd->writesize >= 512) { 279 if (mtd->writesize >= 512) {
280 chip->ecc.size = mtd->writesize; 280 chip->ecc.size = mtd->writesize;
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index 1002e1882996..a4578bf903aa 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -126,7 +126,6 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
126 del_mtd_blktrans_dev(dev); 126 del_mtd_blktrans_dev(dev);
127 kfree(nftl->ReplUnitTable); 127 kfree(nftl->ReplUnitTable);
128 kfree(nftl->EUNtable); 128 kfree(nftl->EUNtable);
129 kfree(nftl);
130} 129}
131 130
132/* 131/*
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index fd406348fdfd..9f322f1a7f22 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -309,7 +309,7 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
309 goto out_copy; 309 goto out_copy;
310 310
311 /* panic_write() may be in an interrupt context */ 311 /* panic_write() may be in an interrupt context */
312 if (in_interrupt()) 312 if (in_interrupt() || oops_in_progress)
313 goto out_copy; 313 goto out_copy;
314 314
315 if (buf >= high_memory) { 315 if (buf >= high_memory) {
@@ -386,7 +386,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
386 goto out_copy; 386 goto out_copy;
387 387
388 /* panic_write() may be in an interrupt context */ 388 /* panic_write() may be in an interrupt context */
389 if (in_interrupt()) 389 if (in_interrupt() || oops_in_progress)
390 goto out_copy; 390 goto out_copy;
391 391
392 if (buf >= high_memory) { 392 if (buf >= high_memory) {
@@ -403,7 +403,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
403 403
404 dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE); 404 dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
405 dma_dst = c->phys_base + bram_offset; 405 dma_dst = c->phys_base + bram_offset;
406 if (dma_mapping_error(&c->pdev->dev, dma_dst)) { 406 if (dma_mapping_error(&c->pdev->dev, dma_src)) {
407 dev_err(&c->pdev->dev, 407 dev_err(&c->pdev->dev,
408 "Couldn't DMA map a %d byte buffer\n", 408 "Couldn't DMA map a %d byte buffer\n",
409 count); 409 count);
@@ -426,7 +426,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
426 if (*done) 426 if (*done)
427 break; 427 break;
428 428
429 dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE); 429 dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
430 430
431 if (!*done) { 431 if (!*done) {
432 dev_err(&c->pdev->dev, "timeout waiting for DMA\n"); 432 dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
@@ -521,7 +521,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
521 dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count, 521 dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
522 DMA_TO_DEVICE); 522 DMA_TO_DEVICE);
523 dma_dst = c->phys_base + bram_offset; 523 dma_dst = c->phys_base + bram_offset;
524 if (dma_mapping_error(&c->pdev->dev, dma_dst)) { 524 if (dma_mapping_error(&c->pdev->dev, dma_src)) {
525 dev_err(&c->pdev->dev, 525 dev_err(&c->pdev->dev,
526 "Couldn't DMA map a %d byte buffer\n", 526 "Couldn't DMA map a %d byte buffer\n",
527 count); 527 count);
@@ -539,7 +539,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
539 omap_start_dma(c->dma_channel); 539 omap_start_dma(c->dma_channel);
540 wait_for_completion(&c->dma_done); 540 wait_for_completion(&c->dma_done);
541 541
542 dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE); 542 dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
543 543
544 return 0; 544 return 0;
545} 545}
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index d2aa9c46530f..63b83c0d9a13 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -817,7 +817,6 @@ static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
817 vfree(part->sector_map); 817 vfree(part->sector_map);
818 kfree(part->header_cache); 818 kfree(part->header_cache);
819 kfree(part->blocks); 819 kfree(part->blocks);
820 kfree(part);
821} 820}
822 821
823static struct mtd_blktrans_ops rfd_ftl_tr = { 822static struct mtd_blktrans_ops rfd_ftl_tr = {
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
new file mode 100644
index 000000000000..67822cf6c025
--- /dev/null
+++ b/drivers/mtd/sm_ftl.c
@@ -0,0 +1,1284 @@
1/*
2 * Copyright © 2009 - Maxim Levitsky
3 * SmartMedia/xD translation layer
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/random.h>
13#include <linux/hdreg.h>
14#include <linux/kthread.h>
15#include <linux/freezer.h>
16#include <linux/sysfs.h>
17#include <linux/bitops.h>
18#include <linux/slab.h>
19#include <linux/mtd/nand_ecc.h>
20#include "nand/sm_common.h"
21#include "sm_ftl.h"
22
23
24
25struct workqueue_struct *cache_flush_workqueue;
26
27static int cache_timeout = 1000;
28module_param(cache_timeout, bool, S_IRUGO);
29MODULE_PARM_DESC(cache_timeout,
30 "Timeout (in ms) for cache flush (1000 ms default");
31
32static int debug;
33module_param(debug, int, S_IRUGO | S_IWUSR);
34MODULE_PARM_DESC(debug, "Debug level (0-2)");
35
36
/* ------------------- sysfs attributes ---------------------------------- */

/* device_attribute wrapper that carries its own payload buffer,
   freed together with the attribute in sm_delete_sysfs_attributes() */
struct sm_sysfs_attribute {
	struct device_attribute dev_attr;
	char *data;	/* heap buffer returned by the show() handler */
	int len;	/* payload length in bytes */
};
43
/* sysfs 'show' handler: copies the pre-formatted payload into @buf
   and returns its length (payload is at most SM_SMALL_PAGE bytes,
   well inside the PAGE_SIZE sysfs buffer) */
ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct sm_sysfs_attribute *sm_attr =
		container_of(attr, struct sm_sysfs_attribute, dev_attr);

	strncpy(buf, sm_attr->data, sm_attr->len);
	return sm_attr->len;
}
53
54
55#define NUM_ATTRIBUTES 1
56#define SM_CIS_VENDOR_OFFSET 0x59
57struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
58{
59 struct attribute_group *attr_group;
60 struct attribute **attributes;
61 struct sm_sysfs_attribute *vendor_attribute;
62
63 int vendor_len = strnlen(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
64 SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET);
65
66 char *vendor = kmalloc(vendor_len, GFP_KERNEL);
67 memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len);
68 vendor[vendor_len] = 0;
69
70 /* Initialize sysfs attributes */
71 vendor_attribute =
72 kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);
73
74 sysfs_attr_init(&vendor_attribute->dev_attr.attr);
75
76 vendor_attribute->data = vendor;
77 vendor_attribute->len = vendor_len;
78 vendor_attribute->dev_attr.attr.name = "vendor";
79 vendor_attribute->dev_attr.attr.mode = S_IRUGO;
80 vendor_attribute->dev_attr.show = sm_attr_show;
81
82
83 /* Create array of pointers to the attributes */
84 attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1),
85 GFP_KERNEL);
86 attributes[0] = &vendor_attribute->dev_attr.attr;
87
88 /* Finally create the attribute group */
89 attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
90 attr_group->attrs = attributes;
91 return attr_group;
92}
93
/* Tear down the attribute group built by sm_create_sysfs_attributes():
   frees every attribute's payload and wrapper, then the group itself */
void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
{
	struct attribute **attributes = ftl->disk_attributes->attrs;
	int i;

	for (i = 0; attributes[i] ; i++) {

		/* Walk back from the generic attribute to our wrapper */
		struct device_attribute *dev_attr = container_of(attributes[i],
						struct device_attribute, attr);

		struct sm_sysfs_attribute *sm_attr =
			container_of(dev_attr,
				struct sm_sysfs_attribute, dev_attr);

		kfree(sm_attr->data);
		kfree(sm_attr);
	}

	kfree(ftl->disk_attributes->attrs);
	kfree(ftl->disk_attributes);
}
115
116
117/* ----------------------- oob helpers -------------------------------------- */
118
/*
 * Decode a 10-bit LBA from its 2-byte on-media encoding.
 * Returns the LBA, or -2 when the fixed signature bits or the even
 * parity check fail.
 */
static int sm_get_lba(uint8_t *lba)
{
	/* check fixed bits: top 5 bits of byte 0 must be 0b10000 */
	if ((lba[0] & 0xF8) != 0x10)
		return -2;

	/* check parity - endianness doesn't matter */
	if (hweight16(*(uint16_t *)lba) & 1)
		return -2;

	/* LBA = low 3 bits of byte 0 (high part) + top 7 bits of byte 1 */
	return (lba[1] >> 1) | ((lba[0] & 0x07) << 7);
}
131
132
/*
 * Read LBA associated with block
 * returns -1, if block is erased
 * returns -2 if error happens
 */
static int sm_read_lba(struct sm_oob *oob)
{
	static const uint32_t erased_pattern[4] = {
		0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };

	uint16_t lba_test;
	int lba;

	/* First test for erased block */
	if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
		return -1;

	/* Now check if both copies of the LBA differ too much
	   (anything beyond a single flipped bit counts as corruption) */
	lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t*)oob->lba_copy2;
	if (lba_test && !is_power_of_2(lba_test))
		return -2;

	/* And read it, falling back to the second copy on decode failure */
	lba = sm_get_lba(oob->lba_copy1);

	if (lba == -2)
		lba = sm_get_lba(oob->lba_copy2);

	return lba;
}
163
164static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
165{
166 uint8_t tmp[2];
167
168 WARN_ON(lba >= 1000);
169
170 tmp[0] = 0x10 | ((lba >> 7) & 0x07);
171 tmp[1] = (lba << 1) & 0xFF;
172
173 if (hweight16(*(uint16_t *)tmp) & 0x01)
174 tmp[1] |= 1;
175
176 oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0];
177 oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1];
178}
179
180
/* Make an absolute mtd offset from (zone, block, offset-in-block) parts */
static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
{
	WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
	WARN_ON(zone < 0 || zone >= ftl->zone_count);
	WARN_ON(block >= ftl->zone_size);
	WARN_ON(boffset >= ftl->block_size);

	/* block == -1 denotes an unmapped entry; propagate as invalid */
	if (block == -1)
		return -1;

	/* Zones are laid out at fixed SM_MAX_ZONE_SIZE-block strides */
	return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
}
194
/* Breaks offset into (zone, block, offset-in-block) parts;
   zone is set to -1 when the offset lies past the device end */
static void sm_break_offset(struct sm_ftl *ftl, loff_t offset,
			    int *zone, int *block, int *boffset)
{
	*boffset = do_div(offset, ftl->block_size);
	/* NOTE(review): divides by max_lba (not zone_size), so this
	   appears to take a logical, LBA-space offset — confirm callers */
	*block = do_div(offset, ftl->max_lba);
	*zone = offset >= ftl->zone_count ? -1 : offset;
}
203
204/* ---------------------- low level IO ------------------------------------- */
205
206static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
207{
208 uint8_t ecc[3];
209
210 __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
211 if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE) < 0)
212 return -EIO;
213
214 buffer += SM_SMALL_PAGE;
215
216 __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
217 if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE) < 0)
218 return -EIO;
219 return 0;
220}
221
/* Reads a sector + oob, retrying (up to twice) after rechecking the
   media; returns 0 on success, last mtd error otherwise */
static int sm_read_sector(struct sm_ftl *ftl,
			  int zone, int block, int boffset,
			  uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_info *mtd = ftl->trans->mtd;
	struct mtd_oob_ops ops;
	struct sm_oob tmp_oob;
	int ret = -EIO;
	int try = 0;

	/* FTL can contain -1 entries that are by default filled with bits */
	if (block == -1) {
		memset(buffer, 0xFF, SM_SECTOR_SIZE);
		return 0;
	}

	/* User might not need the oob, but we do for data verification */
	if (!oob)
		oob = &tmp_oob;

	ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
	ops.ooboffs = 0;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;

again:
	if (try++) {
		/* Avoid infinite recursion on CIS reads, sm_recheck_media
		   won't help anyway */
		if (zone == 0 && block == ftl->cis_block && boffset ==
			ftl->cis_boffset)
			return ret;

		/* Test if media is stable */
		if (try == 3 || sm_recheck_media(ftl))
			return ret;
	}

	/* Unfortunately, oob read will _always_ succeed,
	   despite card removal..... */
	ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Test for unknown errors */
	if (ret != 0 && ret != -EUCLEAN && ret != -EBADMSG) {
		dbg("read of block %d at zone %d, failed due to error (%d)",
			block, zone, ret);
		goto again;
	}

	/* Do a basic test on the oob, to guard against returned garbage */
	if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
		goto again;

	/* This should never happen, unless there is a bug in the mtd driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	/* oob-only read requested: nothing more to verify */
	if (!buffer)
		return 0;

	/* Test if sector marked as bad */
	if (!sm_sector_valid(oob)) {
		dbg("read of block %d at zone %d, failed because it is marked"
			" as bad" , block, zone);
		goto again;
	}

	/* Test ECC (hardware reported -EBADMSG, or software ECC on
	   small-page NAND failed) */
	if (ret == -EBADMSG ||
		(ftl->smallpagenand && sm_correct_sector(buffer, oob))) {

		dbg("read of block %d at zone %d, failed due to ECC error",
			block, zone);
		goto again;
	}

	return 0;
}
303
/* Writes a sector + oob to media; returns 0 on success,
   a negative error (after rechecking the media) on failure */
static int sm_write_sector(struct sm_ftl *ftl,
			   int zone, int block, int boffset,
			   uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_oob_ops ops;
	struct mtd_info *mtd = ftl->trans->mtd;
	int ret;

	BUG_ON(ftl->readonly);

	/* The CIS block (and block 0) must never be overwritten */
	if (zone == 0 && (block == ftl->cis_block || block == 0)) {
		dbg("attempted to write the CIS!");
		return -EIO;
	}

	if (ftl->unstable)
		return -EIO;

	ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;
	ops.ooboffs = 0;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;

	ret = mtd->write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Now we assume that hardware will catch write bitflip errors */
	/* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */

	if (ret) {
		dbg("write to block %d at zone %d, failed with error %d",
			block, zone, ret);

		sm_recheck_media(ftl);
		return ret;
	}

	/* This should never happen, unless there is a bug in the driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	return 0;
}
349
350/* ------------------------ block IO ------------------------------------- */
351
/* Write a block using data and lba, and invalid sector bitmap;
   on a first write failure the block is erased and retried once,
   on a second failure it is marked bad */
static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
			  int zone, int block, int lba,
			  unsigned long invalid_bitmap)
{
	struct sm_oob oob;
	int boffset;
	int retry = 0;

	/* Initialize the oob with requested values */
	memset(&oob, 0xFF, SM_OOB_SIZE);
	sm_write_lba(&oob, lba);
restart:
	if (ftl->unstable)
		return -EIO;

	for (boffset = 0; boffset < ftl->block_size;
				boffset += SM_SECTOR_SIZE) {

		oob.data_status = 0xFF;

		/* Sectors flagged in the bitmap are written pre-marked
		   invalid so readers won't trust their contents */
		if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {

			sm_printk("sector %d of block at LBA %d of zone %d"
				" coudn't be read, marking it as invalid",
				boffset / SM_SECTOR_SIZE, lba, zone);

			oob.data_status = 0;
		}

		/* Small-page NAND needs software ECC over each 256B half */
		if (ftl->smallpagenand) {
			__nand_calculate_ecc(buf + boffset,
					SM_SMALL_PAGE, oob.ecc1);

			__nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE,
					SM_SMALL_PAGE, oob.ecc2);
		}
		if (!sm_write_sector(ftl, zone, block, boffset,
							buf + boffset, &oob))
			continue;

		if (!retry) {

			/* If write fails, try to erase the block */
			/* This is safe, because we never write in blocks
			   that contain valuable data.
			   This is intended to repair blocks that are marked
			   as erased, but that aren't fully erased */

			if (sm_erase_block(ftl, zone, block, 0))
				return -EIO;

			retry = 1;
			goto restart;
		} else {
			sm_mark_block_bad(ftl, zone, block);
			return -EIO;
		}
	}
	return 0;
}
413
414
/* Mark whole block at offset 'offs' as bad. */
static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
{
	struct sm_oob oob;
	int boffset;

	/* All-0xFF oob except block_status = 0xF0 marks a bad block */
	memset(&oob, 0xFF, SM_OOB_SIZE);
	oob.block_status = 0xF0;

	if (ftl->unstable)
		return;

	if (sm_recheck_media(ftl))
		return;

	sm_printk("marking block %d of zone %d as bad", block, zone);

	/* We aren't checking the return value, because we don't care */
	/* This also fails on fake xD cards, but I guess these won't expose
	   any bad blocks till they fail completely */
	for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
		sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
}
438
/*
 * Erase a block within a zone
 * If erase succeeds, it updates free block fifo, otherwise marks block as bad
 */
static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
			  int put_free)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct mtd_info *mtd = ftl->trans->mtd;
	struct erase_info erase;

	erase.mtd = mtd;
	erase.callback = sm_erase_callback;
	erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
	erase.len = ftl->block_size;
	erase.priv = (u_long)ftl;

	if (ftl->unstable)
		return -EIO;

	BUG_ON(ftl->readonly);

	/* The CIS block (and block 0) must never be erased */
	if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
		sm_printk("attempted to erase the CIS!");
		return -EIO;
	}

	if (mtd->erase(mtd, &erase)) {
		sm_printk("erase of block %d in zone %d failed",
			block, zone_num);
		goto error;
	}

	/* The erase may complete asynchronously; wait for the callback */
	if (erase.state == MTD_ERASE_PENDING)
		wait_for_completion(&ftl->erase_completion);

	if (erase.state != MTD_ERASE_DONE) {
		sm_printk("erase of block %d in zone %d failed after wait",
			block, zone_num);
		goto error;
	}

	/* Return the now-clean block to the zone's free pool */
	if (put_free)
		kfifo_in(&zone->free_sectors,
			(const unsigned char *)&block, sizeof(block));

	return 0;
error:
	sm_mark_block_bad(ftl, zone_num, block);
	return -EIO;
}
490
/* mtd erase completion callback: wakes the sm_erase_block() waiter */
static void sm_erase_callback(struct erase_info *self)
{
	struct sm_ftl *ftl = (struct sm_ftl *)self->priv;
	complete(&ftl->erase_completion);
}
496
/* Thoroughly test that block is valid.
   Returns 0 if valid, 1 if it was sliced and got erased,
   -2 on read failure, -EIO if it looks corrupt */
static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
{
	int boffset;
	struct sm_oob oob;
	int lbas[] = { -3, 0, 0, 0 };	/* -3: sentinel no LBA decode can equal */
	int i = 0;
	int test_lba;


	/* First just check that block doesn't look fishy */
	/* Only blocks that are valid or are sliced in two parts, are
	   accepted */
	for (boffset = 0; boffset < ftl->block_size;
					boffset += SM_SECTOR_SIZE) {

		/* This shouldn't happen anyway */
		if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
			return -2;

		test_lba = sm_read_lba(&oob);

		/* Record each distinct LBA value seen across the sectors */
		if (lbas[i] != test_lba)
			lbas[++i] = test_lba;

		/* If we found three different LBAs, something is fishy */
		if (i == 3)
			return -EIO;
	}

	/* If the block is sliced (partially erased usually) erase it */
	if (i == 2) {
		sm_erase_block(ftl, zone, block, 1);
		return 1;
	}

	return 0;
}
535
536/* ----------------- media scanning --------------------------------- */
/* Device size (MiB) -> cylinders/heads/sectors geometry table */
static const struct chs_entry chs_table[] = {
	{ 1,    125,  4,  4  },
	{ 2,    125,  4,  8  },
	{ 4,    250,  4,  8  },
	{ 8,    250,  4,  16 },
	{ 16,   500,  4,  16 },
	{ 32,   500,  8,  16 },
	{ 64,   500,  8,  32 },
	{ 128,  500,  16, 32 },
	{ 256,  1000, 16, 32 },
	{ 512,  1015, 32, 63 },
	{ 1024, 985,  33, 63 },
	{ 2048, 985,  33, 63 },
	{ 0 },	/* table terminator */
};


/* Leading bytes of a valid CIS (Card Information Structure) */
static const uint8_t cis_signature[] = {
	0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
};
/* Find out media parameters.
 * This ideally has to be based on nand id, but for now device size is enough.
 * Fills in zone/block geometry and CHS data in *ftl; returns 0 on success
 * or -ENODEV if the mtd device cannot be driven by this FTL. */
int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
{
	int i;
	int size_in_megs = mtd->size / (1024 * 1024);

	/* ROM media is handled, but mapped read-only */
	ftl->readonly = mtd->type == MTD_ROM;

	/* Manual settings for very old devices */
	ftl->zone_count = 1;
	ftl->smallpagenand = 0;

	switch (size_in_megs) {
	case 1:
		/* 1 MiB flash/rom SmartMedia card (256 byte pages)*/
		ftl->zone_size = 256;
		ftl->max_lba = 250;
		ftl->block_size = 8 * SM_SECTOR_SIZE;
		ftl->smallpagenand = 1;

		break;
	case 2:
		/* 2 MiB flash SmartMedia (256 byte pages)*/
		if (mtd->writesize == SM_SMALL_PAGE) {
			ftl->zone_size = 512;
			ftl->max_lba = 500;
			ftl->block_size = 8 * SM_SECTOR_SIZE;
			ftl->smallpagenand = 1;
		/* 2 MiB rom SmartMedia */
		} else {

			/* Only the ROM variant has large pages at 2 MiB */
			if (!ftl->readonly)
				return -ENODEV;

			ftl->zone_size = 256;
			ftl->max_lba = 250;
			ftl->block_size = 16 * SM_SECTOR_SIZE;
		}
		break;
	case 4:
		/* 4 MiB flash/rom SmartMedia device */
		ftl->zone_size = 512;
		ftl->max_lba = 500;
		ftl->block_size = 16 * SM_SECTOR_SIZE;
		break;
	case 8:
		/* 8 MiB flash/rom SmartMedia device */
		ftl->zone_size = 1024;
		ftl->max_lba = 1000;
		ftl->block_size = 16 * SM_SECTOR_SIZE;
	}

	/* Minimum xD size is 16MiB. Also, all xD cards have standard zone
	   sizes. SmartMedia cards exist up to 128 MiB and have same layout*/
	if (size_in_megs >= 16) {
		ftl->zone_count = size_in_megs / 16;
		ftl->zone_size = 1024;
		ftl->max_lba = 1000;
		ftl->block_size = 32 * SM_SECTOR_SIZE;
	}

	/* Test for proper write,erase and oob sizes */
	if (mtd->erasesize > ftl->block_size)
		return -ENODEV;

	if (mtd->writesize > SM_SECTOR_SIZE)
		return -ENODEV;

	if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
		return -ENODEV;

	if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
		return -ENODEV;

	/* We use these functions for IO */
	if (!mtd->read_oob || !mtd->write_oob)
		return -ENODEV;

	/* Find geometry information */
	for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
		if (chs_table[i].size == size_in_megs) {
			ftl->cylinders = chs_table[i].cyl;
			ftl->heads = chs_table[i].head;
			ftl->sectors = chs_table[i].sec;
			return 0;
		}
	}

	/* Unknown size: fall back to the geometry of the largest entries */
	sm_printk("media has unknown size : %dMiB", size_in_megs);
	ftl->cylinders = 985;
	ftl->heads = 33;
	ftl->sectors = 63;
	return 0;
}
652
653/* Validate the CIS */
654static int sm_read_cis(struct sm_ftl *ftl)
655{
656 struct sm_oob oob;
657
658 if (sm_read_sector(ftl,
659 0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))
660 return -EIO;
661
662 if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))
663 return -EIO;
664
665 if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
666 cis_signature, sizeof(cis_signature))) {
667 return 0;
668 }
669
670 return -EIO;
671}
672
/* Scan the media for the CIS.
 * On success records its location in ftl->cis_block / cis_boffset /
 * cis_page_offset and returns 0; returns -EIO if no CIS was found. */
static int sm_find_cis(struct sm_ftl *ftl)
{
	struct sm_oob oob;
	int block, boffset;
	int block_found = 0;
	int cis_found = 0;

	/* Search for first valid block; only the first
	   (zone_size - max_lba) blocks of zone 0 are examined */
	for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {

		if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
			continue;

		if (!sm_block_valid(&oob))
			continue;
		block_found = 1;
		break;
	}

	if (!block_found)
		return -EIO;

	/* Search for first valid sector in this block */
	for (boffset = 0 ; boffset < ftl->block_size;
						boffset += SM_SECTOR_SIZE) {

		if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
			continue;

		if (!sm_sector_valid(&oob))
			continue;
		break;
	}

	/* Loop ran to completion: no valid sector in the block */
	if (boffset == ftl->block_size)
		return -EIO;

	ftl->cis_block = block;
	ftl->cis_boffset = boffset;
	ftl->cis_page_offset = 0;

	cis_found = !sm_read_cis(ftl);

	/* Retry at offset SM_SMALL_PAGE within the sector */
	if (!cis_found) {
		ftl->cis_page_offset = SM_SMALL_PAGE;
		cis_found = !sm_read_cis(ftl);
	}

	if (cis_found) {
		dbg("CIS block found at offset %x",
			block * ftl->block_size +
				boffset + ftl->cis_page_offset);
		return 0;
	}
	return -EIO;
}
730
731/* Basic test to determine if underlying mtd device if functional */
732static int sm_recheck_media(struct sm_ftl *ftl)
733{
734 if (sm_read_cis(ftl)) {
735
736 if (!ftl->unstable) {
737 sm_printk("media unstable, not allowing writes");
738 ftl->unstable = 1;
739 }
740 return -EIO;
741 }
742 return 0;
743}
744
745/* Initialize a FTL zone */
746static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
747{
748 struct ftl_zone *zone = &ftl->zones[zone_num];
749 struct sm_oob oob;
750 uint16_t block;
751 int lba;
752 int i = 0;
753 int len;
754
755 dbg("initializing zone %d", zone_num);
756
757 /* Allocate memory for FTL table */
758 zone->lba_to_phys_table = kmalloc(ftl->max_lba * 2, GFP_KERNEL);
759
760 if (!zone->lba_to_phys_table)
761 return -ENOMEM;
762 memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);
763
764
765 /* Allocate memory for free sectors FIFO */
766 if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
767 kfree(zone->lba_to_phys_table);
768 return -ENOMEM;
769 }
770
771 /* Now scan the zone */
772 for (block = 0 ; block < ftl->zone_size ; block++) {
773
774 /* Skip blocks till the CIS (including) */
775 if (zone_num == 0 && block <= ftl->cis_block)
776 continue;
777
778 /* Read the oob of first sector */
779 if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob))
780 return -EIO;
781
782 /* Test to see if block is erased. It is enough to test
783 first sector, because erase happens in one shot */
784 if (sm_block_erased(&oob)) {
785 kfifo_in(&zone->free_sectors,
786 (unsigned char *)&block, 2);
787 continue;
788 }
789
790 /* If block is marked as bad, skip it */
791 /* This assumes we can trust first sector*/
792 /* However the way the block valid status is defined, ensures
793 very low probability of failure here */
794 if (!sm_block_valid(&oob)) {
795 dbg("PH %04d <-> <marked bad>", block);
796 continue;
797 }
798
799
800 lba = sm_read_lba(&oob);
801
802 /* Invalid LBA means that block is damaged. */
803 /* We can try to erase it, or mark it as bad, but
804 lets leave that to recovery application */
805 if (lba == -2 || lba >= ftl->max_lba) {
806 dbg("PH %04d <-> LBA %04d(bad)", block, lba);
807 continue;
808 }
809
810
811 /* If there is no collision,
812 just put the sector in the FTL table */
813 if (zone->lba_to_phys_table[lba] < 0) {
814 dbg_verbose("PH %04d <-> LBA %04d", block, lba);
815 zone->lba_to_phys_table[lba] = block;
816 continue;
817 }
818
819 sm_printk("collision"
820 " of LBA %d between blocks %d and %d in zone %d",
821 lba, zone->lba_to_phys_table[lba], block, zone_num);
822
823 /* Test that this block is valid*/
824 if (sm_check_block(ftl, zone_num, block))
825 continue;
826
827 /* Test now the old block */
828 if (sm_check_block(ftl, zone_num,
829 zone->lba_to_phys_table[lba])) {
830 zone->lba_to_phys_table[lba] = block;
831 continue;
832 }
833
834 /* If both blocks are valid and share same LBA, it means that
835 they hold different versions of same data. It not
836 known which is more recent, thus just erase one of them
837 */
838 sm_printk("both blocks are valid, erasing the later");
839 sm_erase_block(ftl, zone_num, block, 1);
840 }
841
842 dbg("zone initialized");
843 zone->initialized = 1;
844
845 /* No free sectors, means that the zone is heavily damaged, write won't
846 work, but it can still can be (partially) read */
847 if (!kfifo_len(&zone->free_sectors)) {
848 sm_printk("no free blocks in zone %d", zone_num);
849 return 0;
850 }
851
852 /* Randomize first block we write to */
853 get_random_bytes(&i, 2);
854 i %= (kfifo_len(&zone->free_sectors) / 2);
855
856 while (i--) {
857 len = kfifo_out(&zone->free_sectors,
858 (unsigned char *)&block, 2);
859 WARN_ON(len != 2);
860 kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
861 }
862 return 0;
863}
864
865/* Get and automaticly initialize an FTL mapping for one zone */
866struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
867{
868 struct ftl_zone *zone;
869 int error;
870
871 BUG_ON(zone_num >= ftl->zone_count);
872 zone = &ftl->zones[zone_num];
873
874 if (!zone->initialized) {
875 error = sm_init_zone(ftl, zone_num);
876
877 if (error)
878 return ERR_PTR(error);
879 }
880 return zone;
881}
882
883
884/* ----------------- cache handling ------------------------------------------*/
885
886/* Initialize the one block cache */
887void sm_cache_init(struct sm_ftl *ftl)
888{
889 ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
890 ftl->cache_clean = 1;
891 ftl->cache_zone = -1;
892 ftl->cache_block = -1;
893 /*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
894}
895
896/* Put sector in one block cache */
897void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
898{
899 memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
900 clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
901 ftl->cache_clean = 0;
902}
903
904/* Read a sector from the cache */
905int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
906{
907 if (test_bit(boffset / SM_SECTOR_SIZE,
908 &ftl->cache_data_invalid_bitmap))
909 return -1;
910
911 memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
912 return 0;
913}
914
/* Write the cache to hardware.
 * Fills in any still-unread sectors from the old block, writes the whole
 * block to a fresh free block, updates the FTL table, erases the old
 * block and resets the cache. Returns 0 or -EIO. */
int sm_cache_flush(struct sm_ftl *ftl)
{
	struct ftl_zone *zone;

	int sector_num;
	uint16_t write_sector;
	int zone_num = ftl->cache_zone;
	int block_num;

	if (ftl->cache_clean)
		return 0;

	if (ftl->unstable)
		return -EIO;

	/* cache_zone is only -1 while the cache is clean */
	BUG_ON(zone_num < 0);
	zone = &ftl->zones[zone_num];
	block_num = zone->lba_to_phys_table[ftl->cache_block];


	/* Try to read all unread areas of the cache block*/
	for_each_set_bit(sector_num, &ftl->cache_data_invalid_bitmap,
		ftl->block_size / SM_SECTOR_SIZE) {

		if (!sm_read_sector(ftl,
			zone_num, block_num, sector_num * SM_SECTOR_SIZE,
			ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
			clear_bit(sector_num,
				&ftl->cache_data_invalid_bitmap);
	}
restart:

	if (ftl->unstable)
		return -EIO;

	/* If there are no spare blocks, */
	/* we could still continue by erasing/writing the current block,
		but for such worn out media it doesn't worth the trouble,
			and the dangers */
	if (kfifo_out(&zone->free_sectors,
		(unsigned char *)&write_sector, 2) != 2) {
		dbg("no free sectors for write!");
		return -EIO;
	}


	/* If the write failed, retry with the next free block */
	if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
		ftl->cache_block, ftl->cache_data_invalid_bitmap))
			goto restart;

	/* Update the FTL table */
	zone->lba_to_phys_table[ftl->cache_block] = write_sector;

	/* Write successful, so erase and free the old block */
	/* NOTE(review): in zones > 0, physical block 0 can hold data, and
	   the '> 0' test would skip erasing it — confirm whether '>= 0'
	   was intended (-1 marks an unassigned LBA) */
	if (block_num > 0)
		sm_erase_block(ftl, zone_num, block_num, 1);

	sm_cache_init(ftl);
	return 0;
}
976
977
/* flush timer, runs a second after last write */
static void sm_cache_flush_timer(unsigned long data)
{
	struct sm_ftl *ftl = (struct sm_ftl *)data;
	/* Timer context cannot take ftl->mutex; defer the flush to the
	   workqueue, whose handler does the locking */
	queue_work(cache_flush_workqueue, &ftl->flush_work);
}
984
985/* cache flush work, kicked by timer */
986static void sm_cache_flush_work(struct work_struct *work)
987{
988 struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);
989 mutex_lock(&ftl->mutex);
990 sm_cache_flush(ftl);
991 mutex_unlock(&ftl->mutex);
992 return;
993}
994
995/* ---------------- outside interface -------------------------------------- */
996
/* outside interface: read a sector.
 * Serves the read from the write cache when possible, otherwise from
 * the media; unmapped sectors read back as 0xFF. Returns 0 or -EIO. */
static int sm_read(struct mtd_blktrans_dev *dev,
		   unsigned long sect_no, char *buf)
{
	struct sm_ftl *ftl = dev->priv;
	struct ftl_zone *zone;
	int error = 0, in_cache = 0;
	int zone_num, block, boffset;

	/* Split the 512-byte sector number into zone/block/offset */
	sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
	mutex_lock(&ftl->mutex);


	/* Lazily initializes the zone on first access */
	zone = sm_get_zone(ftl, zone_num);
	if (IS_ERR(zone)) {
		error = PTR_ERR(zone);
		goto unlock;
	}

	/* Have to look at cache first */
	if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
		in_cache = 1;
		if (!sm_cache_get(ftl, buf, boffset))
			goto unlock;
	}

	/* Translate the block and return if doesn't exist in the table */
	block = zone->lba_to_phys_table[block];

	/* Unmapped sector: behaves like erased flash */
	if (block == -1) {
		memset(buf, 0xFF, SM_SECTOR_SIZE);
		goto unlock;
	}

	if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
		error = -EIO;
		goto unlock;
	}

	/* The cached block missed this sector; populate it */
	if (in_cache)
		sm_cache_put(ftl, buf, boffset);
unlock:
	mutex_unlock(&ftl->mutex);
	return error;
}
1042
1043/* outside interface: write a sector */
1044static int sm_write(struct mtd_blktrans_dev *dev,
1045 unsigned long sec_no, char *buf)
1046{
1047 struct sm_ftl *ftl = dev->priv;
1048 struct ftl_zone *zone;
1049 int error, zone_num, block, boffset;
1050
1051 BUG_ON(ftl->readonly);
1052 sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);
1053
1054 /* No need in flush thread running now */
1055 del_timer(&ftl->timer);
1056 mutex_lock(&ftl->mutex);
1057
1058 zone = sm_get_zone(ftl, zone_num);
1059 if (IS_ERR(zone)) {
1060 error = PTR_ERR(zone);
1061 goto unlock;
1062 }
1063
1064 /* If entry is not in cache, flush it */
1065 if (ftl->cache_block != block || ftl->cache_zone != zone_num) {
1066
1067 error = sm_cache_flush(ftl);
1068 if (error)
1069 goto unlock;
1070
1071 ftl->cache_block = block;
1072 ftl->cache_zone = zone_num;
1073 }
1074
1075 sm_cache_put(ftl, buf, boffset);
1076unlock:
1077 mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
1078 mutex_unlock(&ftl->mutex);
1079 return error;
1080}
1081
1082/* outside interface: flush everything */
1083static int sm_flush(struct mtd_blktrans_dev *dev)
1084{
1085 struct sm_ftl *ftl = dev->priv;
1086 int retval;
1087
1088 mutex_lock(&ftl->mutex);
1089 retval = sm_cache_flush(ftl);
1090 mutex_unlock(&ftl->mutex);
1091 return retval;
1092}
1093
1094/* outside interface: device is released */
1095static int sm_release(struct mtd_blktrans_dev *dev)
1096{
1097 struct sm_ftl *ftl = dev->priv;
1098
1099 mutex_lock(&ftl->mutex);
1100 del_timer_sync(&ftl->timer);
1101 cancel_work_sync(&ftl->flush_work);
1102 sm_cache_flush(ftl);
1103 mutex_unlock(&ftl->mutex);
1104 return 0;
1105}
1106
1107/* outside interface: get geometry */
1108static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
1109{
1110 struct sm_ftl *ftl = dev->priv;
1111 geo->heads = ftl->heads;
1112 geo->sectors = ftl->sectors;
1113 geo->cylinders = ftl->cylinders;
1114 return 0;
1115}
1116
1117/* external interface: main initialization function */
1118static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
1119{
1120 struct mtd_blktrans_dev *trans;
1121 struct sm_ftl *ftl;
1122
1123 /* Allocate & initialize our private structure */
1124 ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL);
1125 if (!ftl)
1126 goto error1;
1127
1128
1129 mutex_init(&ftl->mutex);
1130 setup_timer(&ftl->timer, sm_cache_flush_timer, (unsigned long)ftl);
1131 INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
1132 init_completion(&ftl->erase_completion);
1133
1134 /* Read media information */
1135 if (sm_get_media_info(ftl, mtd)) {
1136 dbg("found unsupported mtd device, aborting");
1137 goto error2;
1138 }
1139
1140
1141 /* Allocate temporary CIS buffer for read retry support */
1142 ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
1143 if (!ftl->cis_buffer)
1144 goto error2;
1145
1146 /* Allocate zone array, it will be initialized on demand */
1147 ftl->zones = kzalloc(sizeof(struct ftl_zone) * ftl->zone_count,
1148 GFP_KERNEL);
1149 if (!ftl->zones)
1150 goto error3;
1151
1152 /* Allocate the cache*/
1153 ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);
1154
1155 if (!ftl->cache_data)
1156 goto error4;
1157
1158 sm_cache_init(ftl);
1159
1160
1161 /* Allocate upper layer structure and initialize it */
1162 trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
1163 if (!trans)
1164 goto error5;
1165
1166 ftl->trans = trans;
1167 trans->priv = ftl;
1168
1169 trans->tr = tr;
1170 trans->mtd = mtd;
1171 trans->devnum = -1;
1172 trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
1173 trans->readonly = ftl->readonly;
1174
1175 if (sm_find_cis(ftl)) {
1176 dbg("CIS not found on mtd device, aborting");
1177 goto error6;
1178 }
1179
1180 ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
1181 trans->disk_attributes = ftl->disk_attributes;
1182
1183 sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
1184 (int)(mtd->size / (1024 * 1024)), mtd->index);
1185
1186 dbg("FTL layout:");
1187 dbg("%d zone(s), each consists of %d blocks (+%d spares)",
1188 ftl->zone_count, ftl->max_lba,
1189 ftl->zone_size - ftl->max_lba);
1190 dbg("each block consists of %d bytes",
1191 ftl->block_size);
1192
1193
1194 /* Register device*/
1195 if (add_mtd_blktrans_dev(trans)) {
1196 dbg("error in mtdblktrans layer");
1197 goto error6;
1198 }
1199 return;
1200error6:
1201 kfree(trans);
1202error5:
1203 kfree(ftl->cache_data);
1204error4:
1205 kfree(ftl->zones);
1206error3:
1207 kfree(ftl->cis_buffer);
1208error2:
1209 kfree(ftl);
1210error1:
1211 return;
1212}
1213
/* main interface: device {surprise,} removal.
 * Detaches from the blktrans layer and frees everything allocated in
 * sm_add_mtd() and sm_init_zone(). */
static void sm_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct sm_ftl *ftl = dev->priv;
	int i;

	del_mtd_blktrans_dev(dev);
	ftl->trans = NULL;

	/* Zones are initialized lazily; only the initialized ones own
	   an LBA table and a free-sector FIFO */
	for (i = 0 ; i < ftl->zone_count; i++) {

		if (!ftl->zones[i].initialized)
			continue;

		kfree(ftl->zones[i].lba_to_phys_table);
		kfifo_free(&ftl->zones[i].free_sectors);
	}

	sm_delete_sysfs_attributes(ftl);
	kfree(ftl->cis_buffer);
	kfree(ftl->zones);
	kfree(ftl->cache_data);
	kfree(ftl);
}
1238
/* blktrans operations: exposes the FTL as "smblk" block devices with
   512-byte sectors; major number is allocated dynamically (-1) */
static struct mtd_blktrans_ops sm_ftl_ops = {
	.name		= "smblk",
	.major		= -1,
	.part_bits	= SM_FTL_PARTN_BITS,
	.blksize	= SM_SECTOR_SIZE,
	.getgeo		= sm_getgeo,

	.add_mtd	= sm_add_mtd,
	.remove_dev	= sm_remove_dev,

	.readsect	= sm_read,
	.writesect	= sm_write,

	.flush		= sm_flush,
	.release	= sm_release,

	.owner		= THIS_MODULE,
};
1257
1258static __init int sm_module_init(void)
1259{
1260 int error = 0;
1261 cache_flush_workqueue = create_freezeable_workqueue("smflush");
1262
1263 if (IS_ERR(cache_flush_workqueue))
1264 return PTR_ERR(cache_flush_workqueue);
1265
1266 error = register_mtd_blktrans(&sm_ftl_ops);
1267 if (error)
1268 destroy_workqueue(cache_flush_workqueue);
1269 return error;
1270
1271}
1272
/* Module exit: tear down the workqueue and the translation layer.
   NOTE(review): the workqueue is destroyed before the blktrans driver
   is deregistered; if a flush timer fires in that window it would
   queue work on a destroyed workqueue — confirm the intended order. */
static void __exit sm_module_exit(void)
{
	destroy_workqueue(cache_flush_workqueue);
	deregister_mtd_blktrans(&sm_ftl_ops);
}
1278
module_init(sm_module_init);
module_exit(sm_module_exit);

/* Module metadata */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");
diff --git a/drivers/mtd/sm_ftl.h b/drivers/mtd/sm_ftl.h
new file mode 100644
index 000000000000..e30e48e7f63d
--- /dev/null
+++ b/drivers/mtd/sm_ftl.h
@@ -0,0 +1,94 @@
1/*
2 * Copyright © 2009 - Maxim Levitsky
3 * SmartMedia/xD translation layer
4 *
5 * Based loosly on ssfdc.c which is
6 * © 2005 Eptar srl
7 * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/mtd/blktrans.h>
15#include <linux/kfifo.h>
16#include <linux/sched.h>
17#include <linux/completion.h>
18#include <linux/mtd/mtd.h>
19
20
21
22struct ftl_zone {
23 int initialized;
24 int16_t *lba_to_phys_table; /* LBA to physical table */
25 struct kfifo free_sectors; /* queue of free sectors */
26};
27
28struct sm_ftl {
29 struct mtd_blktrans_dev *trans;
30
31 struct mutex mutex; /* protects the structure */
32 struct ftl_zone *zones; /* FTL tables for each zone */
33
34 /* Media information */
35 int block_size; /* block size in bytes */
36 int zone_size; /* zone size in blocks */
37 int zone_count; /* number of zones */
38 int max_lba; /* maximum lba in a zone */
39 int smallpagenand; /* 256 bytes/page nand */
40 int readonly; /* is FS readonly */
41 int unstable;
42 int cis_block; /* CIS block location */
43 int cis_boffset; /* CIS offset in the block */
44 int cis_page_offset; /* CIS offset in the page */
45 void *cis_buffer; /* tmp buffer for cis reads */
46
47 /* Cache */
48 int cache_block; /* block number of cached block */
49 int cache_zone; /* zone of cached block */
50 unsigned char *cache_data; /* cached block data */
51 long unsigned int cache_data_invalid_bitmap;
52 int cache_clean;
53 struct work_struct flush_work;
54 struct timer_list timer;
55
56 /* Async erase stuff */
57 struct completion erase_completion;
58
59 /* Geometry stuff */
60 int heads;
61 int sectors;
62 int cylinders;
63
64 struct attribute_group *disk_attributes;
65};
66
67struct chs_entry {
68 unsigned long size;
69 unsigned short cyl;
70 unsigned char head;
71 unsigned char sec;
72};
73
74
75#define SM_FTL_PARTN_BITS 3
76
77#define sm_printk(format, ...) \
78 printk(KERN_WARNING "sm_ftl" ": " format "\n", ## __VA_ARGS__)
79
80#define dbg(format, ...) \
81 if (debug) \
82 printk(KERN_DEBUG "sm_ftl" ": " format "\n", ## __VA_ARGS__)
83
84#define dbg_verbose(format, ...) \
85 if (debug > 1) \
86 printk(KERN_DEBUG "sm_ftl" ": " format "\n", ## __VA_ARGS__)
87
88
89static void sm_erase_callback(struct erase_info *self);
90static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
91 int put_free);
92static void sm_mark_block_bad(struct sm_ftl *ftl, int zone_num, int block);
93
94static int sm_recheck_media(struct sm_ftl *ftl);
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 3f67e00d98e0..81c4ecdc11f5 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -375,7 +375,6 @@ static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev)
375 375
376 del_mtd_blktrans_dev(dev); 376 del_mtd_blktrans_dev(dev);
377 kfree(ssfdc->logic_block_map); 377 kfree(ssfdc->logic_block_map);
378 kfree(ssfdc);
379} 378}
380 379
381static int ssfdcr_readsect(struct mtd_blktrans_dev *dev, 380static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,