diff options
author | Grant Likely <grant.likely@secretlab.ca> | 2010-05-22 02:36:56 -0400 |
---|---|---|
committer | Grant Likely <grant.likely@secretlab.ca> | 2010-05-22 02:36:56 -0400 |
commit | cf9b59e9d3e008591d1f54830f570982bb307a0d (patch) | |
tree | 113478ce8fd8c832ba726ffdf59b82cb46356476 /drivers/mtd | |
parent | 44504b2bebf8b5823c59484e73096a7d6574471d (diff) | |
parent | f4b87dee923342505e1ddba8d34ce9de33e75050 (diff) |
Merge remote branch 'origin' into secretlab/next-devicetree
Merging in current state of Linus' tree to deal with merge conflicts and
build failures in vio.c after merge.
Conflicts:
drivers/i2c/busses/i2c-cpm.c
drivers/i2c/busses/i2c-mpc.c
drivers/net/gianfar.c
Also fixed up one line in arch/powerpc/kernel/vio.c to use the
correct node pointer.
Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Diffstat (limited to 'drivers/mtd')
100 files changed, 9981 insertions, 1631 deletions
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index ecf90f5c97c2..f8210bf2d241 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig | |||
@@ -304,6 +304,19 @@ config SSFDC | |||
304 | This enables read only access to SmartMedia formatted NAND | 304 | This enables read only access to SmartMedia formatted NAND |
305 | flash. You can mount it with FAT file system. | 305 | flash. You can mount it with FAT file system. |
306 | 306 | ||
307 | |||
308 | config SM_FTL | ||
309 | tristate "SmartMedia/xD new translation layer" | ||
310 | depends on EXPERIMENTAL && BLOCK | ||
311 | select MTD_BLKDEVS | ||
312 | select MTD_NAND_ECC | ||
313 | help | ||
314 | This enables new and very EXPERMENTAL support for SmartMedia/xD | ||
315 | FTL (Flash translation layer). | ||
316 | Write support isn't yet well tested, therefore this code IS likely to | ||
317 | eat your card, so please don't use it together with valuable data. | ||
318 | Use readonly driver (CONFIG_SSFDC) instead. | ||
319 | |||
307 | config MTD_OOPS | 320 | config MTD_OOPS |
308 | tristate "Log panic/oops to an MTD buffer" | 321 | tristate "Log panic/oops to an MTD buffer" |
309 | depends on MTD | 322 | depends on MTD |
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile index 82d1e4de475b..760abc533395 100644 --- a/drivers/mtd/Makefile +++ b/drivers/mtd/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | # Core functionality. | 5 | # Core functionality. |
6 | obj-$(CONFIG_MTD) += mtd.o | 6 | obj-$(CONFIG_MTD) += mtd.o |
7 | mtd-y := mtdcore.o mtdsuper.o mtdbdi.o | 7 | mtd-y := mtdcore.o mtdsuper.o |
8 | mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o | 8 | mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o |
9 | 9 | ||
10 | obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o | 10 | obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o |
@@ -24,6 +24,7 @@ obj-$(CONFIG_NFTL) += nftl.o | |||
24 | obj-$(CONFIG_INFTL) += inftl.o | 24 | obj-$(CONFIG_INFTL) += inftl.o |
25 | obj-$(CONFIG_RFD_FTL) += rfd_ftl.o | 25 | obj-$(CONFIG_RFD_FTL) += rfd_ftl.o |
26 | obj-$(CONFIG_SSFDC) += ssfdc.o | 26 | obj-$(CONFIG_SSFDC) += ssfdc.o |
27 | obj-$(CONFIG_SM_FTL) += sm_ftl.o | ||
27 | obj-$(CONFIG_MTD_OOPS) += mtdoops.o | 28 | obj-$(CONFIG_MTD_OOPS) += mtdoops.o |
28 | 29 | ||
29 | nftl-objs := nftlcore.o nftlmount.o | 30 | nftl-objs := nftlcore.o nftlmount.o |
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index 5fbf29e1e64f..62f3ea9de848 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c | |||
@@ -615,10 +615,8 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd) | |||
615 | return mtd; | 615 | return mtd; |
616 | 616 | ||
617 | setup_err: | 617 | setup_err: |
618 | if(mtd) { | 618 | kfree(mtd->eraseregions); |
619 | kfree(mtd->eraseregions); | 619 | kfree(mtd); |
620 | kfree(mtd); | ||
621 | } | ||
622 | kfree(cfi->cmdset_priv); | 620 | kfree(cfi->cmdset_priv); |
623 | return NULL; | 621 | return NULL; |
624 | } | 622 | } |
@@ -727,8 +725,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd, | |||
727 | /* those should be reset too since | 725 | /* those should be reset too since |
728 | they create memory references. */ | 726 | they create memory references. */ |
729 | init_waitqueue_head(&chip->wq); | 727 | init_waitqueue_head(&chip->wq); |
730 | spin_lock_init(&chip->_spinlock); | 728 | mutex_init(&chip->mutex); |
731 | chip->mutex = &chip->_spinlock; | ||
732 | chip++; | 729 | chip++; |
733 | } | 730 | } |
734 | } | 731 | } |
@@ -774,9 +771,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long | |||
774 | if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS)) | 771 | if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS)) |
775 | break; | 772 | break; |
776 | 773 | ||
777 | spin_unlock(chip->mutex); | 774 | mutex_unlock(&chip->mutex); |
778 | cfi_udelay(1); | 775 | cfi_udelay(1); |
779 | spin_lock(chip->mutex); | 776 | mutex_lock(&chip->mutex); |
780 | /* Someone else might have been playing with it. */ | 777 | /* Someone else might have been playing with it. */ |
781 | return -EAGAIN; | 778 | return -EAGAIN; |
782 | } | 779 | } |
@@ -823,9 +820,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long | |||
823 | return -EIO; | 820 | return -EIO; |
824 | } | 821 | } |
825 | 822 | ||
826 | spin_unlock(chip->mutex); | 823 | mutex_unlock(&chip->mutex); |
827 | cfi_udelay(1); | 824 | cfi_udelay(1); |
828 | spin_lock(chip->mutex); | 825 | mutex_lock(&chip->mutex); |
829 | /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. | 826 | /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. |
830 | So we can just loop here. */ | 827 | So we can just loop here. */ |
831 | } | 828 | } |
@@ -852,10 +849,10 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long | |||
852 | sleep: | 849 | sleep: |
853 | set_current_state(TASK_UNINTERRUPTIBLE); | 850 | set_current_state(TASK_UNINTERRUPTIBLE); |
854 | add_wait_queue(&chip->wq, &wait); | 851 | add_wait_queue(&chip->wq, &wait); |
855 | spin_unlock(chip->mutex); | 852 | mutex_unlock(&chip->mutex); |
856 | schedule(); | 853 | schedule(); |
857 | remove_wait_queue(&chip->wq, &wait); | 854 | remove_wait_queue(&chip->wq, &wait); |
858 | spin_lock(chip->mutex); | 855 | mutex_lock(&chip->mutex); |
859 | return -EAGAIN; | 856 | return -EAGAIN; |
860 | } | 857 | } |
861 | } | 858 | } |
@@ -901,20 +898,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr | |||
901 | * it'll happily send us to sleep. In any case, when | 898 | * it'll happily send us to sleep. In any case, when |
902 | * get_chip returns success we're clear to go ahead. | 899 | * get_chip returns success we're clear to go ahead. |
903 | */ | 900 | */ |
904 | ret = spin_trylock(contender->mutex); | 901 | ret = mutex_trylock(&contender->mutex); |
905 | spin_unlock(&shared->lock); | 902 | spin_unlock(&shared->lock); |
906 | if (!ret) | 903 | if (!ret) |
907 | goto retry; | 904 | goto retry; |
908 | spin_unlock(chip->mutex); | 905 | mutex_unlock(&chip->mutex); |
909 | ret = chip_ready(map, contender, contender->start, mode); | 906 | ret = chip_ready(map, contender, contender->start, mode); |
910 | spin_lock(chip->mutex); | 907 | mutex_lock(&chip->mutex); |
911 | 908 | ||
912 | if (ret == -EAGAIN) { | 909 | if (ret == -EAGAIN) { |
913 | spin_unlock(contender->mutex); | 910 | mutex_unlock(&contender->mutex); |
914 | goto retry; | 911 | goto retry; |
915 | } | 912 | } |
916 | if (ret) { | 913 | if (ret) { |
917 | spin_unlock(contender->mutex); | 914 | mutex_unlock(&contender->mutex); |
918 | return ret; | 915 | return ret; |
919 | } | 916 | } |
920 | spin_lock(&shared->lock); | 917 | spin_lock(&shared->lock); |
@@ -923,10 +920,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr | |||
923 | * in FL_SYNCING state. Put contender and retry. */ | 920 | * in FL_SYNCING state. Put contender and retry. */ |
924 | if (chip->state == FL_SYNCING) { | 921 | if (chip->state == FL_SYNCING) { |
925 | put_chip(map, contender, contender->start); | 922 | put_chip(map, contender, contender->start); |
926 | spin_unlock(contender->mutex); | 923 | mutex_unlock(&contender->mutex); |
927 | goto retry; | 924 | goto retry; |
928 | } | 925 | } |
929 | spin_unlock(contender->mutex); | 926 | mutex_unlock(&contender->mutex); |
930 | } | 927 | } |
931 | 928 | ||
932 | /* Check if we already have suspended erase | 929 | /* Check if we already have suspended erase |
@@ -936,10 +933,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr | |||
936 | spin_unlock(&shared->lock); | 933 | spin_unlock(&shared->lock); |
937 | set_current_state(TASK_UNINTERRUPTIBLE); | 934 | set_current_state(TASK_UNINTERRUPTIBLE); |
938 | add_wait_queue(&chip->wq, &wait); | 935 | add_wait_queue(&chip->wq, &wait); |
939 | spin_unlock(chip->mutex); | 936 | mutex_unlock(&chip->mutex); |
940 | schedule(); | 937 | schedule(); |
941 | remove_wait_queue(&chip->wq, &wait); | 938 | remove_wait_queue(&chip->wq, &wait); |
942 | spin_lock(chip->mutex); | 939 | mutex_lock(&chip->mutex); |
943 | goto retry; | 940 | goto retry; |
944 | } | 941 | } |
945 | 942 | ||
@@ -969,12 +966,12 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad | |||
969 | if (shared->writing && shared->writing != chip) { | 966 | if (shared->writing && shared->writing != chip) { |
970 | /* give back ownership to who we loaned it from */ | 967 | /* give back ownership to who we loaned it from */ |
971 | struct flchip *loaner = shared->writing; | 968 | struct flchip *loaner = shared->writing; |
972 | spin_lock(loaner->mutex); | 969 | mutex_lock(&loaner->mutex); |
973 | spin_unlock(&shared->lock); | 970 | spin_unlock(&shared->lock); |
974 | spin_unlock(chip->mutex); | 971 | mutex_unlock(&chip->mutex); |
975 | put_chip(map, loaner, loaner->start); | 972 | put_chip(map, loaner, loaner->start); |
976 | spin_lock(chip->mutex); | 973 | mutex_lock(&chip->mutex); |
977 | spin_unlock(loaner->mutex); | 974 | mutex_unlock(&loaner->mutex); |
978 | wake_up(&chip->wq); | 975 | wake_up(&chip->wq); |
979 | return; | 976 | return; |
980 | } | 977 | } |
@@ -1144,7 +1141,7 @@ static int __xipram xip_wait_for_operation( | |||
1144 | (void) map_read(map, adr); | 1141 | (void) map_read(map, adr); |
1145 | xip_iprefetch(); | 1142 | xip_iprefetch(); |
1146 | local_irq_enable(); | 1143 | local_irq_enable(); |
1147 | spin_unlock(chip->mutex); | 1144 | mutex_unlock(&chip->mutex); |
1148 | xip_iprefetch(); | 1145 | xip_iprefetch(); |
1149 | cond_resched(); | 1146 | cond_resched(); |
1150 | 1147 | ||
@@ -1154,15 +1151,15 @@ static int __xipram xip_wait_for_operation( | |||
1154 | * a suspended erase state. If so let's wait | 1151 | * a suspended erase state. If so let's wait |
1155 | * until it's done. | 1152 | * until it's done. |
1156 | */ | 1153 | */ |
1157 | spin_lock(chip->mutex); | 1154 | mutex_lock(&chip->mutex); |
1158 | while (chip->state != newstate) { | 1155 | while (chip->state != newstate) { |
1159 | DECLARE_WAITQUEUE(wait, current); | 1156 | DECLARE_WAITQUEUE(wait, current); |
1160 | set_current_state(TASK_UNINTERRUPTIBLE); | 1157 | set_current_state(TASK_UNINTERRUPTIBLE); |
1161 | add_wait_queue(&chip->wq, &wait); | 1158 | add_wait_queue(&chip->wq, &wait); |
1162 | spin_unlock(chip->mutex); | 1159 | mutex_unlock(&chip->mutex); |
1163 | schedule(); | 1160 | schedule(); |
1164 | remove_wait_queue(&chip->wq, &wait); | 1161 | remove_wait_queue(&chip->wq, &wait); |
1165 | spin_lock(chip->mutex); | 1162 | mutex_lock(&chip->mutex); |
1166 | } | 1163 | } |
1167 | /* Disallow XIP again */ | 1164 | /* Disallow XIP again */ |
1168 | local_irq_disable(); | 1165 | local_irq_disable(); |
@@ -1218,10 +1215,10 @@ static int inval_cache_and_wait_for_operation( | |||
1218 | int chip_state = chip->state; | 1215 | int chip_state = chip->state; |
1219 | unsigned int timeo, sleep_time, reset_timeo; | 1216 | unsigned int timeo, sleep_time, reset_timeo; |
1220 | 1217 | ||
1221 | spin_unlock(chip->mutex); | 1218 | mutex_unlock(&chip->mutex); |
1222 | if (inval_len) | 1219 | if (inval_len) |
1223 | INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len); | 1220 | INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len); |
1224 | spin_lock(chip->mutex); | 1221 | mutex_lock(&chip->mutex); |
1225 | 1222 | ||
1226 | timeo = chip_op_time_max; | 1223 | timeo = chip_op_time_max; |
1227 | if (!timeo) | 1224 | if (!timeo) |
@@ -1241,7 +1238,7 @@ static int inval_cache_and_wait_for_operation( | |||
1241 | } | 1238 | } |
1242 | 1239 | ||
1243 | /* OK Still waiting. Drop the lock, wait a while and retry. */ | 1240 | /* OK Still waiting. Drop the lock, wait a while and retry. */ |
1244 | spin_unlock(chip->mutex); | 1241 | mutex_unlock(&chip->mutex); |
1245 | if (sleep_time >= 1000000/HZ) { | 1242 | if (sleep_time >= 1000000/HZ) { |
1246 | /* | 1243 | /* |
1247 | * Half of the normal delay still remaining | 1244 | * Half of the normal delay still remaining |
@@ -1256,17 +1253,17 @@ static int inval_cache_and_wait_for_operation( | |||
1256 | cond_resched(); | 1253 | cond_resched(); |
1257 | timeo--; | 1254 | timeo--; |
1258 | } | 1255 | } |
1259 | spin_lock(chip->mutex); | 1256 | mutex_lock(&chip->mutex); |
1260 | 1257 | ||
1261 | while (chip->state != chip_state) { | 1258 | while (chip->state != chip_state) { |
1262 | /* Someone's suspended the operation: sleep */ | 1259 | /* Someone's suspended the operation: sleep */ |
1263 | DECLARE_WAITQUEUE(wait, current); | 1260 | DECLARE_WAITQUEUE(wait, current); |
1264 | set_current_state(TASK_UNINTERRUPTIBLE); | 1261 | set_current_state(TASK_UNINTERRUPTIBLE); |
1265 | add_wait_queue(&chip->wq, &wait); | 1262 | add_wait_queue(&chip->wq, &wait); |
1266 | spin_unlock(chip->mutex); | 1263 | mutex_unlock(&chip->mutex); |
1267 | schedule(); | 1264 | schedule(); |
1268 | remove_wait_queue(&chip->wq, &wait); | 1265 | remove_wait_queue(&chip->wq, &wait); |
1269 | spin_lock(chip->mutex); | 1266 | mutex_lock(&chip->mutex); |
1270 | } | 1267 | } |
1271 | if (chip->erase_suspended && chip_state == FL_ERASING) { | 1268 | if (chip->erase_suspended && chip_state == FL_ERASING) { |
1272 | /* Erase suspend occured while sleep: reset timeout */ | 1269 | /* Erase suspend occured while sleep: reset timeout */ |
@@ -1302,7 +1299,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a | |||
1302 | /* Ensure cmd read/writes are aligned. */ | 1299 | /* Ensure cmd read/writes are aligned. */ |
1303 | cmd_addr = adr & ~(map_bankwidth(map)-1); | 1300 | cmd_addr = adr & ~(map_bankwidth(map)-1); |
1304 | 1301 | ||
1305 | spin_lock(chip->mutex); | 1302 | mutex_lock(&chip->mutex); |
1306 | 1303 | ||
1307 | ret = get_chip(map, chip, cmd_addr, FL_POINT); | 1304 | ret = get_chip(map, chip, cmd_addr, FL_POINT); |
1308 | 1305 | ||
@@ -1313,7 +1310,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a | |||
1313 | chip->state = FL_POINT; | 1310 | chip->state = FL_POINT; |
1314 | chip->ref_point_counter++; | 1311 | chip->ref_point_counter++; |
1315 | } | 1312 | } |
1316 | spin_unlock(chip->mutex); | 1313 | mutex_unlock(&chip->mutex); |
1317 | 1314 | ||
1318 | return ret; | 1315 | return ret; |
1319 | } | 1316 | } |
@@ -1398,7 +1395,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len) | |||
1398 | else | 1395 | else |
1399 | thislen = len; | 1396 | thislen = len; |
1400 | 1397 | ||
1401 | spin_lock(chip->mutex); | 1398 | mutex_lock(&chip->mutex); |
1402 | if (chip->state == FL_POINT) { | 1399 | if (chip->state == FL_POINT) { |
1403 | chip->ref_point_counter--; | 1400 | chip->ref_point_counter--; |
1404 | if(chip->ref_point_counter == 0) | 1401 | if(chip->ref_point_counter == 0) |
@@ -1407,7 +1404,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len) | |||
1407 | printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */ | 1404 | printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */ |
1408 | 1405 | ||
1409 | put_chip(map, chip, chip->start); | 1406 | put_chip(map, chip, chip->start); |
1410 | spin_unlock(chip->mutex); | 1407 | mutex_unlock(&chip->mutex); |
1411 | 1408 | ||
1412 | len -= thislen; | 1409 | len -= thislen; |
1413 | ofs = 0; | 1410 | ofs = 0; |
@@ -1426,10 +1423,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof | |||
1426 | /* Ensure cmd read/writes are aligned. */ | 1423 | /* Ensure cmd read/writes are aligned. */ |
1427 | cmd_addr = adr & ~(map_bankwidth(map)-1); | 1424 | cmd_addr = adr & ~(map_bankwidth(map)-1); |
1428 | 1425 | ||
1429 | spin_lock(chip->mutex); | 1426 | mutex_lock(&chip->mutex); |
1430 | ret = get_chip(map, chip, cmd_addr, FL_READY); | 1427 | ret = get_chip(map, chip, cmd_addr, FL_READY); |
1431 | if (ret) { | 1428 | if (ret) { |
1432 | spin_unlock(chip->mutex); | 1429 | mutex_unlock(&chip->mutex); |
1433 | return ret; | 1430 | return ret; |
1434 | } | 1431 | } |
1435 | 1432 | ||
@@ -1443,7 +1440,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof | |||
1443 | 1440 | ||
1444 | put_chip(map, chip, cmd_addr); | 1441 | put_chip(map, chip, cmd_addr); |
1445 | 1442 | ||
1446 | spin_unlock(chip->mutex); | 1443 | mutex_unlock(&chip->mutex); |
1447 | return 0; | 1444 | return 0; |
1448 | } | 1445 | } |
1449 | 1446 | ||
@@ -1506,10 +1503,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, | |||
1506 | return -EINVAL; | 1503 | return -EINVAL; |
1507 | } | 1504 | } |
1508 | 1505 | ||
1509 | spin_lock(chip->mutex); | 1506 | mutex_lock(&chip->mutex); |
1510 | ret = get_chip(map, chip, adr, mode); | 1507 | ret = get_chip(map, chip, adr, mode); |
1511 | if (ret) { | 1508 | if (ret) { |
1512 | spin_unlock(chip->mutex); | 1509 | mutex_unlock(&chip->mutex); |
1513 | return ret; | 1510 | return ret; |
1514 | } | 1511 | } |
1515 | 1512 | ||
@@ -1555,7 +1552,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, | |||
1555 | 1552 | ||
1556 | xip_enable(map, chip, adr); | 1553 | xip_enable(map, chip, adr); |
1557 | out: put_chip(map, chip, adr); | 1554 | out: put_chip(map, chip, adr); |
1558 | spin_unlock(chip->mutex); | 1555 | mutex_unlock(&chip->mutex); |
1559 | return ret; | 1556 | return ret; |
1560 | } | 1557 | } |
1561 | 1558 | ||
@@ -1664,10 +1661,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, | |||
1664 | /* Let's determine this according to the interleave only once */ | 1661 | /* Let's determine this according to the interleave only once */ |
1665 | write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9); | 1662 | write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9); |
1666 | 1663 | ||
1667 | spin_lock(chip->mutex); | 1664 | mutex_lock(&chip->mutex); |
1668 | ret = get_chip(map, chip, cmd_adr, FL_WRITING); | 1665 | ret = get_chip(map, chip, cmd_adr, FL_WRITING); |
1669 | if (ret) { | 1666 | if (ret) { |
1670 | spin_unlock(chip->mutex); | 1667 | mutex_unlock(&chip->mutex); |
1671 | return ret; | 1668 | return ret; |
1672 | } | 1669 | } |
1673 | 1670 | ||
@@ -1798,7 +1795,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, | |||
1798 | 1795 | ||
1799 | xip_enable(map, chip, cmd_adr); | 1796 | xip_enable(map, chip, cmd_adr); |
1800 | out: put_chip(map, chip, cmd_adr); | 1797 | out: put_chip(map, chip, cmd_adr); |
1801 | spin_unlock(chip->mutex); | 1798 | mutex_unlock(&chip->mutex); |
1802 | return ret; | 1799 | return ret; |
1803 | } | 1800 | } |
1804 | 1801 | ||
@@ -1877,10 +1874,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, | |||
1877 | adr += chip->start; | 1874 | adr += chip->start; |
1878 | 1875 | ||
1879 | retry: | 1876 | retry: |
1880 | spin_lock(chip->mutex); | 1877 | mutex_lock(&chip->mutex); |
1881 | ret = get_chip(map, chip, adr, FL_ERASING); | 1878 | ret = get_chip(map, chip, adr, FL_ERASING); |
1882 | if (ret) { | 1879 | if (ret) { |
1883 | spin_unlock(chip->mutex); | 1880 | mutex_unlock(&chip->mutex); |
1884 | return ret; | 1881 | return ret; |
1885 | } | 1882 | } |
1886 | 1883 | ||
@@ -1936,7 +1933,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, | |||
1936 | } else if (chipstatus & 0x20 && retries--) { | 1933 | } else if (chipstatus & 0x20 && retries--) { |
1937 | printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus); | 1934 | printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus); |
1938 | put_chip(map, chip, adr); | 1935 | put_chip(map, chip, adr); |
1939 | spin_unlock(chip->mutex); | 1936 | mutex_unlock(&chip->mutex); |
1940 | goto retry; | 1937 | goto retry; |
1941 | } else { | 1938 | } else { |
1942 | printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus); | 1939 | printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus); |
@@ -1948,7 +1945,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, | |||
1948 | 1945 | ||
1949 | xip_enable(map, chip, adr); | 1946 | xip_enable(map, chip, adr); |
1950 | out: put_chip(map, chip, adr); | 1947 | out: put_chip(map, chip, adr); |
1951 | spin_unlock(chip->mutex); | 1948 | mutex_unlock(&chip->mutex); |
1952 | return ret; | 1949 | return ret; |
1953 | } | 1950 | } |
1954 | 1951 | ||
@@ -1981,7 +1978,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd) | |||
1981 | for (i=0; !ret && i<cfi->numchips; i++) { | 1978 | for (i=0; !ret && i<cfi->numchips; i++) { |
1982 | chip = &cfi->chips[i]; | 1979 | chip = &cfi->chips[i]; |
1983 | 1980 | ||
1984 | spin_lock(chip->mutex); | 1981 | mutex_lock(&chip->mutex); |
1985 | ret = get_chip(map, chip, chip->start, FL_SYNCING); | 1982 | ret = get_chip(map, chip, chip->start, FL_SYNCING); |
1986 | 1983 | ||
1987 | if (!ret) { | 1984 | if (!ret) { |
@@ -1992,7 +1989,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd) | |||
1992 | * with the chip now anyway. | 1989 | * with the chip now anyway. |
1993 | */ | 1990 | */ |
1994 | } | 1991 | } |
1995 | spin_unlock(chip->mutex); | 1992 | mutex_unlock(&chip->mutex); |
1996 | } | 1993 | } |
1997 | 1994 | ||
1998 | /* Unlock the chips again */ | 1995 | /* Unlock the chips again */ |
@@ -2000,14 +1997,14 @@ static void cfi_intelext_sync (struct mtd_info *mtd) | |||
2000 | for (i--; i >=0; i--) { | 1997 | for (i--; i >=0; i--) { |
2001 | chip = &cfi->chips[i]; | 1998 | chip = &cfi->chips[i]; |
2002 | 1999 | ||
2003 | spin_lock(chip->mutex); | 2000 | mutex_lock(&chip->mutex); |
2004 | 2001 | ||
2005 | if (chip->state == FL_SYNCING) { | 2002 | if (chip->state == FL_SYNCING) { |
2006 | chip->state = chip->oldstate; | 2003 | chip->state = chip->oldstate; |
2007 | chip->oldstate = FL_READY; | 2004 | chip->oldstate = FL_READY; |
2008 | wake_up(&chip->wq); | 2005 | wake_up(&chip->wq); |
2009 | } | 2006 | } |
2010 | spin_unlock(chip->mutex); | 2007 | mutex_unlock(&chip->mutex); |
2011 | } | 2008 | } |
2012 | } | 2009 | } |
2013 | 2010 | ||
@@ -2053,10 +2050,10 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip | |||
2053 | 2050 | ||
2054 | adr += chip->start; | 2051 | adr += chip->start; |
2055 | 2052 | ||
2056 | spin_lock(chip->mutex); | 2053 | mutex_lock(&chip->mutex); |
2057 | ret = get_chip(map, chip, adr, FL_LOCKING); | 2054 | ret = get_chip(map, chip, adr, FL_LOCKING); |
2058 | if (ret) { | 2055 | if (ret) { |
2059 | spin_unlock(chip->mutex); | 2056 | mutex_unlock(&chip->mutex); |
2060 | return ret; | 2057 | return ret; |
2061 | } | 2058 | } |
2062 | 2059 | ||
@@ -2090,7 +2087,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip | |||
2090 | 2087 | ||
2091 | xip_enable(map, chip, adr); | 2088 | xip_enable(map, chip, adr); |
2092 | out: put_chip(map, chip, adr); | 2089 | out: put_chip(map, chip, adr); |
2093 | spin_unlock(chip->mutex); | 2090 | mutex_unlock(&chip->mutex); |
2094 | return ret; | 2091 | return ret; |
2095 | } | 2092 | } |
2096 | 2093 | ||
@@ -2155,10 +2152,10 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset, | |||
2155 | struct cfi_private *cfi = map->fldrv_priv; | 2152 | struct cfi_private *cfi = map->fldrv_priv; |
2156 | int ret; | 2153 | int ret; |
2157 | 2154 | ||
2158 | spin_lock(chip->mutex); | 2155 | mutex_lock(&chip->mutex); |
2159 | ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY); | 2156 | ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY); |
2160 | if (ret) { | 2157 | if (ret) { |
2161 | spin_unlock(chip->mutex); | 2158 | mutex_unlock(&chip->mutex); |
2162 | return ret; | 2159 | return ret; |
2163 | } | 2160 | } |
2164 | 2161 | ||
@@ -2177,7 +2174,7 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset, | |||
2177 | INVALIDATE_CACHED_RANGE(map, chip->start + offset, size); | 2174 | INVALIDATE_CACHED_RANGE(map, chip->start + offset, size); |
2178 | 2175 | ||
2179 | put_chip(map, chip, chip->start); | 2176 | put_chip(map, chip, chip->start); |
2180 | spin_unlock(chip->mutex); | 2177 | mutex_unlock(&chip->mutex); |
2181 | return 0; | 2178 | return 0; |
2182 | } | 2179 | } |
2183 | 2180 | ||
@@ -2452,7 +2449,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd) | |||
2452 | for (i=0; !ret && i<cfi->numchips; i++) { | 2449 | for (i=0; !ret && i<cfi->numchips; i++) { |
2453 | chip = &cfi->chips[i]; | 2450 | chip = &cfi->chips[i]; |
2454 | 2451 | ||
2455 | spin_lock(chip->mutex); | 2452 | mutex_lock(&chip->mutex); |
2456 | 2453 | ||
2457 | switch (chip->state) { | 2454 | switch (chip->state) { |
2458 | case FL_READY: | 2455 | case FL_READY: |
@@ -2484,7 +2481,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd) | |||
2484 | case FL_PM_SUSPENDED: | 2481 | case FL_PM_SUSPENDED: |
2485 | break; | 2482 | break; |
2486 | } | 2483 | } |
2487 | spin_unlock(chip->mutex); | 2484 | mutex_unlock(&chip->mutex); |
2488 | } | 2485 | } |
2489 | 2486 | ||
2490 | /* Unlock the chips again */ | 2487 | /* Unlock the chips again */ |
@@ -2493,7 +2490,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd) | |||
2493 | for (i--; i >=0; i--) { | 2490 | for (i--; i >=0; i--) { |
2494 | chip = &cfi->chips[i]; | 2491 | chip = &cfi->chips[i]; |
2495 | 2492 | ||
2496 | spin_lock(chip->mutex); | 2493 | mutex_lock(&chip->mutex); |
2497 | 2494 | ||
2498 | if (chip->state == FL_PM_SUSPENDED) { | 2495 | if (chip->state == FL_PM_SUSPENDED) { |
2499 | /* No need to force it into a known state here, | 2496 | /* No need to force it into a known state here, |
@@ -2503,7 +2500,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd) | |||
2503 | chip->oldstate = FL_READY; | 2500 | chip->oldstate = FL_READY; |
2504 | wake_up(&chip->wq); | 2501 | wake_up(&chip->wq); |
2505 | } | 2502 | } |
2506 | spin_unlock(chip->mutex); | 2503 | mutex_unlock(&chip->mutex); |
2507 | } | 2504 | } |
2508 | } | 2505 | } |
2509 | 2506 | ||
@@ -2544,7 +2541,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd) | |||
2544 | 2541 | ||
2545 | chip = &cfi->chips[i]; | 2542 | chip = &cfi->chips[i]; |
2546 | 2543 | ||
2547 | spin_lock(chip->mutex); | 2544 | mutex_lock(&chip->mutex); |
2548 | 2545 | ||
2549 | /* Go to known state. Chip may have been power cycled */ | 2546 | /* Go to known state. Chip may have been power cycled */ |
2550 | if (chip->state == FL_PM_SUSPENDED) { | 2547 | if (chip->state == FL_PM_SUSPENDED) { |
@@ -2553,7 +2550,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd) | |||
2553 | wake_up(&chip->wq); | 2550 | wake_up(&chip->wq); |
2554 | } | 2551 | } |
2555 | 2552 | ||
2556 | spin_unlock(chip->mutex); | 2553 | mutex_unlock(&chip->mutex); |
2557 | } | 2554 | } |
2558 | 2555 | ||
2559 | if ((mtd->flags & MTD_POWERUP_LOCK) | 2556 | if ((mtd->flags & MTD_POWERUP_LOCK) |
@@ -2573,14 +2570,14 @@ static int cfi_intelext_reset(struct mtd_info *mtd) | |||
2573 | /* force the completion of any ongoing operation | 2570 | /* force the completion of any ongoing operation |
2574 | and switch to array mode so any bootloader in | 2571 | and switch to array mode so any bootloader in |
2575 | flash is accessible for soft reboot. */ | 2572 | flash is accessible for soft reboot. */ |
2576 | spin_lock(chip->mutex); | 2573 | mutex_lock(&chip->mutex); |
2577 | ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); | 2574 | ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); |
2578 | if (!ret) { | 2575 | if (!ret) { |
2579 | map_write(map, CMD(0xff), chip->start); | 2576 | map_write(map, CMD(0xff), chip->start); |
2580 | chip->state = FL_SHUTDOWN; | 2577 | chip->state = FL_SHUTDOWN; |
2581 | put_chip(map, chip, chip->start); | 2578 | put_chip(map, chip, chip->start); |
2582 | } | 2579 | } |
2583 | spin_unlock(chip->mutex); | 2580 | mutex_unlock(&chip->mutex); |
2584 | } | 2581 | } |
2585 | 2582 | ||
2586 | return 0; | 2583 | return 0; |
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index f3600e8d5382..d81079ef91a5 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/delay.h> | 33 | #include <linux/delay.h> |
34 | #include <linux/interrupt.h> | 34 | #include <linux/interrupt.h> |
35 | #include <linux/reboot.h> | ||
35 | #include <linux/mtd/compatmac.h> | 36 | #include <linux/mtd/compatmac.h> |
36 | #include <linux/mtd/map.h> | 37 | #include <linux/mtd/map.h> |
37 | #include <linux/mtd/mtd.h> | 38 | #include <linux/mtd/mtd.h> |
@@ -43,10 +44,6 @@ | |||
43 | 44 | ||
44 | #define MAX_WORD_RETRIES 3 | 45 | #define MAX_WORD_RETRIES 3 |
45 | 46 | ||
46 | #define MANUFACTURER_AMD 0x0001 | ||
47 | #define MANUFACTURER_ATMEL 0x001F | ||
48 | #define MANUFACTURER_MACRONIX 0x00C2 | ||
49 | #define MANUFACTURER_SST 0x00BF | ||
50 | #define SST49LF004B 0x0060 | 47 | #define SST49LF004B 0x0060 |
51 | #define SST49LF040B 0x0050 | 48 | #define SST49LF040B 0x0050 |
52 | #define SST49LF008A 0x005a | 49 | #define SST49LF008A 0x005a |
@@ -60,6 +57,7 @@ static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *); | |||
60 | static void cfi_amdstd_sync (struct mtd_info *); | 57 | static void cfi_amdstd_sync (struct mtd_info *); |
61 | static int cfi_amdstd_suspend (struct mtd_info *); | 58 | static int cfi_amdstd_suspend (struct mtd_info *); |
62 | static void cfi_amdstd_resume (struct mtd_info *); | 59 | static void cfi_amdstd_resume (struct mtd_info *); |
60 | static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *); | ||
63 | static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); | 61 | static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); |
64 | 62 | ||
65 | static void cfi_amdstd_destroy(struct mtd_info *); | 63 | static void cfi_amdstd_destroy(struct mtd_info *); |
@@ -168,7 +166,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd, void* param) | |||
168 | * This reduces the risk of false detection due to | 166 | * This reduces the risk of false detection due to |
169 | * the 8-bit device ID. | 167 | * the 8-bit device ID. |
170 | */ | 168 | */ |
171 | (cfi->mfr == MANUFACTURER_MACRONIX)) { | 169 | (cfi->mfr == CFI_MFR_MACRONIX)) { |
172 | DEBUG(MTD_DEBUG_LEVEL1, | 170 | DEBUG(MTD_DEBUG_LEVEL1, |
173 | "%s: Macronix MX29LV400C with bottom boot block" | 171 | "%s: Macronix MX29LV400C with bottom boot block" |
174 | " detected\n", map->name); | 172 | " detected\n", map->name); |
@@ -260,6 +258,42 @@ static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param) | |||
260 | mtd->flags |= MTD_POWERUP_LOCK; | 258 | mtd->flags |= MTD_POWERUP_LOCK; |
261 | } | 259 | } |
262 | 260 | ||
261 | static void fixup_old_sst_eraseregion(struct mtd_info *mtd) | ||
262 | { | ||
263 | struct map_info *map = mtd->priv; | ||
264 | struct cfi_private *cfi = map->fldrv_priv; | ||
265 | |||
266 | /* | ||
267 | * These flashes report two seperate eraseblock regions based on the | ||
268 | * sector_erase-size and block_erase-size, although they both operate on the | ||
269 | * same memory. This is not allowed according to CFI, so we just pick the | ||
270 | * sector_erase-size. | ||
271 | */ | ||
272 | cfi->cfiq->NumEraseRegions = 1; | ||
273 | } | ||
274 | |||
275 | static void fixup_sst39vf(struct mtd_info *mtd, void *param) | ||
276 | { | ||
277 | struct map_info *map = mtd->priv; | ||
278 | struct cfi_private *cfi = map->fldrv_priv; | ||
279 | |||
280 | fixup_old_sst_eraseregion(mtd); | ||
281 | |||
282 | cfi->addr_unlock1 = 0x5555; | ||
283 | cfi->addr_unlock2 = 0x2AAA; | ||
284 | } | ||
285 | |||
286 | static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param) | ||
287 | { | ||
288 | struct map_info *map = mtd->priv; | ||
289 | struct cfi_private *cfi = map->fldrv_priv; | ||
290 | |||
291 | fixup_old_sst_eraseregion(mtd); | ||
292 | |||
293 | cfi->addr_unlock1 = 0x555; | ||
294 | cfi->addr_unlock2 = 0x2AA; | ||
295 | } | ||
296 | |||
263 | static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param) | 297 | static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param) |
264 | { | 298 | { |
265 | struct map_info *map = mtd->priv; | 299 | struct map_info *map = mtd->priv; |
@@ -282,11 +316,24 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param) | |||
282 | } | 316 | } |
283 | } | 317 | } |
284 | 318 | ||
319 | /* Used to fix CFI-Tables of chips without Extended Query Tables */ | ||
320 | static struct cfi_fixup cfi_nopri_fixup_table[] = { | ||
321 | { CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, // SST39VF1602 | ||
322 | { CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, // SST39VF1601 | ||
323 | { CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, // SST39VF3202 | ||
324 | { CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, // SST39VF3201 | ||
325 | { CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, // SST39VF3202B | ||
326 | { CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, // SST39VF3201B | ||
327 | { CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, // SST39VF6402B | ||
328 | { CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, // SST39VF6401B | ||
329 | { 0, 0, NULL, NULL } | ||
330 | }; | ||
331 | |||
285 | static struct cfi_fixup cfi_fixup_table[] = { | 332 | static struct cfi_fixup cfi_fixup_table[] = { |
286 | { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL }, | 333 | { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL }, |
287 | #ifdef AMD_BOOTLOC_BUG | 334 | #ifdef AMD_BOOTLOC_BUG |
288 | { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL }, | 335 | { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL }, |
289 | { MANUFACTURER_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL }, | 336 | { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL }, |
290 | #endif | 337 | #endif |
291 | { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, }, | 338 | { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, }, |
292 | { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, }, | 339 | { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, }, |
@@ -304,9 +351,9 @@ static struct cfi_fixup cfi_fixup_table[] = { | |||
304 | { 0, 0, NULL, NULL } | 351 | { 0, 0, NULL, NULL } |
305 | }; | 352 | }; |
306 | static struct cfi_fixup jedec_fixup_table[] = { | 353 | static struct cfi_fixup jedec_fixup_table[] = { |
307 | { MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, }, | 354 | { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, }, |
308 | { MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, }, | 355 | { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, }, |
309 | { MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, }, | 356 | { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, }, |
310 | { 0, 0, NULL, NULL } | 357 | { 0, 0, NULL, NULL } |
311 | }; | 358 | }; |
312 | 359 | ||
@@ -355,67 +402,72 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) | |||
355 | mtd->name = map->name; | 402 | mtd->name = map->name; |
356 | mtd->writesize = 1; | 403 | mtd->writesize = 1; |
357 | 404 | ||
405 | mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; | ||
406 | |||
358 | if (cfi->cfi_mode==CFI_MODE_CFI){ | 407 | if (cfi->cfi_mode==CFI_MODE_CFI){ |
359 | unsigned char bootloc; | 408 | unsigned char bootloc; |
360 | /* | ||
361 | * It's a real CFI chip, not one for which the probe | ||
362 | * routine faked a CFI structure. So we read the feature | ||
363 | * table from it. | ||
364 | */ | ||
365 | __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR; | 409 | __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR; |
366 | struct cfi_pri_amdstd *extp; | 410 | struct cfi_pri_amdstd *extp; |
367 | 411 | ||
368 | extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu"); | 412 | extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu"); |
369 | if (!extp) { | 413 | if (extp) { |
370 | kfree(mtd); | 414 | /* |
371 | return NULL; | 415 | * It's a real CFI chip, not one for which the probe |
372 | } | 416 | * routine faked a CFI structure. |
373 | 417 | */ | |
374 | cfi_fixup_major_minor(cfi, extp); | 418 | cfi_fixup_major_minor(cfi, extp); |
375 | 419 | ||
376 | if (extp->MajorVersion != '1' || | 420 | if (extp->MajorVersion != '1' || |
377 | (extp->MinorVersion < '0' || extp->MinorVersion > '4')) { | 421 | (extp->MinorVersion < '0' || extp->MinorVersion > '4')) { |
378 | printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query " | 422 | printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query " |
379 | "version %c.%c.\n", extp->MajorVersion, | 423 | "version %c.%c.\n", extp->MajorVersion, |
380 | extp->MinorVersion); | 424 | extp->MinorVersion); |
381 | kfree(extp); | 425 | kfree(extp); |
382 | kfree(mtd); | 426 | kfree(mtd); |
383 | return NULL; | 427 | return NULL; |
384 | } | 428 | } |
385 | 429 | ||
386 | /* Install our own private info structure */ | 430 | /* Install our own private info structure */ |
387 | cfi->cmdset_priv = extp; | 431 | cfi->cmdset_priv = extp; |
388 | 432 | ||
389 | /* Apply cfi device specific fixups */ | 433 | /* Apply cfi device specific fixups */ |
390 | cfi_fixup(mtd, cfi_fixup_table); | 434 | cfi_fixup(mtd, cfi_fixup_table); |
391 | 435 | ||
392 | #ifdef DEBUG_CFI_FEATURES | 436 | #ifdef DEBUG_CFI_FEATURES |
393 | /* Tell the user about it in lots of lovely detail */ | 437 | /* Tell the user about it in lots of lovely detail */ |
394 | cfi_tell_features(extp); | 438 | cfi_tell_features(extp); |
395 | #endif | 439 | #endif |
396 | 440 | ||
397 | bootloc = extp->TopBottom; | 441 | bootloc = extp->TopBottom; |
398 | if ((bootloc != 2) && (bootloc != 3)) { | 442 | if ((bootloc < 2) || (bootloc > 5)) { |
399 | printk(KERN_WARNING "%s: CFI does not contain boot " | 443 | printk(KERN_WARNING "%s: CFI contains unrecognised boot " |
400 | "bank location. Assuming top.\n", map->name); | 444 | "bank location (%d). Assuming bottom.\n", |
401 | bootloc = 2; | 445 | map->name, bootloc); |
402 | } | 446 | bootloc = 2; |
447 | } | ||
403 | 448 | ||
404 | if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) { | 449 | if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) { |
405 | printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name); | 450 | printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name); |
406 | 451 | ||
407 | for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) { | 452 | for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) { |
408 | int j = (cfi->cfiq->NumEraseRegions-1)-i; | 453 | int j = (cfi->cfiq->NumEraseRegions-1)-i; |
409 | __u32 swap; | 454 | __u32 swap; |
410 | 455 | ||
411 | swap = cfi->cfiq->EraseRegionInfo[i]; | 456 | swap = cfi->cfiq->EraseRegionInfo[i]; |
412 | cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j]; | 457 | cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j]; |
413 | cfi->cfiq->EraseRegionInfo[j] = swap; | 458 | cfi->cfiq->EraseRegionInfo[j] = swap; |
459 | } | ||
414 | } | 460 | } |
461 | /* Set the default CFI lock/unlock addresses */ | ||
462 | cfi->addr_unlock1 = 0x555; | ||
463 | cfi->addr_unlock2 = 0x2aa; | ||
464 | } | ||
465 | cfi_fixup(mtd, cfi_nopri_fixup_table); | ||
466 | |||
467 | if (!cfi->addr_unlock1 || !cfi->addr_unlock2) { | ||
468 | kfree(mtd); | ||
469 | return NULL; | ||
415 | } | 470 | } |
416 | /* Set the default CFI lock/unlock addresses */ | ||
417 | cfi->addr_unlock1 = 0x555; | ||
418 | cfi->addr_unlock2 = 0x2aa; | ||
419 | 471 | ||
420 | } /* CFI mode */ | 472 | } /* CFI mode */ |
421 | else if (cfi->cfi_mode == CFI_MODE_JEDEC) { | 473 | else if (cfi->cfi_mode == CFI_MODE_JEDEC) { |
@@ -437,7 +489,11 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) | |||
437 | 489 | ||
438 | return cfi_amdstd_setup(mtd); | 490 | return cfi_amdstd_setup(mtd); |
439 | } | 491 | } |
492 | struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002"))); | ||
493 | struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002"))); | ||
440 | EXPORT_SYMBOL_GPL(cfi_cmdset_0002); | 494 | EXPORT_SYMBOL_GPL(cfi_cmdset_0002); |
495 | EXPORT_SYMBOL_GPL(cfi_cmdset_0006); | ||
496 | EXPORT_SYMBOL_GPL(cfi_cmdset_0701); | ||
441 | 497 | ||
442 | static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) | 498 | static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) |
443 | { | 499 | { |
@@ -491,13 +547,12 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) | |||
491 | #endif | 547 | #endif |
492 | 548 | ||
493 | __module_get(THIS_MODULE); | 549 | __module_get(THIS_MODULE); |
550 | register_reboot_notifier(&mtd->reboot_notifier); | ||
494 | return mtd; | 551 | return mtd; |
495 | 552 | ||
496 | setup_err: | 553 | setup_err: |
497 | if(mtd) { | 554 | kfree(mtd->eraseregions); |
498 | kfree(mtd->eraseregions); | 555 | kfree(mtd); |
499 | kfree(mtd); | ||
500 | } | ||
501 | kfree(cfi->cmdset_priv); | 556 | kfree(cfi->cmdset_priv); |
502 | kfree(cfi->cfiq); | 557 | kfree(cfi->cfiq); |
503 | return NULL; | 558 | return NULL; |
@@ -571,9 +626,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr | |||
571 | printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); | 626 | printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); |
572 | return -EIO; | 627 | return -EIO; |
573 | } | 628 | } |
574 | spin_unlock(chip->mutex); | 629 | mutex_unlock(&chip->mutex); |
575 | cfi_udelay(1); | 630 | cfi_udelay(1); |
576 | spin_lock(chip->mutex); | 631 | mutex_lock(&chip->mutex); |
577 | /* Someone else might have been playing with it. */ | 632 | /* Someone else might have been playing with it. */ |
578 | goto retry; | 633 | goto retry; |
579 | } | 634 | } |
@@ -617,9 +672,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr | |||
617 | return -EIO; | 672 | return -EIO; |
618 | } | 673 | } |
619 | 674 | ||
620 | spin_unlock(chip->mutex); | 675 | mutex_unlock(&chip->mutex); |
621 | cfi_udelay(1); | 676 | cfi_udelay(1); |
622 | spin_lock(chip->mutex); | 677 | mutex_lock(&chip->mutex); |
623 | /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. | 678 | /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. |
624 | So we can just loop here. */ | 679 | So we can just loop here. */ |
625 | } | 680 | } |
@@ -634,6 +689,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr | |||
634 | chip->state = FL_READY; | 689 | chip->state = FL_READY; |
635 | return 0; | 690 | return 0; |
636 | 691 | ||
692 | case FL_SHUTDOWN: | ||
693 | /* The machine is rebooting */ | ||
694 | return -EIO; | ||
695 | |||
637 | case FL_POINT: | 696 | case FL_POINT: |
638 | /* Only if there's no operation suspended... */ | 697 | /* Only if there's no operation suspended... */ |
639 | if (mode == FL_READY && chip->oldstate == FL_READY) | 698 | if (mode == FL_READY && chip->oldstate == FL_READY) |
@@ -643,10 +702,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr | |||
643 | sleep: | 702 | sleep: |
644 | set_current_state(TASK_UNINTERRUPTIBLE); | 703 | set_current_state(TASK_UNINTERRUPTIBLE); |
645 | add_wait_queue(&chip->wq, &wait); | 704 | add_wait_queue(&chip->wq, &wait); |
646 | spin_unlock(chip->mutex); | 705 | mutex_unlock(&chip->mutex); |
647 | schedule(); | 706 | schedule(); |
648 | remove_wait_queue(&chip->wq, &wait); | 707 | remove_wait_queue(&chip->wq, &wait); |
649 | spin_lock(chip->mutex); | 708 | mutex_lock(&chip->mutex); |
650 | goto resettime; | 709 | goto resettime; |
651 | } | 710 | } |
652 | } | 711 | } |
@@ -778,7 +837,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, | |||
778 | (void) map_read(map, adr); | 837 | (void) map_read(map, adr); |
779 | xip_iprefetch(); | 838 | xip_iprefetch(); |
780 | local_irq_enable(); | 839 | local_irq_enable(); |
781 | spin_unlock(chip->mutex); | 840 | mutex_unlock(&chip->mutex); |
782 | xip_iprefetch(); | 841 | xip_iprefetch(); |
783 | cond_resched(); | 842 | cond_resched(); |
784 | 843 | ||
@@ -788,15 +847,15 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, | |||
788 | * a suspended erase state. If so let's wait | 847 | * a suspended erase state. If so let's wait |
789 | * until it's done. | 848 | * until it's done. |
790 | */ | 849 | */ |
791 | spin_lock(chip->mutex); | 850 | mutex_lock(&chip->mutex); |
792 | while (chip->state != FL_XIP_WHILE_ERASING) { | 851 | while (chip->state != FL_XIP_WHILE_ERASING) { |
793 | DECLARE_WAITQUEUE(wait, current); | 852 | DECLARE_WAITQUEUE(wait, current); |
794 | set_current_state(TASK_UNINTERRUPTIBLE); | 853 | set_current_state(TASK_UNINTERRUPTIBLE); |
795 | add_wait_queue(&chip->wq, &wait); | 854 | add_wait_queue(&chip->wq, &wait); |
796 | spin_unlock(chip->mutex); | 855 | mutex_unlock(&chip->mutex); |
797 | schedule(); | 856 | schedule(); |
798 | remove_wait_queue(&chip->wq, &wait); | 857 | remove_wait_queue(&chip->wq, &wait); |
799 | spin_lock(chip->mutex); | 858 | mutex_lock(&chip->mutex); |
800 | } | 859 | } |
801 | /* Disallow XIP again */ | 860 | /* Disallow XIP again */ |
802 | local_irq_disable(); | 861 | local_irq_disable(); |
@@ -858,17 +917,17 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, | |||
858 | 917 | ||
859 | #define UDELAY(map, chip, adr, usec) \ | 918 | #define UDELAY(map, chip, adr, usec) \ |
860 | do { \ | 919 | do { \ |
861 | spin_unlock(chip->mutex); \ | 920 | mutex_unlock(&chip->mutex); \ |
862 | cfi_udelay(usec); \ | 921 | cfi_udelay(usec); \ |
863 | spin_lock(chip->mutex); \ | 922 | mutex_lock(&chip->mutex); \ |
864 | } while (0) | 923 | } while (0) |
865 | 924 | ||
866 | #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ | 925 | #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ |
867 | do { \ | 926 | do { \ |
868 | spin_unlock(chip->mutex); \ | 927 | mutex_unlock(&chip->mutex); \ |
869 | INVALIDATE_CACHED_RANGE(map, adr, len); \ | 928 | INVALIDATE_CACHED_RANGE(map, adr, len); \ |
870 | cfi_udelay(usec); \ | 929 | cfi_udelay(usec); \ |
871 | spin_lock(chip->mutex); \ | 930 | mutex_lock(&chip->mutex); \ |
872 | } while (0) | 931 | } while (0) |
873 | 932 | ||
874 | #endif | 933 | #endif |
@@ -884,10 +943,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof | |||
884 | /* Ensure cmd read/writes are aligned. */ | 943 | /* Ensure cmd read/writes are aligned. */ |
885 | cmd_addr = adr & ~(map_bankwidth(map)-1); | 944 | cmd_addr = adr & ~(map_bankwidth(map)-1); |
886 | 945 | ||
887 | spin_lock(chip->mutex); | 946 | mutex_lock(&chip->mutex); |
888 | ret = get_chip(map, chip, cmd_addr, FL_READY); | 947 | ret = get_chip(map, chip, cmd_addr, FL_READY); |
889 | if (ret) { | 948 | if (ret) { |
890 | spin_unlock(chip->mutex); | 949 | mutex_unlock(&chip->mutex); |
891 | return ret; | 950 | return ret; |
892 | } | 951 | } |
893 | 952 | ||
@@ -900,7 +959,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof | |||
900 | 959 | ||
901 | put_chip(map, chip, cmd_addr); | 960 | put_chip(map, chip, cmd_addr); |
902 | 961 | ||
903 | spin_unlock(chip->mutex); | 962 | mutex_unlock(&chip->mutex); |
904 | return 0; | 963 | return 0; |
905 | } | 964 | } |
906 | 965 | ||
@@ -954,7 +1013,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi | |||
954 | struct cfi_private *cfi = map->fldrv_priv; | 1013 | struct cfi_private *cfi = map->fldrv_priv; |
955 | 1014 | ||
956 | retry: | 1015 | retry: |
957 | spin_lock(chip->mutex); | 1016 | mutex_lock(&chip->mutex); |
958 | 1017 | ||
959 | if (chip->state != FL_READY){ | 1018 | if (chip->state != FL_READY){ |
960 | #if 0 | 1019 | #if 0 |
@@ -963,7 +1022,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi | |||
963 | set_current_state(TASK_UNINTERRUPTIBLE); | 1022 | set_current_state(TASK_UNINTERRUPTIBLE); |
964 | add_wait_queue(&chip->wq, &wait); | 1023 | add_wait_queue(&chip->wq, &wait); |
965 | 1024 | ||
966 | spin_unlock(chip->mutex); | 1025 | mutex_unlock(&chip->mutex); |
967 | 1026 | ||
968 | schedule(); | 1027 | schedule(); |
969 | remove_wait_queue(&chip->wq, &wait); | 1028 | remove_wait_queue(&chip->wq, &wait); |
@@ -992,7 +1051,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi | |||
992 | cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | 1051 | cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); |
993 | 1052 | ||
994 | wake_up(&chip->wq); | 1053 | wake_up(&chip->wq); |
995 | spin_unlock(chip->mutex); | 1054 | mutex_unlock(&chip->mutex); |
996 | 1055 | ||
997 | return 0; | 1056 | return 0; |
998 | } | 1057 | } |
@@ -1061,10 +1120,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, | |||
1061 | 1120 | ||
1062 | adr += chip->start; | 1121 | adr += chip->start; |
1063 | 1122 | ||
1064 | spin_lock(chip->mutex); | 1123 | mutex_lock(&chip->mutex); |
1065 | ret = get_chip(map, chip, adr, FL_WRITING); | 1124 | ret = get_chip(map, chip, adr, FL_WRITING); |
1066 | if (ret) { | 1125 | if (ret) { |
1067 | spin_unlock(chip->mutex); | 1126 | mutex_unlock(&chip->mutex); |
1068 | return ret; | 1127 | return ret; |
1069 | } | 1128 | } |
1070 | 1129 | ||
@@ -1107,11 +1166,11 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, | |||
1107 | 1166 | ||
1108 | set_current_state(TASK_UNINTERRUPTIBLE); | 1167 | set_current_state(TASK_UNINTERRUPTIBLE); |
1109 | add_wait_queue(&chip->wq, &wait); | 1168 | add_wait_queue(&chip->wq, &wait); |
1110 | spin_unlock(chip->mutex); | 1169 | mutex_unlock(&chip->mutex); |
1111 | schedule(); | 1170 | schedule(); |
1112 | remove_wait_queue(&chip->wq, &wait); | 1171 | remove_wait_queue(&chip->wq, &wait); |
1113 | timeo = jiffies + (HZ / 2); /* FIXME */ | 1172 | timeo = jiffies + (HZ / 2); /* FIXME */ |
1114 | spin_lock(chip->mutex); | 1173 | mutex_lock(&chip->mutex); |
1115 | continue; | 1174 | continue; |
1116 | } | 1175 | } |
1117 | 1176 | ||
@@ -1143,7 +1202,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, | |||
1143 | op_done: | 1202 | op_done: |
1144 | chip->state = FL_READY; | 1203 | chip->state = FL_READY; |
1145 | put_chip(map, chip, adr); | 1204 | put_chip(map, chip, adr); |
1146 | spin_unlock(chip->mutex); | 1205 | mutex_unlock(&chip->mutex); |
1147 | 1206 | ||
1148 | return ret; | 1207 | return ret; |
1149 | } | 1208 | } |
@@ -1175,7 +1234,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, | |||
1175 | map_word tmp_buf; | 1234 | map_word tmp_buf; |
1176 | 1235 | ||
1177 | retry: | 1236 | retry: |
1178 | spin_lock(cfi->chips[chipnum].mutex); | 1237 | mutex_lock(&cfi->chips[chipnum].mutex); |
1179 | 1238 | ||
1180 | if (cfi->chips[chipnum].state != FL_READY) { | 1239 | if (cfi->chips[chipnum].state != FL_READY) { |
1181 | #if 0 | 1240 | #if 0 |
@@ -1184,7 +1243,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, | |||
1184 | set_current_state(TASK_UNINTERRUPTIBLE); | 1243 | set_current_state(TASK_UNINTERRUPTIBLE); |
1185 | add_wait_queue(&cfi->chips[chipnum].wq, &wait); | 1244 | add_wait_queue(&cfi->chips[chipnum].wq, &wait); |
1186 | 1245 | ||
1187 | spin_unlock(cfi->chips[chipnum].mutex); | 1246 | mutex_unlock(&cfi->chips[chipnum].mutex); |
1188 | 1247 | ||
1189 | schedule(); | 1248 | schedule(); |
1190 | remove_wait_queue(&cfi->chips[chipnum].wq, &wait); | 1249 | remove_wait_queue(&cfi->chips[chipnum].wq, &wait); |
@@ -1198,7 +1257,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, | |||
1198 | /* Load 'tmp_buf' with old contents of flash */ | 1257 | /* Load 'tmp_buf' with old contents of flash */ |
1199 | tmp_buf = map_read(map, bus_ofs+chipstart); | 1258 | tmp_buf = map_read(map, bus_ofs+chipstart); |
1200 | 1259 | ||
1201 | spin_unlock(cfi->chips[chipnum].mutex); | 1260 | mutex_unlock(&cfi->chips[chipnum].mutex); |
1202 | 1261 | ||
1203 | /* Number of bytes to copy from buffer */ | 1262 | /* Number of bytes to copy from buffer */ |
1204 | n = min_t(int, len, map_bankwidth(map)-i); | 1263 | n = min_t(int, len, map_bankwidth(map)-i); |
@@ -1253,7 +1312,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, | |||
1253 | map_word tmp_buf; | 1312 | map_word tmp_buf; |
1254 | 1313 | ||
1255 | retry1: | 1314 | retry1: |
1256 | spin_lock(cfi->chips[chipnum].mutex); | 1315 | mutex_lock(&cfi->chips[chipnum].mutex); |
1257 | 1316 | ||
1258 | if (cfi->chips[chipnum].state != FL_READY) { | 1317 | if (cfi->chips[chipnum].state != FL_READY) { |
1259 | #if 0 | 1318 | #if 0 |
@@ -1262,7 +1321,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, | |||
1262 | set_current_state(TASK_UNINTERRUPTIBLE); | 1321 | set_current_state(TASK_UNINTERRUPTIBLE); |
1263 | add_wait_queue(&cfi->chips[chipnum].wq, &wait); | 1322 | add_wait_queue(&cfi->chips[chipnum].wq, &wait); |
1264 | 1323 | ||
1265 | spin_unlock(cfi->chips[chipnum].mutex); | 1324 | mutex_unlock(&cfi->chips[chipnum].mutex); |
1266 | 1325 | ||
1267 | schedule(); | 1326 | schedule(); |
1268 | remove_wait_queue(&cfi->chips[chipnum].wq, &wait); | 1327 | remove_wait_queue(&cfi->chips[chipnum].wq, &wait); |
@@ -1275,7 +1334,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, | |||
1275 | 1334 | ||
1276 | tmp_buf = map_read(map, ofs + chipstart); | 1335 | tmp_buf = map_read(map, ofs + chipstart); |
1277 | 1336 | ||
1278 | spin_unlock(cfi->chips[chipnum].mutex); | 1337 | mutex_unlock(&cfi->chips[chipnum].mutex); |
1279 | 1338 | ||
1280 | tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); | 1339 | tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); |
1281 | 1340 | ||
@@ -1310,10 +1369,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, | |||
1310 | adr += chip->start; | 1369 | adr += chip->start; |
1311 | cmd_adr = adr; | 1370 | cmd_adr = adr; |
1312 | 1371 | ||
1313 | spin_lock(chip->mutex); | 1372 | mutex_lock(&chip->mutex); |
1314 | ret = get_chip(map, chip, adr, FL_WRITING); | 1373 | ret = get_chip(map, chip, adr, FL_WRITING); |
1315 | if (ret) { | 1374 | if (ret) { |
1316 | spin_unlock(chip->mutex); | 1375 | mutex_unlock(&chip->mutex); |
1317 | return ret; | 1376 | return ret; |
1318 | } | 1377 | } |
1319 | 1378 | ||
@@ -1368,11 +1427,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, | |||
1368 | 1427 | ||
1369 | set_current_state(TASK_UNINTERRUPTIBLE); | 1428 | set_current_state(TASK_UNINTERRUPTIBLE); |
1370 | add_wait_queue(&chip->wq, &wait); | 1429 | add_wait_queue(&chip->wq, &wait); |
1371 | spin_unlock(chip->mutex); | 1430 | mutex_unlock(&chip->mutex); |
1372 | schedule(); | 1431 | schedule(); |
1373 | remove_wait_queue(&chip->wq, &wait); | 1432 | remove_wait_queue(&chip->wq, &wait); |
1374 | timeo = jiffies + (HZ / 2); /* FIXME */ | 1433 | timeo = jiffies + (HZ / 2); /* FIXME */ |
1375 | spin_lock(chip->mutex); | 1434 | mutex_lock(&chip->mutex); |
1376 | continue; | 1435 | continue; |
1377 | } | 1436 | } |
1378 | 1437 | ||
@@ -1400,7 +1459,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, | |||
1400 | op_done: | 1459 | op_done: |
1401 | chip->state = FL_READY; | 1460 | chip->state = FL_READY; |
1402 | put_chip(map, chip, adr); | 1461 | put_chip(map, chip, adr); |
1403 | spin_unlock(chip->mutex); | 1462 | mutex_unlock(&chip->mutex); |
1404 | 1463 | ||
1405 | return ret; | 1464 | return ret; |
1406 | } | 1465 | } |
@@ -1500,10 +1559,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) | |||
1500 | 1559 | ||
1501 | adr = cfi->addr_unlock1; | 1560 | adr = cfi->addr_unlock1; |
1502 | 1561 | ||
1503 | spin_lock(chip->mutex); | 1562 | mutex_lock(&chip->mutex); |
1504 | ret = get_chip(map, chip, adr, FL_WRITING); | 1563 | ret = get_chip(map, chip, adr, FL_WRITING); |
1505 | if (ret) { | 1564 | if (ret) { |
1506 | spin_unlock(chip->mutex); | 1565 | mutex_unlock(&chip->mutex); |
1507 | return ret; | 1566 | return ret; |
1508 | } | 1567 | } |
1509 | 1568 | ||
@@ -1536,10 +1595,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) | |||
1536 | /* Someone's suspended the erase. Sleep */ | 1595 | /* Someone's suspended the erase. Sleep */ |
1537 | set_current_state(TASK_UNINTERRUPTIBLE); | 1596 | set_current_state(TASK_UNINTERRUPTIBLE); |
1538 | add_wait_queue(&chip->wq, &wait); | 1597 | add_wait_queue(&chip->wq, &wait); |
1539 | spin_unlock(chip->mutex); | 1598 | mutex_unlock(&chip->mutex); |
1540 | schedule(); | 1599 | schedule(); |
1541 | remove_wait_queue(&chip->wq, &wait); | 1600 | remove_wait_queue(&chip->wq, &wait); |
1542 | spin_lock(chip->mutex); | 1601 | mutex_lock(&chip->mutex); |
1543 | continue; | 1602 | continue; |
1544 | } | 1603 | } |
1545 | if (chip->erase_suspended) { | 1604 | if (chip->erase_suspended) { |
@@ -1573,7 +1632,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) | |||
1573 | chip->state = FL_READY; | 1632 | chip->state = FL_READY; |
1574 | xip_enable(map, chip, adr); | 1633 | xip_enable(map, chip, adr); |
1575 | put_chip(map, chip, adr); | 1634 | put_chip(map, chip, adr); |
1576 | spin_unlock(chip->mutex); | 1635 | mutex_unlock(&chip->mutex); |
1577 | 1636 | ||
1578 | return ret; | 1637 | return ret; |
1579 | } | 1638 | } |
@@ -1588,10 +1647,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, | |||
1588 | 1647 | ||
1589 | adr += chip->start; | 1648 | adr += chip->start; |
1590 | 1649 | ||
1591 | spin_lock(chip->mutex); | 1650 | mutex_lock(&chip->mutex); |
1592 | ret = get_chip(map, chip, adr, FL_ERASING); | 1651 | ret = get_chip(map, chip, adr, FL_ERASING); |
1593 | if (ret) { | 1652 | if (ret) { |
1594 | spin_unlock(chip->mutex); | 1653 | mutex_unlock(&chip->mutex); |
1595 | return ret; | 1654 | return ret; |
1596 | } | 1655 | } |
1597 | 1656 | ||
@@ -1624,10 +1683,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, | |||
1624 | /* Someone's suspended the erase. Sleep */ | 1683 | /* Someone's suspended the erase. Sleep */ |
1625 | set_current_state(TASK_UNINTERRUPTIBLE); | 1684 | set_current_state(TASK_UNINTERRUPTIBLE); |
1626 | add_wait_queue(&chip->wq, &wait); | 1685 | add_wait_queue(&chip->wq, &wait); |
1627 | spin_unlock(chip->mutex); | 1686 | mutex_unlock(&chip->mutex); |
1628 | schedule(); | 1687 | schedule(); |
1629 | remove_wait_queue(&chip->wq, &wait); | 1688 | remove_wait_queue(&chip->wq, &wait); |
1630 | spin_lock(chip->mutex); | 1689 | mutex_lock(&chip->mutex); |
1631 | continue; | 1690 | continue; |
1632 | } | 1691 | } |
1633 | if (chip->erase_suspended) { | 1692 | if (chip->erase_suspended) { |
@@ -1663,7 +1722,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, | |||
1663 | 1722 | ||
1664 | chip->state = FL_READY; | 1723 | chip->state = FL_READY; |
1665 | put_chip(map, chip, adr); | 1724 | put_chip(map, chip, adr); |
1666 | spin_unlock(chip->mutex); | 1725 | mutex_unlock(&chip->mutex); |
1667 | return ret; | 1726 | return ret; |
1668 | } | 1727 | } |
1669 | 1728 | ||
@@ -1715,7 +1774,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip, | |||
1715 | struct cfi_private *cfi = map->fldrv_priv; | 1774 | struct cfi_private *cfi = map->fldrv_priv; |
1716 | int ret; | 1775 | int ret; |
1717 | 1776 | ||
1718 | spin_lock(chip->mutex); | 1777 | mutex_lock(&chip->mutex); |
1719 | ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); | 1778 | ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); |
1720 | if (ret) | 1779 | if (ret) |
1721 | goto out_unlock; | 1780 | goto out_unlock; |
@@ -1741,7 +1800,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip, | |||
1741 | ret = 0; | 1800 | ret = 0; |
1742 | 1801 | ||
1743 | out_unlock: | 1802 | out_unlock: |
1744 | spin_unlock(chip->mutex); | 1803 | mutex_unlock(&chip->mutex); |
1745 | return ret; | 1804 | return ret; |
1746 | } | 1805 | } |
1747 | 1806 | ||
@@ -1751,7 +1810,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip, | |||
1751 | struct cfi_private *cfi = map->fldrv_priv; | 1810 | struct cfi_private *cfi = map->fldrv_priv; |
1752 | int ret; | 1811 | int ret; |
1753 | 1812 | ||
1754 | spin_lock(chip->mutex); | 1813 | mutex_lock(&chip->mutex); |
1755 | ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING); | 1814 | ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING); |
1756 | if (ret) | 1815 | if (ret) |
1757 | goto out_unlock; | 1816 | goto out_unlock; |
@@ -1769,7 +1828,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip, | |||
1769 | ret = 0; | 1828 | ret = 0; |
1770 | 1829 | ||
1771 | out_unlock: | 1830 | out_unlock: |
1772 | spin_unlock(chip->mutex); | 1831 | mutex_unlock(&chip->mutex); |
1773 | return ret; | 1832 | return ret; |
1774 | } | 1833 | } |
1775 | 1834 | ||
@@ -1797,7 +1856,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd) | |||
1797 | chip = &cfi->chips[i]; | 1856 | chip = &cfi->chips[i]; |
1798 | 1857 | ||
1799 | retry: | 1858 | retry: |
1800 | spin_lock(chip->mutex); | 1859 | mutex_lock(&chip->mutex); |
1801 | 1860 | ||
1802 | switch(chip->state) { | 1861 | switch(chip->state) { |
1803 | case FL_READY: | 1862 | case FL_READY: |
@@ -1811,7 +1870,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd) | |||
1811 | * with the chip now anyway. | 1870 | * with the chip now anyway. |
1812 | */ | 1871 | */ |
1813 | case FL_SYNCING: | 1872 | case FL_SYNCING: |
1814 | spin_unlock(chip->mutex); | 1873 | mutex_unlock(&chip->mutex); |
1815 | break; | 1874 | break; |
1816 | 1875 | ||
1817 | default: | 1876 | default: |
@@ -1819,7 +1878,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd) | |||
1819 | set_current_state(TASK_UNINTERRUPTIBLE); | 1878 | set_current_state(TASK_UNINTERRUPTIBLE); |
1820 | add_wait_queue(&chip->wq, &wait); | 1879 | add_wait_queue(&chip->wq, &wait); |
1821 | 1880 | ||
1822 | spin_unlock(chip->mutex); | 1881 | mutex_unlock(&chip->mutex); |
1823 | 1882 | ||
1824 | schedule(); | 1883 | schedule(); |
1825 | 1884 | ||
@@ -1834,13 +1893,13 @@ static void cfi_amdstd_sync (struct mtd_info *mtd) | |||
1834 | for (i--; i >=0; i--) { | 1893 | for (i--; i >=0; i--) { |
1835 | chip = &cfi->chips[i]; | 1894 | chip = &cfi->chips[i]; |
1836 | 1895 | ||
1837 | spin_lock(chip->mutex); | 1896 | mutex_lock(&chip->mutex); |
1838 | 1897 | ||
1839 | if (chip->state == FL_SYNCING) { | 1898 | if (chip->state == FL_SYNCING) { |
1840 | chip->state = chip->oldstate; | 1899 | chip->state = chip->oldstate; |
1841 | wake_up(&chip->wq); | 1900 | wake_up(&chip->wq); |
1842 | } | 1901 | } |
1843 | spin_unlock(chip->mutex); | 1902 | mutex_unlock(&chip->mutex); |
1844 | } | 1903 | } |
1845 | } | 1904 | } |
1846 | 1905 | ||
@@ -1856,7 +1915,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd) | |||
1856 | for (i=0; !ret && i<cfi->numchips; i++) { | 1915 | for (i=0; !ret && i<cfi->numchips; i++) { |
1857 | chip = &cfi->chips[i]; | 1916 | chip = &cfi->chips[i]; |
1858 | 1917 | ||
1859 | spin_lock(chip->mutex); | 1918 | mutex_lock(&chip->mutex); |
1860 | 1919 | ||
1861 | switch(chip->state) { | 1920 | switch(chip->state) { |
1862 | case FL_READY: | 1921 | case FL_READY: |
@@ -1876,7 +1935,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd) | |||
1876 | ret = -EAGAIN; | 1935 | ret = -EAGAIN; |
1877 | break; | 1936 | break; |
1878 | } | 1937 | } |
1879 | spin_unlock(chip->mutex); | 1938 | mutex_unlock(&chip->mutex); |
1880 | } | 1939 | } |
1881 | 1940 | ||
1882 | /* Unlock the chips again */ | 1941 | /* Unlock the chips again */ |
@@ -1885,13 +1944,13 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd) | |||
1885 | for (i--; i >=0; i--) { | 1944 | for (i--; i >=0; i--) { |
1886 | chip = &cfi->chips[i]; | 1945 | chip = &cfi->chips[i]; |
1887 | 1946 | ||
1888 | spin_lock(chip->mutex); | 1947 | mutex_lock(&chip->mutex); |
1889 | 1948 | ||
1890 | if (chip->state == FL_PM_SUSPENDED) { | 1949 | if (chip->state == FL_PM_SUSPENDED) { |
1891 | chip->state = chip->oldstate; | 1950 | chip->state = chip->oldstate; |
1892 | wake_up(&chip->wq); | 1951 | wake_up(&chip->wq); |
1893 | } | 1952 | } |
1894 | spin_unlock(chip->mutex); | 1953 | mutex_unlock(&chip->mutex); |
1895 | } | 1954 | } |
1896 | } | 1955 | } |
1897 | 1956 | ||
@@ -1910,7 +1969,7 @@ static void cfi_amdstd_resume(struct mtd_info *mtd) | |||
1910 | 1969 | ||
1911 | chip = &cfi->chips[i]; | 1970 | chip = &cfi->chips[i]; |
1912 | 1971 | ||
1913 | spin_lock(chip->mutex); | 1972 | mutex_lock(&chip->mutex); |
1914 | 1973 | ||
1915 | if (chip->state == FL_PM_SUSPENDED) { | 1974 | if (chip->state == FL_PM_SUSPENDED) { |
1916 | chip->state = FL_READY; | 1975 | chip->state = FL_READY; |
@@ -1920,15 +1979,62 @@ static void cfi_amdstd_resume(struct mtd_info *mtd) | |||
1920 | else | 1979 | else |
1921 | printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n"); | 1980 | printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n"); |
1922 | 1981 | ||
1923 | spin_unlock(chip->mutex); | 1982 | mutex_unlock(&chip->mutex); |
1924 | } | 1983 | } |
1925 | } | 1984 | } |
1926 | 1985 | ||
1986 | |||
1987 | /* | ||
1988 | * Ensure that the flash device is put back into read array mode before | ||
1989 | * unloading the driver or rebooting. On some systems, rebooting while | ||
1990 | * the flash is in query/program/erase mode will prevent the CPU from | ||
1991 | * fetching the bootloader code, requiring a hard reset or power cycle. | ||
1992 | */ | ||
1993 | static int cfi_amdstd_reset(struct mtd_info *mtd) | ||
1994 | { | ||
1995 | struct map_info *map = mtd->priv; | ||
1996 | struct cfi_private *cfi = map->fldrv_priv; | ||
1997 | int i, ret; | ||
1998 | struct flchip *chip; | ||
1999 | |||
2000 | for (i = 0; i < cfi->numchips; i++) { | ||
2001 | |||
2002 | chip = &cfi->chips[i]; | ||
2003 | |||
2004 | mutex_lock(&chip->mutex); | ||
2005 | |||
2006 | ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); | ||
2007 | if (!ret) { | ||
2008 | map_write(map, CMD(0xF0), chip->start); | ||
2009 | chip->state = FL_SHUTDOWN; | ||
2010 | put_chip(map, chip, chip->start); | ||
2011 | } | ||
2012 | |||
2013 | mutex_unlock(&chip->mutex); | ||
2014 | } | ||
2015 | |||
2016 | return 0; | ||
2017 | } | ||
2018 | |||
2019 | |||
2020 | static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val, | ||
2021 | void *v) | ||
2022 | { | ||
2023 | struct mtd_info *mtd; | ||
2024 | |||
2025 | mtd = container_of(nb, struct mtd_info, reboot_notifier); | ||
2026 | cfi_amdstd_reset(mtd); | ||
2027 | return NOTIFY_DONE; | ||
2028 | } | ||
2029 | |||
2030 | |||
1927 | static void cfi_amdstd_destroy(struct mtd_info *mtd) | 2031 | static void cfi_amdstd_destroy(struct mtd_info *mtd) |
1928 | { | 2032 | { |
1929 | struct map_info *map = mtd->priv; | 2033 | struct map_info *map = mtd->priv; |
1930 | struct cfi_private *cfi = map->fldrv_priv; | 2034 | struct cfi_private *cfi = map->fldrv_priv; |
1931 | 2035 | ||
2036 | cfi_amdstd_reset(mtd); | ||
2037 | unregister_reboot_notifier(&mtd->reboot_notifier); | ||
1932 | kfree(cfi->cmdset_priv); | 2038 | kfree(cfi->cmdset_priv); |
1933 | kfree(cfi->cfiq); | 2039 | kfree(cfi->cfiq); |
1934 | kfree(cfi); | 2040 | kfree(cfi); |
@@ -1938,3 +2044,5 @@ static void cfi_amdstd_destroy(struct mtd_info *mtd) | |||
1938 | MODULE_LICENSE("GPL"); | 2044 | MODULE_LICENSE("GPL"); |
1939 | MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al."); | 2045 | MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al."); |
1940 | MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips"); | 2046 | MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips"); |
2047 | MODULE_ALIAS("cfi_cmdset_0006"); | ||
2048 | MODULE_ALIAS("cfi_cmdset_0701"); | ||
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c index 0667a671525d..e54e8c169d76 100644 --- a/drivers/mtd/chips/cfi_cmdset_0020.c +++ b/drivers/mtd/chips/cfi_cmdset_0020.c | |||
@@ -265,7 +265,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof | |||
265 | 265 | ||
266 | timeo = jiffies + HZ; | 266 | timeo = jiffies + HZ; |
267 | retry: | 267 | retry: |
268 | spin_lock_bh(chip->mutex); | 268 | mutex_lock(&chip->mutex); |
269 | 269 | ||
270 | /* Check that the chip's ready to talk to us. | 270 | /* Check that the chip's ready to talk to us. |
271 | * If it's in FL_ERASING state, suspend it and make it talk now. | 271 | * If it's in FL_ERASING state, suspend it and make it talk now. |
@@ -296,15 +296,15 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof | |||
296 | /* make sure we're in 'read status' mode */ | 296 | /* make sure we're in 'read status' mode */ |
297 | map_write(map, CMD(0x70), cmd_addr); | 297 | map_write(map, CMD(0x70), cmd_addr); |
298 | chip->state = FL_ERASING; | 298 | chip->state = FL_ERASING; |
299 | spin_unlock_bh(chip->mutex); | 299 | mutex_unlock(&chip->mutex); |
300 | printk(KERN_ERR "Chip not ready after erase " | 300 | printk(KERN_ERR "Chip not ready after erase " |
301 | "suspended: status = 0x%lx\n", status.x[0]); | 301 | "suspended: status = 0x%lx\n", status.x[0]); |
302 | return -EIO; | 302 | return -EIO; |
303 | } | 303 | } |
304 | 304 | ||
305 | spin_unlock_bh(chip->mutex); | 305 | mutex_unlock(&chip->mutex); |
306 | cfi_udelay(1); | 306 | cfi_udelay(1); |
307 | spin_lock_bh(chip->mutex); | 307 | mutex_lock(&chip->mutex); |
308 | } | 308 | } |
309 | 309 | ||
310 | suspended = 1; | 310 | suspended = 1; |
@@ -335,13 +335,13 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof | |||
335 | 335 | ||
336 | /* Urgh. Chip not yet ready to talk to us. */ | 336 | /* Urgh. Chip not yet ready to talk to us. */ |
337 | if (time_after(jiffies, timeo)) { | 337 | if (time_after(jiffies, timeo)) { |
338 | spin_unlock_bh(chip->mutex); | 338 | mutex_unlock(&chip->mutex); |
339 | printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]); | 339 | printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]); |
340 | return -EIO; | 340 | return -EIO; |
341 | } | 341 | } |
342 | 342 | ||
343 | /* Latency issues. Drop the lock, wait a while and retry */ | 343 | /* Latency issues. Drop the lock, wait a while and retry */ |
344 | spin_unlock_bh(chip->mutex); | 344 | mutex_unlock(&chip->mutex); |
345 | cfi_udelay(1); | 345 | cfi_udelay(1); |
346 | goto retry; | 346 | goto retry; |
347 | 347 | ||
@@ -351,7 +351,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof | |||
351 | someone changes the status */ | 351 | someone changes the status */ |
352 | set_current_state(TASK_UNINTERRUPTIBLE); | 352 | set_current_state(TASK_UNINTERRUPTIBLE); |
353 | add_wait_queue(&chip->wq, &wait); | 353 | add_wait_queue(&chip->wq, &wait); |
354 | spin_unlock_bh(chip->mutex); | 354 | mutex_unlock(&chip->mutex); |
355 | schedule(); | 355 | schedule(); |
356 | remove_wait_queue(&chip->wq, &wait); | 356 | remove_wait_queue(&chip->wq, &wait); |
357 | timeo = jiffies + HZ; | 357 | timeo = jiffies + HZ; |
@@ -376,7 +376,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof | |||
376 | } | 376 | } |
377 | 377 | ||
378 | wake_up(&chip->wq); | 378 | wake_up(&chip->wq); |
379 | spin_unlock_bh(chip->mutex); | 379 | mutex_unlock(&chip->mutex); |
380 | return 0; | 380 | return 0; |
381 | } | 381 | } |
382 | 382 | ||
@@ -445,7 +445,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, | |||
445 | #ifdef DEBUG_CFI_FEATURES | 445 | #ifdef DEBUG_CFI_FEATURES |
446 | printk("%s: chip->state[%d]\n", __func__, chip->state); | 446 | printk("%s: chip->state[%d]\n", __func__, chip->state); |
447 | #endif | 447 | #endif |
448 | spin_lock_bh(chip->mutex); | 448 | mutex_lock(&chip->mutex); |
449 | 449 | ||
450 | /* Check that the chip's ready to talk to us. | 450 | /* Check that the chip's ready to talk to us. |
451 | * Later, we can actually think about interrupting it | 451 | * Later, we can actually think about interrupting it |
@@ -470,14 +470,14 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, | |||
470 | break; | 470 | break; |
471 | /* Urgh. Chip not yet ready to talk to us. */ | 471 | /* Urgh. Chip not yet ready to talk to us. */ |
472 | if (time_after(jiffies, timeo)) { | 472 | if (time_after(jiffies, timeo)) { |
473 | spin_unlock_bh(chip->mutex); | 473 | mutex_unlock(&chip->mutex); |
474 | printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n", | 474 | printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n", |
475 | status.x[0], map_read(map, cmd_adr).x[0]); | 475 | status.x[0], map_read(map, cmd_adr).x[0]); |
476 | return -EIO; | 476 | return -EIO; |
477 | } | 477 | } |
478 | 478 | ||
479 | /* Latency issues. Drop the lock, wait a while and retry */ | 479 | /* Latency issues. Drop the lock, wait a while and retry */ |
480 | spin_unlock_bh(chip->mutex); | 480 | mutex_unlock(&chip->mutex); |
481 | cfi_udelay(1); | 481 | cfi_udelay(1); |
482 | goto retry; | 482 | goto retry; |
483 | 483 | ||
@@ -486,7 +486,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, | |||
486 | someone changes the status */ | 486 | someone changes the status */ |
487 | set_current_state(TASK_UNINTERRUPTIBLE); | 487 | set_current_state(TASK_UNINTERRUPTIBLE); |
488 | add_wait_queue(&chip->wq, &wait); | 488 | add_wait_queue(&chip->wq, &wait); |
489 | spin_unlock_bh(chip->mutex); | 489 | mutex_unlock(&chip->mutex); |
490 | schedule(); | 490 | schedule(); |
491 | remove_wait_queue(&chip->wq, &wait); | 491 | remove_wait_queue(&chip->wq, &wait); |
492 | timeo = jiffies + HZ; | 492 | timeo = jiffies + HZ; |
@@ -503,16 +503,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, | |||
503 | if (map_word_andequal(map, status, status_OK, status_OK)) | 503 | if (map_word_andequal(map, status, status_OK, status_OK)) |
504 | break; | 504 | break; |
505 | 505 | ||
506 | spin_unlock_bh(chip->mutex); | 506 | mutex_unlock(&chip->mutex); |
507 | cfi_udelay(1); | 507 | cfi_udelay(1); |
508 | spin_lock_bh(chip->mutex); | 508 | mutex_lock(&chip->mutex); |
509 | 509 | ||
510 | if (++z > 100) { | 510 | if (++z > 100) { |
511 | /* Argh. Not ready for write to buffer */ | 511 | /* Argh. Not ready for write to buffer */ |
512 | DISABLE_VPP(map); | 512 | DISABLE_VPP(map); |
513 | map_write(map, CMD(0x70), cmd_adr); | 513 | map_write(map, CMD(0x70), cmd_adr); |
514 | chip->state = FL_STATUS; | 514 | chip->state = FL_STATUS; |
515 | spin_unlock_bh(chip->mutex); | 515 | mutex_unlock(&chip->mutex); |
516 | printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]); | 516 | printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]); |
517 | return -EIO; | 517 | return -EIO; |
518 | } | 518 | } |
@@ -532,9 +532,9 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, | |||
532 | map_write(map, CMD(0xd0), cmd_adr); | 532 | map_write(map, CMD(0xd0), cmd_adr); |
533 | chip->state = FL_WRITING; | 533 | chip->state = FL_WRITING; |
534 | 534 | ||
535 | spin_unlock_bh(chip->mutex); | 535 | mutex_unlock(&chip->mutex); |
536 | cfi_udelay(chip->buffer_write_time); | 536 | cfi_udelay(chip->buffer_write_time); |
537 | spin_lock_bh(chip->mutex); | 537 | mutex_lock(&chip->mutex); |
538 | 538 | ||
539 | timeo = jiffies + (HZ/2); | 539 | timeo = jiffies + (HZ/2); |
540 | z = 0; | 540 | z = 0; |
@@ -543,11 +543,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, | |||
543 | /* Someone's suspended the write. Sleep */ | 543 | /* Someone's suspended the write. Sleep */ |
544 | set_current_state(TASK_UNINTERRUPTIBLE); | 544 | set_current_state(TASK_UNINTERRUPTIBLE); |
545 | add_wait_queue(&chip->wq, &wait); | 545 | add_wait_queue(&chip->wq, &wait); |
546 | spin_unlock_bh(chip->mutex); | 546 | mutex_unlock(&chip->mutex); |
547 | schedule(); | 547 | schedule(); |
548 | remove_wait_queue(&chip->wq, &wait); | 548 | remove_wait_queue(&chip->wq, &wait); |
549 | timeo = jiffies + (HZ / 2); /* FIXME */ | 549 | timeo = jiffies + (HZ / 2); /* FIXME */ |
550 | spin_lock_bh(chip->mutex); | 550 | mutex_lock(&chip->mutex); |
551 | continue; | 551 | continue; |
552 | } | 552 | } |
553 | 553 | ||
@@ -563,16 +563,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, | |||
563 | map_write(map, CMD(0x70), adr); | 563 | map_write(map, CMD(0x70), adr); |
564 | chip->state = FL_STATUS; | 564 | chip->state = FL_STATUS; |
565 | DISABLE_VPP(map); | 565 | DISABLE_VPP(map); |
566 | spin_unlock_bh(chip->mutex); | 566 | mutex_unlock(&chip->mutex); |
567 | printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n"); | 567 | printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n"); |
568 | return -EIO; | 568 | return -EIO; |
569 | } | 569 | } |
570 | 570 | ||
571 | /* Latency issues. Drop the lock, wait a while and retry */ | 571 | /* Latency issues. Drop the lock, wait a while and retry */ |
572 | spin_unlock_bh(chip->mutex); | 572 | mutex_unlock(&chip->mutex); |
573 | cfi_udelay(1); | 573 | cfi_udelay(1); |
574 | z++; | 574 | z++; |
575 | spin_lock_bh(chip->mutex); | 575 | mutex_lock(&chip->mutex); |
576 | } | 576 | } |
577 | if (!z) { | 577 | if (!z) { |
578 | chip->buffer_write_time--; | 578 | chip->buffer_write_time--; |
@@ -596,11 +596,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, | |||
596 | /* put back into read status register mode */ | 596 | /* put back into read status register mode */ |
597 | map_write(map, CMD(0x70), adr); | 597 | map_write(map, CMD(0x70), adr); |
598 | wake_up(&chip->wq); | 598 | wake_up(&chip->wq); |
599 | spin_unlock_bh(chip->mutex); | 599 | mutex_unlock(&chip->mutex); |
600 | return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO; | 600 | return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO; |
601 | } | 601 | } |
602 | wake_up(&chip->wq); | 602 | wake_up(&chip->wq); |
603 | spin_unlock_bh(chip->mutex); | 603 | mutex_unlock(&chip->mutex); |
604 | 604 | ||
605 | return 0; | 605 | return 0; |
606 | } | 606 | } |
@@ -749,7 +749,7 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u | |||
749 | 749 | ||
750 | timeo = jiffies + HZ; | 750 | timeo = jiffies + HZ; |
751 | retry: | 751 | retry: |
752 | spin_lock_bh(chip->mutex); | 752 | mutex_lock(&chip->mutex); |
753 | 753 | ||
754 | /* Check that the chip's ready to talk to us. */ | 754 | /* Check that the chip's ready to talk to us. */ |
755 | switch (chip->state) { | 755 | switch (chip->state) { |
@@ -766,13 +766,13 @@ retry: | |||
766 | 766 | ||
767 | /* Urgh. Chip not yet ready to talk to us. */ | 767 | /* Urgh. Chip not yet ready to talk to us. */ |
768 | if (time_after(jiffies, timeo)) { | 768 | if (time_after(jiffies, timeo)) { |
769 | spin_unlock_bh(chip->mutex); | 769 | mutex_unlock(&chip->mutex); |
770 | printk(KERN_ERR "waiting for chip to be ready timed out in erase\n"); | 770 | printk(KERN_ERR "waiting for chip to be ready timed out in erase\n"); |
771 | return -EIO; | 771 | return -EIO; |
772 | } | 772 | } |
773 | 773 | ||
774 | /* Latency issues. Drop the lock, wait a while and retry */ | 774 | /* Latency issues. Drop the lock, wait a while and retry */ |
775 | spin_unlock_bh(chip->mutex); | 775 | mutex_unlock(&chip->mutex); |
776 | cfi_udelay(1); | 776 | cfi_udelay(1); |
777 | goto retry; | 777 | goto retry; |
778 | 778 | ||
@@ -781,7 +781,7 @@ retry: | |||
781 | someone changes the status */ | 781 | someone changes the status */ |
782 | set_current_state(TASK_UNINTERRUPTIBLE); | 782 | set_current_state(TASK_UNINTERRUPTIBLE); |
783 | add_wait_queue(&chip->wq, &wait); | 783 | add_wait_queue(&chip->wq, &wait); |
784 | spin_unlock_bh(chip->mutex); | 784 | mutex_unlock(&chip->mutex); |
785 | schedule(); | 785 | schedule(); |
786 | remove_wait_queue(&chip->wq, &wait); | 786 | remove_wait_queue(&chip->wq, &wait); |
787 | timeo = jiffies + HZ; | 787 | timeo = jiffies + HZ; |
@@ -797,9 +797,9 @@ retry: | |||
797 | map_write(map, CMD(0xD0), adr); | 797 | map_write(map, CMD(0xD0), adr); |
798 | chip->state = FL_ERASING; | 798 | chip->state = FL_ERASING; |
799 | 799 | ||
800 | spin_unlock_bh(chip->mutex); | 800 | mutex_unlock(&chip->mutex); |
801 | msleep(1000); | 801 | msleep(1000); |
802 | spin_lock_bh(chip->mutex); | 802 | mutex_lock(&chip->mutex); |
803 | 803 | ||
804 | /* FIXME. Use a timer to check this, and return immediately. */ | 804 | /* FIXME. Use a timer to check this, and return immediately. */ |
805 | /* Once the state machine's known to be working I'll do that */ | 805 | /* Once the state machine's known to be working I'll do that */ |
@@ -810,11 +810,11 @@ retry: | |||
810 | /* Someone's suspended the erase. Sleep */ | 810 | /* Someone's suspended the erase. Sleep */ |
811 | set_current_state(TASK_UNINTERRUPTIBLE); | 811 | set_current_state(TASK_UNINTERRUPTIBLE); |
812 | add_wait_queue(&chip->wq, &wait); | 812 | add_wait_queue(&chip->wq, &wait); |
813 | spin_unlock_bh(chip->mutex); | 813 | mutex_unlock(&chip->mutex); |
814 | schedule(); | 814 | schedule(); |
815 | remove_wait_queue(&chip->wq, &wait); | 815 | remove_wait_queue(&chip->wq, &wait); |
816 | timeo = jiffies + (HZ*20); /* FIXME */ | 816 | timeo = jiffies + (HZ*20); /* FIXME */ |
817 | spin_lock_bh(chip->mutex); | 817 | mutex_lock(&chip->mutex); |
818 | continue; | 818 | continue; |
819 | } | 819 | } |
820 | 820 | ||
@@ -828,14 +828,14 @@ retry: | |||
828 | chip->state = FL_STATUS; | 828 | chip->state = FL_STATUS; |
829 | printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); | 829 | printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); |
830 | DISABLE_VPP(map); | 830 | DISABLE_VPP(map); |
831 | spin_unlock_bh(chip->mutex); | 831 | mutex_unlock(&chip->mutex); |
832 | return -EIO; | 832 | return -EIO; |
833 | } | 833 | } |
834 | 834 | ||
835 | /* Latency issues. Drop the lock, wait a while and retry */ | 835 | /* Latency issues. Drop the lock, wait a while and retry */ |
836 | spin_unlock_bh(chip->mutex); | 836 | mutex_unlock(&chip->mutex); |
837 | cfi_udelay(1); | 837 | cfi_udelay(1); |
838 | spin_lock_bh(chip->mutex); | 838 | mutex_lock(&chip->mutex); |
839 | } | 839 | } |
840 | 840 | ||
841 | DISABLE_VPP(map); | 841 | DISABLE_VPP(map); |
@@ -878,7 +878,7 @@ retry: | |||
878 | printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus); | 878 | printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus); |
879 | timeo = jiffies + HZ; | 879 | timeo = jiffies + HZ; |
880 | chip->state = FL_STATUS; | 880 | chip->state = FL_STATUS; |
881 | spin_unlock_bh(chip->mutex); | 881 | mutex_unlock(&chip->mutex); |
882 | goto retry; | 882 | goto retry; |
883 | } | 883 | } |
884 | printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus); | 884 | printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus); |
@@ -887,7 +887,7 @@ retry: | |||
887 | } | 887 | } |
888 | 888 | ||
889 | wake_up(&chip->wq); | 889 | wake_up(&chip->wq); |
890 | spin_unlock_bh(chip->mutex); | 890 | mutex_unlock(&chip->mutex); |
891 | return ret; | 891 | return ret; |
892 | } | 892 | } |
893 | 893 | ||
@@ -995,7 +995,7 @@ static void cfi_staa_sync (struct mtd_info *mtd) | |||
995 | chip = &cfi->chips[i]; | 995 | chip = &cfi->chips[i]; |
996 | 996 | ||
997 | retry: | 997 | retry: |
998 | spin_lock_bh(chip->mutex); | 998 | mutex_lock(&chip->mutex); |
999 | 999 | ||
1000 | switch(chip->state) { | 1000 | switch(chip->state) { |
1001 | case FL_READY: | 1001 | case FL_READY: |
@@ -1009,7 +1009,7 @@ static void cfi_staa_sync (struct mtd_info *mtd) | |||
1009 | * with the chip now anyway. | 1009 | * with the chip now anyway. |
1010 | */ | 1010 | */ |
1011 | case FL_SYNCING: | 1011 | case FL_SYNCING: |
1012 | spin_unlock_bh(chip->mutex); | 1012 | mutex_unlock(&chip->mutex); |
1013 | break; | 1013 | break; |
1014 | 1014 | ||
1015 | default: | 1015 | default: |
@@ -1017,7 +1017,7 @@ static void cfi_staa_sync (struct mtd_info *mtd) | |||
1017 | set_current_state(TASK_UNINTERRUPTIBLE); | 1017 | set_current_state(TASK_UNINTERRUPTIBLE); |
1018 | add_wait_queue(&chip->wq, &wait); | 1018 | add_wait_queue(&chip->wq, &wait); |
1019 | 1019 | ||
1020 | spin_unlock_bh(chip->mutex); | 1020 | mutex_unlock(&chip->mutex); |
1021 | schedule(); | 1021 | schedule(); |
1022 | remove_wait_queue(&chip->wq, &wait); | 1022 | remove_wait_queue(&chip->wq, &wait); |
1023 | 1023 | ||
@@ -1030,13 +1030,13 @@ static void cfi_staa_sync (struct mtd_info *mtd) | |||
1030 | for (i--; i >=0; i--) { | 1030 | for (i--; i >=0; i--) { |
1031 | chip = &cfi->chips[i]; | 1031 | chip = &cfi->chips[i]; |
1032 | 1032 | ||
1033 | spin_lock_bh(chip->mutex); | 1033 | mutex_lock(&chip->mutex); |
1034 | 1034 | ||
1035 | if (chip->state == FL_SYNCING) { | 1035 | if (chip->state == FL_SYNCING) { |
1036 | chip->state = chip->oldstate; | 1036 | chip->state = chip->oldstate; |
1037 | wake_up(&chip->wq); | 1037 | wake_up(&chip->wq); |
1038 | } | 1038 | } |
1039 | spin_unlock_bh(chip->mutex); | 1039 | mutex_unlock(&chip->mutex); |
1040 | } | 1040 | } |
1041 | } | 1041 | } |
1042 | 1042 | ||
@@ -1054,7 +1054,7 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un | |||
1054 | 1054 | ||
1055 | timeo = jiffies + HZ; | 1055 | timeo = jiffies + HZ; |
1056 | retry: | 1056 | retry: |
1057 | spin_lock_bh(chip->mutex); | 1057 | mutex_lock(&chip->mutex); |
1058 | 1058 | ||
1059 | /* Check that the chip's ready to talk to us. */ | 1059 | /* Check that the chip's ready to talk to us. */ |
1060 | switch (chip->state) { | 1060 | switch (chip->state) { |
@@ -1071,13 +1071,13 @@ retry: | |||
1071 | 1071 | ||
1072 | /* Urgh. Chip not yet ready to talk to us. */ | 1072 | /* Urgh. Chip not yet ready to talk to us. */ |
1073 | if (time_after(jiffies, timeo)) { | 1073 | if (time_after(jiffies, timeo)) { |
1074 | spin_unlock_bh(chip->mutex); | 1074 | mutex_unlock(&chip->mutex); |
1075 | printk(KERN_ERR "waiting for chip to be ready timed out in lock\n"); | 1075 | printk(KERN_ERR "waiting for chip to be ready timed out in lock\n"); |
1076 | return -EIO; | 1076 | return -EIO; |
1077 | } | 1077 | } |
1078 | 1078 | ||
1079 | /* Latency issues. Drop the lock, wait a while and retry */ | 1079 | /* Latency issues. Drop the lock, wait a while and retry */ |
1080 | spin_unlock_bh(chip->mutex); | 1080 | mutex_unlock(&chip->mutex); |
1081 | cfi_udelay(1); | 1081 | cfi_udelay(1); |
1082 | goto retry; | 1082 | goto retry; |
1083 | 1083 | ||
@@ -1086,7 +1086,7 @@ retry: | |||
1086 | someone changes the status */ | 1086 | someone changes the status */ |
1087 | set_current_state(TASK_UNINTERRUPTIBLE); | 1087 | set_current_state(TASK_UNINTERRUPTIBLE); |
1088 | add_wait_queue(&chip->wq, &wait); | 1088 | add_wait_queue(&chip->wq, &wait); |
1089 | spin_unlock_bh(chip->mutex); | 1089 | mutex_unlock(&chip->mutex); |
1090 | schedule(); | 1090 | schedule(); |
1091 | remove_wait_queue(&chip->wq, &wait); | 1091 | remove_wait_queue(&chip->wq, &wait); |
1092 | timeo = jiffies + HZ; | 1092 | timeo = jiffies + HZ; |
@@ -1098,9 +1098,9 @@ retry: | |||
1098 | map_write(map, CMD(0x01), adr); | 1098 | map_write(map, CMD(0x01), adr); |
1099 | chip->state = FL_LOCKING; | 1099 | chip->state = FL_LOCKING; |
1100 | 1100 | ||
1101 | spin_unlock_bh(chip->mutex); | 1101 | mutex_unlock(&chip->mutex); |
1102 | msleep(1000); | 1102 | msleep(1000); |
1103 | spin_lock_bh(chip->mutex); | 1103 | mutex_lock(&chip->mutex); |
1104 | 1104 | ||
1105 | /* FIXME. Use a timer to check this, and return immediately. */ | 1105 | /* FIXME. Use a timer to check this, and return immediately. */ |
1106 | /* Once the state machine's known to be working I'll do that */ | 1106 | /* Once the state machine's known to be working I'll do that */ |
@@ -1118,21 +1118,21 @@ retry: | |||
1118 | chip->state = FL_STATUS; | 1118 | chip->state = FL_STATUS; |
1119 | printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); | 1119 | printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); |
1120 | DISABLE_VPP(map); | 1120 | DISABLE_VPP(map); |
1121 | spin_unlock_bh(chip->mutex); | 1121 | mutex_unlock(&chip->mutex); |
1122 | return -EIO; | 1122 | return -EIO; |
1123 | } | 1123 | } |
1124 | 1124 | ||
1125 | /* Latency issues. Drop the lock, wait a while and retry */ | 1125 | /* Latency issues. Drop the lock, wait a while and retry */ |
1126 | spin_unlock_bh(chip->mutex); | 1126 | mutex_unlock(&chip->mutex); |
1127 | cfi_udelay(1); | 1127 | cfi_udelay(1); |
1128 | spin_lock_bh(chip->mutex); | 1128 | mutex_lock(&chip->mutex); |
1129 | } | 1129 | } |
1130 | 1130 | ||
1131 | /* Done and happy. */ | 1131 | /* Done and happy. */ |
1132 | chip->state = FL_STATUS; | 1132 | chip->state = FL_STATUS; |
1133 | DISABLE_VPP(map); | 1133 | DISABLE_VPP(map); |
1134 | wake_up(&chip->wq); | 1134 | wake_up(&chip->wq); |
1135 | spin_unlock_bh(chip->mutex); | 1135 | mutex_unlock(&chip->mutex); |
1136 | return 0; | 1136 | return 0; |
1137 | } | 1137 | } |
1138 | static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | 1138 | static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
@@ -1203,7 +1203,7 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, | |||
1203 | 1203 | ||
1204 | timeo = jiffies + HZ; | 1204 | timeo = jiffies + HZ; |
1205 | retry: | 1205 | retry: |
1206 | spin_lock_bh(chip->mutex); | 1206 | mutex_lock(&chip->mutex); |
1207 | 1207 | ||
1208 | /* Check that the chip's ready to talk to us. */ | 1208 | /* Check that the chip's ready to talk to us. */ |
1209 | switch (chip->state) { | 1209 | switch (chip->state) { |
@@ -1220,13 +1220,13 @@ retry: | |||
1220 | 1220 | ||
1221 | /* Urgh. Chip not yet ready to talk to us. */ | 1221 | /* Urgh. Chip not yet ready to talk to us. */ |
1222 | if (time_after(jiffies, timeo)) { | 1222 | if (time_after(jiffies, timeo)) { |
1223 | spin_unlock_bh(chip->mutex); | 1223 | mutex_unlock(&chip->mutex); |
1224 | printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n"); | 1224 | printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n"); |
1225 | return -EIO; | 1225 | return -EIO; |
1226 | } | 1226 | } |
1227 | 1227 | ||
1228 | /* Latency issues. Drop the lock, wait a while and retry */ | 1228 | /* Latency issues. Drop the lock, wait a while and retry */ |
1229 | spin_unlock_bh(chip->mutex); | 1229 | mutex_unlock(&chip->mutex); |
1230 | cfi_udelay(1); | 1230 | cfi_udelay(1); |
1231 | goto retry; | 1231 | goto retry; |
1232 | 1232 | ||
@@ -1235,7 +1235,7 @@ retry: | |||
1235 | someone changes the status */ | 1235 | someone changes the status */ |
1236 | set_current_state(TASK_UNINTERRUPTIBLE); | 1236 | set_current_state(TASK_UNINTERRUPTIBLE); |
1237 | add_wait_queue(&chip->wq, &wait); | 1237 | add_wait_queue(&chip->wq, &wait); |
1238 | spin_unlock_bh(chip->mutex); | 1238 | mutex_unlock(&chip->mutex); |
1239 | schedule(); | 1239 | schedule(); |
1240 | remove_wait_queue(&chip->wq, &wait); | 1240 | remove_wait_queue(&chip->wq, &wait); |
1241 | timeo = jiffies + HZ; | 1241 | timeo = jiffies + HZ; |
@@ -1247,9 +1247,9 @@ retry: | |||
1247 | map_write(map, CMD(0xD0), adr); | 1247 | map_write(map, CMD(0xD0), adr); |
1248 | chip->state = FL_UNLOCKING; | 1248 | chip->state = FL_UNLOCKING; |
1249 | 1249 | ||
1250 | spin_unlock_bh(chip->mutex); | 1250 | mutex_unlock(&chip->mutex); |
1251 | msleep(1000); | 1251 | msleep(1000); |
1252 | spin_lock_bh(chip->mutex); | 1252 | mutex_lock(&chip->mutex); |
1253 | 1253 | ||
1254 | /* FIXME. Use a timer to check this, and return immediately. */ | 1254 | /* FIXME. Use a timer to check this, and return immediately. */ |
1255 | /* Once the state machine's known to be working I'll do that */ | 1255 | /* Once the state machine's known to be working I'll do that */ |
@@ -1267,21 +1267,21 @@ retry: | |||
1267 | chip->state = FL_STATUS; | 1267 | chip->state = FL_STATUS; |
1268 | printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); | 1268 | printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); |
1269 | DISABLE_VPP(map); | 1269 | DISABLE_VPP(map); |
1270 | spin_unlock_bh(chip->mutex); | 1270 | mutex_unlock(&chip->mutex); |
1271 | return -EIO; | 1271 | return -EIO; |
1272 | } | 1272 | } |
1273 | 1273 | ||
1274 | /* Latency issues. Drop the unlock, wait a while and retry */ | 1274 | /* Latency issues. Drop the unlock, wait a while and retry */ |
1275 | spin_unlock_bh(chip->mutex); | 1275 | mutex_unlock(&chip->mutex); |
1276 | cfi_udelay(1); | 1276 | cfi_udelay(1); |
1277 | spin_lock_bh(chip->mutex); | 1277 | mutex_lock(&chip->mutex); |
1278 | } | 1278 | } |
1279 | 1279 | ||
1280 | /* Done and happy. */ | 1280 | /* Done and happy. */ |
1281 | chip->state = FL_STATUS; | 1281 | chip->state = FL_STATUS; |
1282 | DISABLE_VPP(map); | 1282 | DISABLE_VPP(map); |
1283 | wake_up(&chip->wq); | 1283 | wake_up(&chip->wq); |
1284 | spin_unlock_bh(chip->mutex); | 1284 | mutex_unlock(&chip->mutex); |
1285 | return 0; | 1285 | return 0; |
1286 | } | 1286 | } |
1287 | static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | 1287 | static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
@@ -1334,7 +1334,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd) | |||
1334 | for (i=0; !ret && i<cfi->numchips; i++) { | 1334 | for (i=0; !ret && i<cfi->numchips; i++) { |
1335 | chip = &cfi->chips[i]; | 1335 | chip = &cfi->chips[i]; |
1336 | 1336 | ||
1337 | spin_lock_bh(chip->mutex); | 1337 | mutex_lock(&chip->mutex); |
1338 | 1338 | ||
1339 | switch(chip->state) { | 1339 | switch(chip->state) { |
1340 | case FL_READY: | 1340 | case FL_READY: |
@@ -1354,7 +1354,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd) | |||
1354 | ret = -EAGAIN; | 1354 | ret = -EAGAIN; |
1355 | break; | 1355 | break; |
1356 | } | 1356 | } |
1357 | spin_unlock_bh(chip->mutex); | 1357 | mutex_unlock(&chip->mutex); |
1358 | } | 1358 | } |
1359 | 1359 | ||
1360 | /* Unlock the chips again */ | 1360 | /* Unlock the chips again */ |
@@ -1363,7 +1363,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd) | |||
1363 | for (i--; i >=0; i--) { | 1363 | for (i--; i >=0; i--) { |
1364 | chip = &cfi->chips[i]; | 1364 | chip = &cfi->chips[i]; |
1365 | 1365 | ||
1366 | spin_lock_bh(chip->mutex); | 1366 | mutex_lock(&chip->mutex); |
1367 | 1367 | ||
1368 | if (chip->state == FL_PM_SUSPENDED) { | 1368 | if (chip->state == FL_PM_SUSPENDED) { |
1369 | /* No need to force it into a known state here, | 1369 | /* No need to force it into a known state here, |
@@ -1372,7 +1372,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd) | |||
1372 | chip->state = chip->oldstate; | 1372 | chip->state = chip->oldstate; |
1373 | wake_up(&chip->wq); | 1373 | wake_up(&chip->wq); |
1374 | } | 1374 | } |
1375 | spin_unlock_bh(chip->mutex); | 1375 | mutex_unlock(&chip->mutex); |
1376 | } | 1376 | } |
1377 | } | 1377 | } |
1378 | 1378 | ||
@@ -1390,7 +1390,7 @@ static void cfi_staa_resume(struct mtd_info *mtd) | |||
1390 | 1390 | ||
1391 | chip = &cfi->chips[i]; | 1391 | chip = &cfi->chips[i]; |
1392 | 1392 | ||
1393 | spin_lock_bh(chip->mutex); | 1393 | mutex_lock(&chip->mutex); |
1394 | 1394 | ||
1395 | /* Go to known state. Chip may have been power cycled */ | 1395 | /* Go to known state. Chip may have been power cycled */ |
1396 | if (chip->state == FL_PM_SUSPENDED) { | 1396 | if (chip->state == FL_PM_SUSPENDED) { |
@@ -1399,7 +1399,7 @@ static void cfi_staa_resume(struct mtd_info *mtd) | |||
1399 | wake_up(&chip->wq); | 1399 | wake_up(&chip->wq); |
1400 | } | 1400 | } |
1401 | 1401 | ||
1402 | spin_unlock_bh(chip->mutex); | 1402 | mutex_unlock(&chip->mutex); |
1403 | } | 1403 | } |
1404 | } | 1404 | } |
1405 | 1405 | ||
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c index e63e6749429a..b2acd32f4fbf 100644 --- a/drivers/mtd/chips/cfi_probe.c +++ b/drivers/mtd/chips/cfi_probe.c | |||
@@ -158,6 +158,7 @@ static int __xipram cfi_chip_setup(struct map_info *map, | |||
158 | __u32 base = 0; | 158 | __u32 base = 0; |
159 | int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor); | 159 | int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor); |
160 | int i; | 160 | int i; |
161 | int addr_unlock1 = 0x555, addr_unlock2 = 0x2AA; | ||
161 | 162 | ||
162 | xip_enable(base, map, cfi); | 163 | xip_enable(base, map, cfi); |
163 | #ifdef DEBUG_CFI | 164 | #ifdef DEBUG_CFI |
@@ -181,29 +182,6 @@ static int __xipram cfi_chip_setup(struct map_info *map, | |||
181 | for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++) | 182 | for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++) |
182 | ((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor); | 183 | ((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor); |
183 | 184 | ||
184 | /* Note we put the device back into Read Mode BEFORE going into Auto | ||
185 | * Select Mode, as some devices support nesting of modes, others | ||
186 | * don't. This way should always work. | ||
187 | * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and | ||
188 | * so should be treated as nops or illegal (and so put the device | ||
189 | * back into Read Mode, which is a nop in this case). | ||
190 | */ | ||
191 | cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL); | ||
192 | cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL); | ||
193 | cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL); | ||
194 | cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL); | ||
195 | cfi->mfr = cfi_read_query16(map, base); | ||
196 | cfi->id = cfi_read_query16(map, base + ofs_factor); | ||
197 | |||
198 | /* Get AMD/Spansion extended JEDEC ID */ | ||
199 | if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e) | ||
200 | cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 | | ||
201 | cfi_read_query(map, base + 0xf * ofs_factor); | ||
202 | |||
203 | /* Put it back into Read Mode */ | ||
204 | cfi_qry_mode_off(base, map, cfi); | ||
205 | xip_allowed(base, map); | ||
206 | |||
207 | /* Do any necessary byteswapping */ | 185 | /* Do any necessary byteswapping */ |
208 | cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID); | 186 | cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID); |
209 | 187 | ||
@@ -228,6 +206,35 @@ static int __xipram cfi_chip_setup(struct map_info *map, | |||
228 | #endif | 206 | #endif |
229 | } | 207 | } |
230 | 208 | ||
209 | if (cfi->cfiq->P_ID == P_ID_SST_OLD) { | ||
210 | addr_unlock1 = 0x5555; | ||
211 | addr_unlock2 = 0x2AAA; | ||
212 | } | ||
213 | |||
214 | /* | ||
215 | * Note we put the device back into Read Mode BEFORE going into Auto | ||
216 | * Select Mode, as some devices support nesting of modes, others | ||
217 | * don't. This way should always work. | ||
218 | * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and | ||
219 | * so should be treated as nops or illegal (and so put the device | ||
220 | * back into Read Mode, which is a nop in this case). | ||
221 | */ | ||
222 | cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL); | ||
223 | cfi_send_gen_cmd(0xaa, addr_unlock1, base, map, cfi, cfi->device_type, NULL); | ||
224 | cfi_send_gen_cmd(0x55, addr_unlock2, base, map, cfi, cfi->device_type, NULL); | ||
225 | cfi_send_gen_cmd(0x90, addr_unlock1, base, map, cfi, cfi->device_type, NULL); | ||
226 | cfi->mfr = cfi_read_query16(map, base); | ||
227 | cfi->id = cfi_read_query16(map, base + ofs_factor); | ||
228 | |||
229 | /* Get AMD/Spansion extended JEDEC ID */ | ||
230 | if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e) | ||
231 | cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 | | ||
232 | cfi_read_query(map, base + 0xf * ofs_factor); | ||
233 | |||
234 | /* Put it back into Read Mode */ | ||
235 | cfi_qry_mode_off(base, map, cfi); | ||
236 | xip_allowed(base, map); | ||
237 | |||
231 | printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n", | 238 | printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n", |
232 | map->name, cfi->interleave, cfi->device_type*8, base, | 239 | map->name, cfi->interleave, cfi->device_type*8, base, |
233 | map->bankwidth*8); | 240 | map->bankwidth*8); |
@@ -269,6 +276,9 @@ static char *vendorname(__u16 vendor) | |||
269 | case P_ID_SST_PAGE: | 276 | case P_ID_SST_PAGE: |
270 | return "SST Page Write"; | 277 | return "SST Page Write"; |
271 | 278 | ||
279 | case P_ID_SST_OLD: | ||
280 | return "SST 39VF160x/39VF320x"; | ||
281 | |||
272 | case P_ID_INTEL_PERFORMANCE: | 282 | case P_ID_INTEL_PERFORMANCE: |
273 | return "Intel Performance Code"; | 283 | return "Intel Performance Code"; |
274 | 284 | ||
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c index ca584d0380b4..d7c2c672757e 100644 --- a/drivers/mtd/chips/cfi_util.c +++ b/drivers/mtd/chips/cfi_util.c | |||
@@ -104,10 +104,11 @@ __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* n | |||
104 | int i; | 104 | int i; |
105 | struct cfi_extquery *extp = NULL; | 105 | struct cfi_extquery *extp = NULL; |
106 | 106 | ||
107 | printk(" %s Extended Query Table at 0x%4.4X\n", name, adr); | ||
108 | if (!adr) | 107 | if (!adr) |
109 | goto out; | 108 | goto out; |
110 | 109 | ||
110 | printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr); | ||
111 | |||
111 | extp = kmalloc(size, GFP_KERNEL); | 112 | extp = kmalloc(size, GFP_KERNEL); |
112 | if (!extp) { | 113 | if (!extp) { |
113 | printk(KERN_ERR "Failed to allocate memory\n"); | 114 | printk(KERN_ERR "Failed to allocate memory\n"); |
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h index 57e0e4e921f9..d18064977192 100644 --- a/drivers/mtd/chips/fwh_lock.h +++ b/drivers/mtd/chips/fwh_lock.h | |||
@@ -58,10 +58,10 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip, | |||
58 | * to flash memory - that means that we don't have to check status | 58 | * to flash memory - that means that we don't have to check status |
59 | * and timeout. | 59 | * and timeout. |
60 | */ | 60 | */ |
61 | spin_lock(chip->mutex); | 61 | mutex_lock(&chip->mutex); |
62 | ret = get_chip(map, chip, adr, FL_LOCKING); | 62 | ret = get_chip(map, chip, adr, FL_LOCKING); |
63 | if (ret) { | 63 | if (ret) { |
64 | spin_unlock(chip->mutex); | 64 | mutex_unlock(&chip->mutex); |
65 | return ret; | 65 | return ret; |
66 | } | 66 | } |
67 | 67 | ||
@@ -72,7 +72,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip, | |||
72 | /* Done and happy. */ | 72 | /* Done and happy. */ |
73 | chip->state = chip->oldstate; | 73 | chip->state = chip->oldstate; |
74 | put_chip(map, chip, adr); | 74 | put_chip(map, chip, adr); |
75 | spin_unlock(chip->mutex); | 75 | mutex_unlock(&chip->mutex); |
76 | return 0; | 76 | return 0; |
77 | } | 77 | } |
78 | 78 | ||
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c index e2dc96441e05..3b9a2843c5f8 100644 --- a/drivers/mtd/chips/gen_probe.c +++ b/drivers/mtd/chips/gen_probe.c | |||
@@ -155,8 +155,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi | |||
155 | pchip->start = (i << cfi.chipshift); | 155 | pchip->start = (i << cfi.chipshift); |
156 | pchip->state = FL_READY; | 156 | pchip->state = FL_READY; |
157 | init_waitqueue_head(&pchip->wq); | 157 | init_waitqueue_head(&pchip->wq); |
158 | spin_lock_init(&pchip->_spinlock); | 158 | mutex_init(&pchip->mutex); |
159 | pchip->mutex = &pchip->_spinlock; | ||
160 | } | 159 | } |
161 | } | 160 | } |
162 | 161 | ||
@@ -242,17 +241,19 @@ static struct mtd_info *check_cmd_set(struct map_info *map, int primary) | |||
242 | /* We need these for the !CONFIG_MODULES case, | 241 | /* We need these for the !CONFIG_MODULES case, |
243 | because symbol_get() doesn't work there */ | 242 | because symbol_get() doesn't work there */ |
244 | #ifdef CONFIG_MTD_CFI_INTELEXT | 243 | #ifdef CONFIG_MTD_CFI_INTELEXT |
245 | case 0x0001: | 244 | case P_ID_INTEL_EXT: |
246 | case 0x0003: | 245 | case P_ID_INTEL_STD: |
247 | case 0x0200: | 246 | case P_ID_INTEL_PERFORMANCE: |
248 | return cfi_cmdset_0001(map, primary); | 247 | return cfi_cmdset_0001(map, primary); |
249 | #endif | 248 | #endif |
250 | #ifdef CONFIG_MTD_CFI_AMDSTD | 249 | #ifdef CONFIG_MTD_CFI_AMDSTD |
251 | case 0x0002: | 250 | case P_ID_AMD_STD: |
251 | case P_ID_SST_OLD: | ||
252 | case P_ID_WINBOND: | ||
252 | return cfi_cmdset_0002(map, primary); | 253 | return cfi_cmdset_0002(map, primary); |
253 | #endif | 254 | #endif |
254 | #ifdef CONFIG_MTD_CFI_STAA | 255 | #ifdef CONFIG_MTD_CFI_STAA |
255 | case 0x0020: | 256 | case P_ID_ST_ADV: |
256 | return cfi_cmdset_0020(map, primary); | 257 | return cfi_cmdset_0020(map, primary); |
257 | #endif | 258 | #endif |
258 | default: | 259 | default: |
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c index 8db1148dfa47..d72a5fb2d041 100644 --- a/drivers/mtd/chips/jedec_probe.c +++ b/drivers/mtd/chips/jedec_probe.c | |||
@@ -22,24 +22,6 @@ | |||
22 | #include <linux/mtd/cfi.h> | 22 | #include <linux/mtd/cfi.h> |
23 | #include <linux/mtd/gen_probe.h> | 23 | #include <linux/mtd/gen_probe.h> |
24 | 24 | ||
25 | /* Manufacturers */ | ||
26 | #define MANUFACTURER_AMD 0x0001 | ||
27 | #define MANUFACTURER_ATMEL 0x001f | ||
28 | #define MANUFACTURER_EON 0x001c | ||
29 | #define MANUFACTURER_FUJITSU 0x0004 | ||
30 | #define MANUFACTURER_HYUNDAI 0x00AD | ||
31 | #define MANUFACTURER_INTEL 0x0089 | ||
32 | #define MANUFACTURER_MACRONIX 0x00C2 | ||
33 | #define MANUFACTURER_NEC 0x0010 | ||
34 | #define MANUFACTURER_PMC 0x009D | ||
35 | #define MANUFACTURER_SHARP 0x00b0 | ||
36 | #define MANUFACTURER_SST 0x00BF | ||
37 | #define MANUFACTURER_ST 0x0020 | ||
38 | #define MANUFACTURER_TOSHIBA 0x0098 | ||
39 | #define MANUFACTURER_WINBOND 0x00da | ||
40 | #define CONTINUATION_CODE 0x007f | ||
41 | |||
42 | |||
43 | /* AMD */ | 25 | /* AMD */ |
44 | #define AM29DL800BB 0x22CB | 26 | #define AM29DL800BB 0x22CB |
45 | #define AM29DL800BT 0x224A | 27 | #define AM29DL800BT 0x224A |
@@ -166,6 +148,8 @@ | |||
166 | #define SST39LF160 0x2782 | 148 | #define SST39LF160 0x2782 |
167 | #define SST39VF1601 0x234b | 149 | #define SST39VF1601 0x234b |
168 | #define SST39VF3201 0x235b | 150 | #define SST39VF3201 0x235b |
151 | #define SST39WF1601 0x274b | ||
152 | #define SST39WF1602 0x274a | ||
169 | #define SST39LF512 0x00D4 | 153 | #define SST39LF512 0x00D4 |
170 | #define SST39LF010 0x00D5 | 154 | #define SST39LF010 0x00D5 |
171 | #define SST39LF020 0x00D6 | 155 | #define SST39LF020 0x00D6 |
@@ -309,7 +293,7 @@ struct amd_flash_info { | |||
309 | */ | 293 | */ |
310 | static const struct amd_flash_info jedec_table[] = { | 294 | static const struct amd_flash_info jedec_table[] = { |
311 | { | 295 | { |
312 | .mfr_id = MANUFACTURER_AMD, | 296 | .mfr_id = CFI_MFR_AMD, |
313 | .dev_id = AM29F032B, | 297 | .dev_id = AM29F032B, |
314 | .name = "AMD AM29F032B", | 298 | .name = "AMD AM29F032B", |
315 | .uaddr = MTD_UADDR_0x0555_0x02AA, | 299 | .uaddr = MTD_UADDR_0x0555_0x02AA, |
@@ -321,7 +305,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
321 | ERASEINFO(0x10000,64) | 305 | ERASEINFO(0x10000,64) |
322 | } | 306 | } |
323 | }, { | 307 | }, { |
324 | .mfr_id = MANUFACTURER_AMD, | 308 | .mfr_id = CFI_MFR_AMD, |
325 | .dev_id = AM29LV160DT, | 309 | .dev_id = AM29LV160DT, |
326 | .name = "AMD AM29LV160DT", | 310 | .name = "AMD AM29LV160DT", |
327 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 311 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -336,7 +320,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
336 | ERASEINFO(0x04000,1) | 320 | ERASEINFO(0x04000,1) |
337 | } | 321 | } |
338 | }, { | 322 | }, { |
339 | .mfr_id = MANUFACTURER_AMD, | 323 | .mfr_id = CFI_MFR_AMD, |
340 | .dev_id = AM29LV160DB, | 324 | .dev_id = AM29LV160DB, |
341 | .name = "AMD AM29LV160DB", | 325 | .name = "AMD AM29LV160DB", |
342 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 326 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -351,7 +335,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
351 | ERASEINFO(0x10000,31) | 335 | ERASEINFO(0x10000,31) |
352 | } | 336 | } |
353 | }, { | 337 | }, { |
354 | .mfr_id = MANUFACTURER_AMD, | 338 | .mfr_id = CFI_MFR_AMD, |
355 | .dev_id = AM29LV400BB, | 339 | .dev_id = AM29LV400BB, |
356 | .name = "AMD AM29LV400BB", | 340 | .name = "AMD AM29LV400BB", |
357 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 341 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -366,7 +350,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
366 | ERASEINFO(0x10000,7) | 350 | ERASEINFO(0x10000,7) |
367 | } | 351 | } |
368 | }, { | 352 | }, { |
369 | .mfr_id = MANUFACTURER_AMD, | 353 | .mfr_id = CFI_MFR_AMD, |
370 | .dev_id = AM29LV400BT, | 354 | .dev_id = AM29LV400BT, |
371 | .name = "AMD AM29LV400BT", | 355 | .name = "AMD AM29LV400BT", |
372 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 356 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -381,7 +365,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
381 | ERASEINFO(0x04000,1) | 365 | ERASEINFO(0x04000,1) |
382 | } | 366 | } |
383 | }, { | 367 | }, { |
384 | .mfr_id = MANUFACTURER_AMD, | 368 | .mfr_id = CFI_MFR_AMD, |
385 | .dev_id = AM29LV800BB, | 369 | .dev_id = AM29LV800BB, |
386 | .name = "AMD AM29LV800BB", | 370 | .name = "AMD AM29LV800BB", |
387 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 371 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -397,7 +381,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
397 | } | 381 | } |
398 | }, { | 382 | }, { |
399 | /* add DL */ | 383 | /* add DL */ |
400 | .mfr_id = MANUFACTURER_AMD, | 384 | .mfr_id = CFI_MFR_AMD, |
401 | .dev_id = AM29DL800BB, | 385 | .dev_id = AM29DL800BB, |
402 | .name = "AMD AM29DL800BB", | 386 | .name = "AMD AM29DL800BB", |
403 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 387 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -414,7 +398,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
414 | ERASEINFO(0x10000,14) | 398 | ERASEINFO(0x10000,14) |
415 | } | 399 | } |
416 | }, { | 400 | }, { |
417 | .mfr_id = MANUFACTURER_AMD, | 401 | .mfr_id = CFI_MFR_AMD, |
418 | .dev_id = AM29DL800BT, | 402 | .dev_id = AM29DL800BT, |
419 | .name = "AMD AM29DL800BT", | 403 | .name = "AMD AM29DL800BT", |
420 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 404 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -431,7 +415,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
431 | ERASEINFO(0x04000,1) | 415 | ERASEINFO(0x04000,1) |
432 | } | 416 | } |
433 | }, { | 417 | }, { |
434 | .mfr_id = MANUFACTURER_AMD, | 418 | .mfr_id = CFI_MFR_AMD, |
435 | .dev_id = AM29F800BB, | 419 | .dev_id = AM29F800BB, |
436 | .name = "AMD AM29F800BB", | 420 | .name = "AMD AM29F800BB", |
437 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 421 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -446,7 +430,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
446 | ERASEINFO(0x10000,15), | 430 | ERASEINFO(0x10000,15), |
447 | } | 431 | } |
448 | }, { | 432 | }, { |
449 | .mfr_id = MANUFACTURER_AMD, | 433 | .mfr_id = CFI_MFR_AMD, |
450 | .dev_id = AM29LV800BT, | 434 | .dev_id = AM29LV800BT, |
451 | .name = "AMD AM29LV800BT", | 435 | .name = "AMD AM29LV800BT", |
452 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 436 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -461,7 +445,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
461 | ERASEINFO(0x04000,1) | 445 | ERASEINFO(0x04000,1) |
462 | } | 446 | } |
463 | }, { | 447 | }, { |
464 | .mfr_id = MANUFACTURER_AMD, | 448 | .mfr_id = CFI_MFR_AMD, |
465 | .dev_id = AM29F800BT, | 449 | .dev_id = AM29F800BT, |
466 | .name = "AMD AM29F800BT", | 450 | .name = "AMD AM29F800BT", |
467 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 451 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -476,7 +460,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
476 | ERASEINFO(0x04000,1) | 460 | ERASEINFO(0x04000,1) |
477 | } | 461 | } |
478 | }, { | 462 | }, { |
479 | .mfr_id = MANUFACTURER_AMD, | 463 | .mfr_id = CFI_MFR_AMD, |
480 | .dev_id = AM29F017D, | 464 | .dev_id = AM29F017D, |
481 | .name = "AMD AM29F017D", | 465 | .name = "AMD AM29F017D", |
482 | .devtypes = CFI_DEVICETYPE_X8, | 466 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -488,7 +472,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
488 | ERASEINFO(0x10000,32), | 472 | ERASEINFO(0x10000,32), |
489 | } | 473 | } |
490 | }, { | 474 | }, { |
491 | .mfr_id = MANUFACTURER_AMD, | 475 | .mfr_id = CFI_MFR_AMD, |
492 | .dev_id = AM29F016D, | 476 | .dev_id = AM29F016D, |
493 | .name = "AMD AM29F016D", | 477 | .name = "AMD AM29F016D", |
494 | .devtypes = CFI_DEVICETYPE_X8, | 478 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -500,7 +484,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
500 | ERASEINFO(0x10000,32), | 484 | ERASEINFO(0x10000,32), |
501 | } | 485 | } |
502 | }, { | 486 | }, { |
503 | .mfr_id = MANUFACTURER_AMD, | 487 | .mfr_id = CFI_MFR_AMD, |
504 | .dev_id = AM29F080, | 488 | .dev_id = AM29F080, |
505 | .name = "AMD AM29F080", | 489 | .name = "AMD AM29F080", |
506 | .devtypes = CFI_DEVICETYPE_X8, | 490 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -512,7 +496,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
512 | ERASEINFO(0x10000,16), | 496 | ERASEINFO(0x10000,16), |
513 | } | 497 | } |
514 | }, { | 498 | }, { |
515 | .mfr_id = MANUFACTURER_AMD, | 499 | .mfr_id = CFI_MFR_AMD, |
516 | .dev_id = AM29F040, | 500 | .dev_id = AM29F040, |
517 | .name = "AMD AM29F040", | 501 | .name = "AMD AM29F040", |
518 | .devtypes = CFI_DEVICETYPE_X8, | 502 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -524,7 +508,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
524 | ERASEINFO(0x10000,8), | 508 | ERASEINFO(0x10000,8), |
525 | } | 509 | } |
526 | }, { | 510 | }, { |
527 | .mfr_id = MANUFACTURER_AMD, | 511 | .mfr_id = CFI_MFR_AMD, |
528 | .dev_id = AM29LV040B, | 512 | .dev_id = AM29LV040B, |
529 | .name = "AMD AM29LV040B", | 513 | .name = "AMD AM29LV040B", |
530 | .devtypes = CFI_DEVICETYPE_X8, | 514 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -536,7 +520,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
536 | ERASEINFO(0x10000,8), | 520 | ERASEINFO(0x10000,8), |
537 | } | 521 | } |
538 | }, { | 522 | }, { |
539 | .mfr_id = MANUFACTURER_AMD, | 523 | .mfr_id = CFI_MFR_AMD, |
540 | .dev_id = AM29F002T, | 524 | .dev_id = AM29F002T, |
541 | .name = "AMD AM29F002T", | 525 | .name = "AMD AM29F002T", |
542 | .devtypes = CFI_DEVICETYPE_X8, | 526 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -551,7 +535,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
551 | ERASEINFO(0x04000,1), | 535 | ERASEINFO(0x04000,1), |
552 | } | 536 | } |
553 | }, { | 537 | }, { |
554 | .mfr_id = MANUFACTURER_AMD, | 538 | .mfr_id = CFI_MFR_AMD, |
555 | .dev_id = AM29SL800DT, | 539 | .dev_id = AM29SL800DT, |
556 | .name = "AMD AM29SL800DT", | 540 | .name = "AMD AM29SL800DT", |
557 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 541 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -566,7 +550,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
566 | ERASEINFO(0x04000,1), | 550 | ERASEINFO(0x04000,1), |
567 | } | 551 | } |
568 | }, { | 552 | }, { |
569 | .mfr_id = MANUFACTURER_AMD, | 553 | .mfr_id = CFI_MFR_AMD, |
570 | .dev_id = AM29SL800DB, | 554 | .dev_id = AM29SL800DB, |
571 | .name = "AMD AM29SL800DB", | 555 | .name = "AMD AM29SL800DB", |
572 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 556 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -581,7 +565,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
581 | ERASEINFO(0x10000,15), | 565 | ERASEINFO(0x10000,15), |
582 | } | 566 | } |
583 | }, { | 567 | }, { |
584 | .mfr_id = MANUFACTURER_ATMEL, | 568 | .mfr_id = CFI_MFR_ATMEL, |
585 | .dev_id = AT49BV512, | 569 | .dev_id = AT49BV512, |
586 | .name = "Atmel AT49BV512", | 570 | .name = "Atmel AT49BV512", |
587 | .devtypes = CFI_DEVICETYPE_X8, | 571 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -593,7 +577,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
593 | ERASEINFO(0x10000,1) | 577 | ERASEINFO(0x10000,1) |
594 | } | 578 | } |
595 | }, { | 579 | }, { |
596 | .mfr_id = MANUFACTURER_ATMEL, | 580 | .mfr_id = CFI_MFR_ATMEL, |
597 | .dev_id = AT29LV512, | 581 | .dev_id = AT29LV512, |
598 | .name = "Atmel AT29LV512", | 582 | .name = "Atmel AT29LV512", |
599 | .devtypes = CFI_DEVICETYPE_X8, | 583 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -606,7 +590,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
606 | ERASEINFO(0x80,256) | 590 | ERASEINFO(0x80,256) |
607 | } | 591 | } |
608 | }, { | 592 | }, { |
609 | .mfr_id = MANUFACTURER_ATMEL, | 593 | .mfr_id = CFI_MFR_ATMEL, |
610 | .dev_id = AT49BV16X, | 594 | .dev_id = AT49BV16X, |
611 | .name = "Atmel AT49BV16X", | 595 | .name = "Atmel AT49BV16X", |
612 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 596 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -619,7 +603,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
619 | ERASEINFO(0x10000,31) | 603 | ERASEINFO(0x10000,31) |
620 | } | 604 | } |
621 | }, { | 605 | }, { |
622 | .mfr_id = MANUFACTURER_ATMEL, | 606 | .mfr_id = CFI_MFR_ATMEL, |
623 | .dev_id = AT49BV16XT, | 607 | .dev_id = AT49BV16XT, |
624 | .name = "Atmel AT49BV16XT", | 608 | .name = "Atmel AT49BV16XT", |
625 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 609 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -632,7 +616,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
632 | ERASEINFO(0x02000,8) | 616 | ERASEINFO(0x02000,8) |
633 | } | 617 | } |
634 | }, { | 618 | }, { |
635 | .mfr_id = MANUFACTURER_ATMEL, | 619 | .mfr_id = CFI_MFR_ATMEL, |
636 | .dev_id = AT49BV32X, | 620 | .dev_id = AT49BV32X, |
637 | .name = "Atmel AT49BV32X", | 621 | .name = "Atmel AT49BV32X", |
638 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 622 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -645,7 +629,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
645 | ERASEINFO(0x10000,63) | 629 | ERASEINFO(0x10000,63) |
646 | } | 630 | } |
647 | }, { | 631 | }, { |
648 | .mfr_id = MANUFACTURER_ATMEL, | 632 | .mfr_id = CFI_MFR_ATMEL, |
649 | .dev_id = AT49BV32XT, | 633 | .dev_id = AT49BV32XT, |
650 | .name = "Atmel AT49BV32XT", | 634 | .name = "Atmel AT49BV32XT", |
651 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 635 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -658,7 +642,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
658 | ERASEINFO(0x02000,8) | 642 | ERASEINFO(0x02000,8) |
659 | } | 643 | } |
660 | }, { | 644 | }, { |
661 | .mfr_id = MANUFACTURER_EON, | 645 | .mfr_id = CFI_MFR_EON, |
662 | .dev_id = EN29SL800BT, | 646 | .dev_id = EN29SL800BT, |
663 | .name = "Eon EN29SL800BT", | 647 | .name = "Eon EN29SL800BT", |
664 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 648 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -673,7 +657,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
673 | ERASEINFO(0x04000,1), | 657 | ERASEINFO(0x04000,1), |
674 | } | 658 | } |
675 | }, { | 659 | }, { |
676 | .mfr_id = MANUFACTURER_EON, | 660 | .mfr_id = CFI_MFR_EON, |
677 | .dev_id = EN29SL800BB, | 661 | .dev_id = EN29SL800BB, |
678 | .name = "Eon EN29SL800BB", | 662 | .name = "Eon EN29SL800BB", |
679 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 663 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -688,7 +672,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
688 | ERASEINFO(0x10000,15), | 672 | ERASEINFO(0x10000,15), |
689 | } | 673 | } |
690 | }, { | 674 | }, { |
691 | .mfr_id = MANUFACTURER_FUJITSU, | 675 | .mfr_id = CFI_MFR_FUJITSU, |
692 | .dev_id = MBM29F040C, | 676 | .dev_id = MBM29F040C, |
693 | .name = "Fujitsu MBM29F040C", | 677 | .name = "Fujitsu MBM29F040C", |
694 | .devtypes = CFI_DEVICETYPE_X8, | 678 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -700,7 +684,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
700 | ERASEINFO(0x10000,8) | 684 | ERASEINFO(0x10000,8) |
701 | } | 685 | } |
702 | }, { | 686 | }, { |
703 | .mfr_id = MANUFACTURER_FUJITSU, | 687 | .mfr_id = CFI_MFR_FUJITSU, |
704 | .dev_id = MBM29F800BA, | 688 | .dev_id = MBM29F800BA, |
705 | .name = "Fujitsu MBM29F800BA", | 689 | .name = "Fujitsu MBM29F800BA", |
706 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 690 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -715,7 +699,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
715 | ERASEINFO(0x10000,15), | 699 | ERASEINFO(0x10000,15), |
716 | } | 700 | } |
717 | }, { | 701 | }, { |
718 | .mfr_id = MANUFACTURER_FUJITSU, | 702 | .mfr_id = CFI_MFR_FUJITSU, |
719 | .dev_id = MBM29LV650UE, | 703 | .dev_id = MBM29LV650UE, |
720 | .name = "Fujitsu MBM29LV650UE", | 704 | .name = "Fujitsu MBM29LV650UE", |
721 | .devtypes = CFI_DEVICETYPE_X8, | 705 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -727,7 +711,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
727 | ERASEINFO(0x10000,128) | 711 | ERASEINFO(0x10000,128) |
728 | } | 712 | } |
729 | }, { | 713 | }, { |
730 | .mfr_id = MANUFACTURER_FUJITSU, | 714 | .mfr_id = CFI_MFR_FUJITSU, |
731 | .dev_id = MBM29LV320TE, | 715 | .dev_id = MBM29LV320TE, |
732 | .name = "Fujitsu MBM29LV320TE", | 716 | .name = "Fujitsu MBM29LV320TE", |
733 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 717 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -740,7 +724,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
740 | ERASEINFO(0x02000,8) | 724 | ERASEINFO(0x02000,8) |
741 | } | 725 | } |
742 | }, { | 726 | }, { |
743 | .mfr_id = MANUFACTURER_FUJITSU, | 727 | .mfr_id = CFI_MFR_FUJITSU, |
744 | .dev_id = MBM29LV320BE, | 728 | .dev_id = MBM29LV320BE, |
745 | .name = "Fujitsu MBM29LV320BE", | 729 | .name = "Fujitsu MBM29LV320BE", |
746 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 730 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -753,7 +737,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
753 | ERASEINFO(0x10000,63) | 737 | ERASEINFO(0x10000,63) |
754 | } | 738 | } |
755 | }, { | 739 | }, { |
756 | .mfr_id = MANUFACTURER_FUJITSU, | 740 | .mfr_id = CFI_MFR_FUJITSU, |
757 | .dev_id = MBM29LV160TE, | 741 | .dev_id = MBM29LV160TE, |
758 | .name = "Fujitsu MBM29LV160TE", | 742 | .name = "Fujitsu MBM29LV160TE", |
759 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 743 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -768,7 +752,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
768 | ERASEINFO(0x04000,1) | 752 | ERASEINFO(0x04000,1) |
769 | } | 753 | } |
770 | }, { | 754 | }, { |
771 | .mfr_id = MANUFACTURER_FUJITSU, | 755 | .mfr_id = CFI_MFR_FUJITSU, |
772 | .dev_id = MBM29LV160BE, | 756 | .dev_id = MBM29LV160BE, |
773 | .name = "Fujitsu MBM29LV160BE", | 757 | .name = "Fujitsu MBM29LV160BE", |
774 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 758 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -783,7 +767,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
783 | ERASEINFO(0x10000,31) | 767 | ERASEINFO(0x10000,31) |
784 | } | 768 | } |
785 | }, { | 769 | }, { |
786 | .mfr_id = MANUFACTURER_FUJITSU, | 770 | .mfr_id = CFI_MFR_FUJITSU, |
787 | .dev_id = MBM29LV800BA, | 771 | .dev_id = MBM29LV800BA, |
788 | .name = "Fujitsu MBM29LV800BA", | 772 | .name = "Fujitsu MBM29LV800BA", |
789 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 773 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -798,7 +782,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
798 | ERASEINFO(0x10000,15) | 782 | ERASEINFO(0x10000,15) |
799 | } | 783 | } |
800 | }, { | 784 | }, { |
801 | .mfr_id = MANUFACTURER_FUJITSU, | 785 | .mfr_id = CFI_MFR_FUJITSU, |
802 | .dev_id = MBM29LV800TA, | 786 | .dev_id = MBM29LV800TA, |
803 | .name = "Fujitsu MBM29LV800TA", | 787 | .name = "Fujitsu MBM29LV800TA", |
804 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 788 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -813,7 +797,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
813 | ERASEINFO(0x04000,1) | 797 | ERASEINFO(0x04000,1) |
814 | } | 798 | } |
815 | }, { | 799 | }, { |
816 | .mfr_id = MANUFACTURER_FUJITSU, | 800 | .mfr_id = CFI_MFR_FUJITSU, |
817 | .dev_id = MBM29LV400BC, | 801 | .dev_id = MBM29LV400BC, |
818 | .name = "Fujitsu MBM29LV400BC", | 802 | .name = "Fujitsu MBM29LV400BC", |
819 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 803 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -828,7 +812,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
828 | ERASEINFO(0x10000,7) | 812 | ERASEINFO(0x10000,7) |
829 | } | 813 | } |
830 | }, { | 814 | }, { |
831 | .mfr_id = MANUFACTURER_FUJITSU, | 815 | .mfr_id = CFI_MFR_FUJITSU, |
832 | .dev_id = MBM29LV400TC, | 816 | .dev_id = MBM29LV400TC, |
833 | .name = "Fujitsu MBM29LV400TC", | 817 | .name = "Fujitsu MBM29LV400TC", |
834 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 818 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -843,7 +827,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
843 | ERASEINFO(0x04000,1) | 827 | ERASEINFO(0x04000,1) |
844 | } | 828 | } |
845 | }, { | 829 | }, { |
846 | .mfr_id = MANUFACTURER_HYUNDAI, | 830 | .mfr_id = CFI_MFR_HYUNDAI, |
847 | .dev_id = HY29F002T, | 831 | .dev_id = HY29F002T, |
848 | .name = "Hyundai HY29F002T", | 832 | .name = "Hyundai HY29F002T", |
849 | .devtypes = CFI_DEVICETYPE_X8, | 833 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -858,7 +842,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
858 | ERASEINFO(0x04000,1), | 842 | ERASEINFO(0x04000,1), |
859 | } | 843 | } |
860 | }, { | 844 | }, { |
861 | .mfr_id = MANUFACTURER_INTEL, | 845 | .mfr_id = CFI_MFR_INTEL, |
862 | .dev_id = I28F004B3B, | 846 | .dev_id = I28F004B3B, |
863 | .name = "Intel 28F004B3B", | 847 | .name = "Intel 28F004B3B", |
864 | .devtypes = CFI_DEVICETYPE_X8, | 848 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -871,7 +855,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
871 | ERASEINFO(0x10000, 7), | 855 | ERASEINFO(0x10000, 7), |
872 | } | 856 | } |
873 | }, { | 857 | }, { |
874 | .mfr_id = MANUFACTURER_INTEL, | 858 | .mfr_id = CFI_MFR_INTEL, |
875 | .dev_id = I28F004B3T, | 859 | .dev_id = I28F004B3T, |
876 | .name = "Intel 28F004B3T", | 860 | .name = "Intel 28F004B3T", |
877 | .devtypes = CFI_DEVICETYPE_X8, | 861 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -884,7 +868,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
884 | ERASEINFO(0x02000, 8), | 868 | ERASEINFO(0x02000, 8), |
885 | } | 869 | } |
886 | }, { | 870 | }, { |
887 | .mfr_id = MANUFACTURER_INTEL, | 871 | .mfr_id = CFI_MFR_INTEL, |
888 | .dev_id = I28F400B3B, | 872 | .dev_id = I28F400B3B, |
889 | .name = "Intel 28F400B3B", | 873 | .name = "Intel 28F400B3B", |
890 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 874 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -897,7 +881,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
897 | ERASEINFO(0x10000, 7), | 881 | ERASEINFO(0x10000, 7), |
898 | } | 882 | } |
899 | }, { | 883 | }, { |
900 | .mfr_id = MANUFACTURER_INTEL, | 884 | .mfr_id = CFI_MFR_INTEL, |
901 | .dev_id = I28F400B3T, | 885 | .dev_id = I28F400B3T, |
902 | .name = "Intel 28F400B3T", | 886 | .name = "Intel 28F400B3T", |
903 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 887 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -910,7 +894,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
910 | ERASEINFO(0x02000, 8), | 894 | ERASEINFO(0x02000, 8), |
911 | } | 895 | } |
912 | }, { | 896 | }, { |
913 | .mfr_id = MANUFACTURER_INTEL, | 897 | .mfr_id = CFI_MFR_INTEL, |
914 | .dev_id = I28F008B3B, | 898 | .dev_id = I28F008B3B, |
915 | .name = "Intel 28F008B3B", | 899 | .name = "Intel 28F008B3B", |
916 | .devtypes = CFI_DEVICETYPE_X8, | 900 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -923,7 +907,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
923 | ERASEINFO(0x10000, 15), | 907 | ERASEINFO(0x10000, 15), |
924 | } | 908 | } |
925 | }, { | 909 | }, { |
926 | .mfr_id = MANUFACTURER_INTEL, | 910 | .mfr_id = CFI_MFR_INTEL, |
927 | .dev_id = I28F008B3T, | 911 | .dev_id = I28F008B3T, |
928 | .name = "Intel 28F008B3T", | 912 | .name = "Intel 28F008B3T", |
929 | .devtypes = CFI_DEVICETYPE_X8, | 913 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -936,7 +920,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
936 | ERASEINFO(0x02000, 8), | 920 | ERASEINFO(0x02000, 8), |
937 | } | 921 | } |
938 | }, { | 922 | }, { |
939 | .mfr_id = MANUFACTURER_INTEL, | 923 | .mfr_id = CFI_MFR_INTEL, |
940 | .dev_id = I28F008S5, | 924 | .dev_id = I28F008S5, |
941 | .name = "Intel 28F008S5", | 925 | .name = "Intel 28F008S5", |
942 | .devtypes = CFI_DEVICETYPE_X8, | 926 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -948,7 +932,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
948 | ERASEINFO(0x10000,16), | 932 | ERASEINFO(0x10000,16), |
949 | } | 933 | } |
950 | }, { | 934 | }, { |
951 | .mfr_id = MANUFACTURER_INTEL, | 935 | .mfr_id = CFI_MFR_INTEL, |
952 | .dev_id = I28F016S5, | 936 | .dev_id = I28F016S5, |
953 | .name = "Intel 28F016S5", | 937 | .name = "Intel 28F016S5", |
954 | .devtypes = CFI_DEVICETYPE_X8, | 938 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -960,7 +944,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
960 | ERASEINFO(0x10000,32), | 944 | ERASEINFO(0x10000,32), |
961 | } | 945 | } |
962 | }, { | 946 | }, { |
963 | .mfr_id = MANUFACTURER_INTEL, | 947 | .mfr_id = CFI_MFR_INTEL, |
964 | .dev_id = I28F008SA, | 948 | .dev_id = I28F008SA, |
965 | .name = "Intel 28F008SA", | 949 | .name = "Intel 28F008SA", |
966 | .devtypes = CFI_DEVICETYPE_X8, | 950 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -972,7 +956,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
972 | ERASEINFO(0x10000, 16), | 956 | ERASEINFO(0x10000, 16), |
973 | } | 957 | } |
974 | }, { | 958 | }, { |
975 | .mfr_id = MANUFACTURER_INTEL, | 959 | .mfr_id = CFI_MFR_INTEL, |
976 | .dev_id = I28F800B3B, | 960 | .dev_id = I28F800B3B, |
977 | .name = "Intel 28F800B3B", | 961 | .name = "Intel 28F800B3B", |
978 | .devtypes = CFI_DEVICETYPE_X16, | 962 | .devtypes = CFI_DEVICETYPE_X16, |
@@ -985,7 +969,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
985 | ERASEINFO(0x10000, 15), | 969 | ERASEINFO(0x10000, 15), |
986 | } | 970 | } |
987 | }, { | 971 | }, { |
988 | .mfr_id = MANUFACTURER_INTEL, | 972 | .mfr_id = CFI_MFR_INTEL, |
989 | .dev_id = I28F800B3T, | 973 | .dev_id = I28F800B3T, |
990 | .name = "Intel 28F800B3T", | 974 | .name = "Intel 28F800B3T", |
991 | .devtypes = CFI_DEVICETYPE_X16, | 975 | .devtypes = CFI_DEVICETYPE_X16, |
@@ -998,7 +982,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
998 | ERASEINFO(0x02000, 8), | 982 | ERASEINFO(0x02000, 8), |
999 | } | 983 | } |
1000 | }, { | 984 | }, { |
1001 | .mfr_id = MANUFACTURER_INTEL, | 985 | .mfr_id = CFI_MFR_INTEL, |
1002 | .dev_id = I28F016B3B, | 986 | .dev_id = I28F016B3B, |
1003 | .name = "Intel 28F016B3B", | 987 | .name = "Intel 28F016B3B", |
1004 | .devtypes = CFI_DEVICETYPE_X8, | 988 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1011,7 +995,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1011 | ERASEINFO(0x10000, 31), | 995 | ERASEINFO(0x10000, 31), |
1012 | } | 996 | } |
1013 | }, { | 997 | }, { |
1014 | .mfr_id = MANUFACTURER_INTEL, | 998 | .mfr_id = CFI_MFR_INTEL, |
1015 | .dev_id = I28F016S3, | 999 | .dev_id = I28F016S3, |
1016 | .name = "Intel I28F016S3", | 1000 | .name = "Intel I28F016S3", |
1017 | .devtypes = CFI_DEVICETYPE_X8, | 1001 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1023,7 +1007,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1023 | ERASEINFO(0x10000, 32), | 1007 | ERASEINFO(0x10000, 32), |
1024 | } | 1008 | } |
1025 | }, { | 1009 | }, { |
1026 | .mfr_id = MANUFACTURER_INTEL, | 1010 | .mfr_id = CFI_MFR_INTEL, |
1027 | .dev_id = I28F016B3T, | 1011 | .dev_id = I28F016B3T, |
1028 | .name = "Intel 28F016B3T", | 1012 | .name = "Intel 28F016B3T", |
1029 | .devtypes = CFI_DEVICETYPE_X8, | 1013 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1036,7 +1020,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1036 | ERASEINFO(0x02000, 8), | 1020 | ERASEINFO(0x02000, 8), |
1037 | } | 1021 | } |
1038 | }, { | 1022 | }, { |
1039 | .mfr_id = MANUFACTURER_INTEL, | 1023 | .mfr_id = CFI_MFR_INTEL, |
1040 | .dev_id = I28F160B3B, | 1024 | .dev_id = I28F160B3B, |
1041 | .name = "Intel 28F160B3B", | 1025 | .name = "Intel 28F160B3B", |
1042 | .devtypes = CFI_DEVICETYPE_X16, | 1026 | .devtypes = CFI_DEVICETYPE_X16, |
@@ -1049,7 +1033,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1049 | ERASEINFO(0x10000, 31), | 1033 | ERASEINFO(0x10000, 31), |
1050 | } | 1034 | } |
1051 | }, { | 1035 | }, { |
1052 | .mfr_id = MANUFACTURER_INTEL, | 1036 | .mfr_id = CFI_MFR_INTEL, |
1053 | .dev_id = I28F160B3T, | 1037 | .dev_id = I28F160B3T, |
1054 | .name = "Intel 28F160B3T", | 1038 | .name = "Intel 28F160B3T", |
1055 | .devtypes = CFI_DEVICETYPE_X16, | 1039 | .devtypes = CFI_DEVICETYPE_X16, |
@@ -1062,7 +1046,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1062 | ERASEINFO(0x02000, 8), | 1046 | ERASEINFO(0x02000, 8), |
1063 | } | 1047 | } |
1064 | }, { | 1048 | }, { |
1065 | .mfr_id = MANUFACTURER_INTEL, | 1049 | .mfr_id = CFI_MFR_INTEL, |
1066 | .dev_id = I28F320B3B, | 1050 | .dev_id = I28F320B3B, |
1067 | .name = "Intel 28F320B3B", | 1051 | .name = "Intel 28F320B3B", |
1068 | .devtypes = CFI_DEVICETYPE_X16, | 1052 | .devtypes = CFI_DEVICETYPE_X16, |
@@ -1075,7 +1059,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1075 | ERASEINFO(0x10000, 63), | 1059 | ERASEINFO(0x10000, 63), |
1076 | } | 1060 | } |
1077 | }, { | 1061 | }, { |
1078 | .mfr_id = MANUFACTURER_INTEL, | 1062 | .mfr_id = CFI_MFR_INTEL, |
1079 | .dev_id = I28F320B3T, | 1063 | .dev_id = I28F320B3T, |
1080 | .name = "Intel 28F320B3T", | 1064 | .name = "Intel 28F320B3T", |
1081 | .devtypes = CFI_DEVICETYPE_X16, | 1065 | .devtypes = CFI_DEVICETYPE_X16, |
@@ -1088,7 +1072,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1088 | ERASEINFO(0x02000, 8), | 1072 | ERASEINFO(0x02000, 8), |
1089 | } | 1073 | } |
1090 | }, { | 1074 | }, { |
1091 | .mfr_id = MANUFACTURER_INTEL, | 1075 | .mfr_id = CFI_MFR_INTEL, |
1092 | .dev_id = I28F640B3B, | 1076 | .dev_id = I28F640B3B, |
1093 | .name = "Intel 28F640B3B", | 1077 | .name = "Intel 28F640B3B", |
1094 | .devtypes = CFI_DEVICETYPE_X16, | 1078 | .devtypes = CFI_DEVICETYPE_X16, |
@@ -1101,7 +1085,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1101 | ERASEINFO(0x10000, 127), | 1085 | ERASEINFO(0x10000, 127), |
1102 | } | 1086 | } |
1103 | }, { | 1087 | }, { |
1104 | .mfr_id = MANUFACTURER_INTEL, | 1088 | .mfr_id = CFI_MFR_INTEL, |
1105 | .dev_id = I28F640B3T, | 1089 | .dev_id = I28F640B3T, |
1106 | .name = "Intel 28F640B3T", | 1090 | .name = "Intel 28F640B3T", |
1107 | .devtypes = CFI_DEVICETYPE_X16, | 1091 | .devtypes = CFI_DEVICETYPE_X16, |
@@ -1114,7 +1098,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1114 | ERASEINFO(0x02000, 8), | 1098 | ERASEINFO(0x02000, 8), |
1115 | } | 1099 | } |
1116 | }, { | 1100 | }, { |
1117 | .mfr_id = MANUFACTURER_INTEL, | 1101 | .mfr_id = CFI_MFR_INTEL, |
1118 | .dev_id = I28F640C3B, | 1102 | .dev_id = I28F640C3B, |
1119 | .name = "Intel 28F640C3B", | 1103 | .name = "Intel 28F640C3B", |
1120 | .devtypes = CFI_DEVICETYPE_X16, | 1104 | .devtypes = CFI_DEVICETYPE_X16, |
@@ -1127,7 +1111,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1127 | ERASEINFO(0x10000, 127), | 1111 | ERASEINFO(0x10000, 127), |
1128 | } | 1112 | } |
1129 | }, { | 1113 | }, { |
1130 | .mfr_id = MANUFACTURER_INTEL, | 1114 | .mfr_id = CFI_MFR_INTEL, |
1131 | .dev_id = I82802AB, | 1115 | .dev_id = I82802AB, |
1132 | .name = "Intel 82802AB", | 1116 | .name = "Intel 82802AB", |
1133 | .devtypes = CFI_DEVICETYPE_X8, | 1117 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1139,7 +1123,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1139 | ERASEINFO(0x10000,8), | 1123 | ERASEINFO(0x10000,8), |
1140 | } | 1124 | } |
1141 | }, { | 1125 | }, { |
1142 | .mfr_id = MANUFACTURER_INTEL, | 1126 | .mfr_id = CFI_MFR_INTEL, |
1143 | .dev_id = I82802AC, | 1127 | .dev_id = I82802AC, |
1144 | .name = "Intel 82802AC", | 1128 | .name = "Intel 82802AC", |
1145 | .devtypes = CFI_DEVICETYPE_X8, | 1129 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1151,7 +1135,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1151 | ERASEINFO(0x10000,16), | 1135 | ERASEINFO(0x10000,16), |
1152 | } | 1136 | } |
1153 | }, { | 1137 | }, { |
1154 | .mfr_id = MANUFACTURER_MACRONIX, | 1138 | .mfr_id = CFI_MFR_MACRONIX, |
1155 | .dev_id = MX29LV040C, | 1139 | .dev_id = MX29LV040C, |
1156 | .name = "Macronix MX29LV040C", | 1140 | .name = "Macronix MX29LV040C", |
1157 | .devtypes = CFI_DEVICETYPE_X8, | 1141 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1163,7 +1147,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1163 | ERASEINFO(0x10000,8), | 1147 | ERASEINFO(0x10000,8), |
1164 | } | 1148 | } |
1165 | }, { | 1149 | }, { |
1166 | .mfr_id = MANUFACTURER_MACRONIX, | 1150 | .mfr_id = CFI_MFR_MACRONIX, |
1167 | .dev_id = MX29LV160T, | 1151 | .dev_id = MX29LV160T, |
1168 | .name = "MXIC MX29LV160T", | 1152 | .name = "MXIC MX29LV160T", |
1169 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 1153 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -1178,7 +1162,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1178 | ERASEINFO(0x04000,1) | 1162 | ERASEINFO(0x04000,1) |
1179 | } | 1163 | } |
1180 | }, { | 1164 | }, { |
1181 | .mfr_id = MANUFACTURER_NEC, | 1165 | .mfr_id = CFI_MFR_NEC, |
1182 | .dev_id = UPD29F064115, | 1166 | .dev_id = UPD29F064115, |
1183 | .name = "NEC uPD29F064115", | 1167 | .name = "NEC uPD29F064115", |
1184 | .devtypes = CFI_DEVICETYPE_X16, | 1168 | .devtypes = CFI_DEVICETYPE_X16, |
@@ -1192,7 +1176,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1192 | ERASEINFO(0x2000,8), | 1176 | ERASEINFO(0x2000,8), |
1193 | } | 1177 | } |
1194 | }, { | 1178 | }, { |
1195 | .mfr_id = MANUFACTURER_MACRONIX, | 1179 | .mfr_id = CFI_MFR_MACRONIX, |
1196 | .dev_id = MX29LV160B, | 1180 | .dev_id = MX29LV160B, |
1197 | .name = "MXIC MX29LV160B", | 1181 | .name = "MXIC MX29LV160B", |
1198 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 1182 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -1207,7 +1191,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1207 | ERASEINFO(0x10000,31) | 1191 | ERASEINFO(0x10000,31) |
1208 | } | 1192 | } |
1209 | }, { | 1193 | }, { |
1210 | .mfr_id = MANUFACTURER_MACRONIX, | 1194 | .mfr_id = CFI_MFR_MACRONIX, |
1211 | .dev_id = MX29F040, | 1195 | .dev_id = MX29F040, |
1212 | .name = "Macronix MX29F040", | 1196 | .name = "Macronix MX29F040", |
1213 | .devtypes = CFI_DEVICETYPE_X8, | 1197 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1219,7 +1203,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1219 | ERASEINFO(0x10000,8), | 1203 | ERASEINFO(0x10000,8), |
1220 | } | 1204 | } |
1221 | }, { | 1205 | }, { |
1222 | .mfr_id = MANUFACTURER_MACRONIX, | 1206 | .mfr_id = CFI_MFR_MACRONIX, |
1223 | .dev_id = MX29F016, | 1207 | .dev_id = MX29F016, |
1224 | .name = "Macronix MX29F016", | 1208 | .name = "Macronix MX29F016", |
1225 | .devtypes = CFI_DEVICETYPE_X8, | 1209 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1231,7 +1215,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1231 | ERASEINFO(0x10000,32), | 1215 | ERASEINFO(0x10000,32), |
1232 | } | 1216 | } |
1233 | }, { | 1217 | }, { |
1234 | .mfr_id = MANUFACTURER_MACRONIX, | 1218 | .mfr_id = CFI_MFR_MACRONIX, |
1235 | .dev_id = MX29F004T, | 1219 | .dev_id = MX29F004T, |
1236 | .name = "Macronix MX29F004T", | 1220 | .name = "Macronix MX29F004T", |
1237 | .devtypes = CFI_DEVICETYPE_X8, | 1221 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1246,7 +1230,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1246 | ERASEINFO(0x04000,1), | 1230 | ERASEINFO(0x04000,1), |
1247 | } | 1231 | } |
1248 | }, { | 1232 | }, { |
1249 | .mfr_id = MANUFACTURER_MACRONIX, | 1233 | .mfr_id = CFI_MFR_MACRONIX, |
1250 | .dev_id = MX29F004B, | 1234 | .dev_id = MX29F004B, |
1251 | .name = "Macronix MX29F004B", | 1235 | .name = "Macronix MX29F004B", |
1252 | .devtypes = CFI_DEVICETYPE_X8, | 1236 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1261,7 +1245,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1261 | ERASEINFO(0x10000,7), | 1245 | ERASEINFO(0x10000,7), |
1262 | } | 1246 | } |
1263 | }, { | 1247 | }, { |
1264 | .mfr_id = MANUFACTURER_MACRONIX, | 1248 | .mfr_id = CFI_MFR_MACRONIX, |
1265 | .dev_id = MX29F002T, | 1249 | .dev_id = MX29F002T, |
1266 | .name = "Macronix MX29F002T", | 1250 | .name = "Macronix MX29F002T", |
1267 | .devtypes = CFI_DEVICETYPE_X8, | 1251 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1276,7 +1260,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1276 | ERASEINFO(0x04000,1), | 1260 | ERASEINFO(0x04000,1), |
1277 | } | 1261 | } |
1278 | }, { | 1262 | }, { |
1279 | .mfr_id = MANUFACTURER_PMC, | 1263 | .mfr_id = CFI_MFR_PMC, |
1280 | .dev_id = PM49FL002, | 1264 | .dev_id = PM49FL002, |
1281 | .name = "PMC Pm49FL002", | 1265 | .name = "PMC Pm49FL002", |
1282 | .devtypes = CFI_DEVICETYPE_X8, | 1266 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1288,7 +1272,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1288 | ERASEINFO( 0x01000, 64 ) | 1272 | ERASEINFO( 0x01000, 64 ) |
1289 | } | 1273 | } |
1290 | }, { | 1274 | }, { |
1291 | .mfr_id = MANUFACTURER_PMC, | 1275 | .mfr_id = CFI_MFR_PMC, |
1292 | .dev_id = PM49FL004, | 1276 | .dev_id = PM49FL004, |
1293 | .name = "PMC Pm49FL004", | 1277 | .name = "PMC Pm49FL004", |
1294 | .devtypes = CFI_DEVICETYPE_X8, | 1278 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1300,7 +1284,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1300 | ERASEINFO( 0x01000, 128 ) | 1284 | ERASEINFO( 0x01000, 128 ) |
1301 | } | 1285 | } |
1302 | }, { | 1286 | }, { |
1303 | .mfr_id = MANUFACTURER_PMC, | 1287 | .mfr_id = CFI_MFR_PMC, |
1304 | .dev_id = PM49FL008, | 1288 | .dev_id = PM49FL008, |
1305 | .name = "PMC Pm49FL008", | 1289 | .name = "PMC Pm49FL008", |
1306 | .devtypes = CFI_DEVICETYPE_X8, | 1290 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1312,7 +1296,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1312 | ERASEINFO( 0x01000, 256 ) | 1296 | ERASEINFO( 0x01000, 256 ) |
1313 | } | 1297 | } |
1314 | }, { | 1298 | }, { |
1315 | .mfr_id = MANUFACTURER_SHARP, | 1299 | .mfr_id = CFI_MFR_SHARP, |
1316 | .dev_id = LH28F640BF, | 1300 | .dev_id = LH28F640BF, |
1317 | .name = "LH28F640BF", | 1301 | .name = "LH28F640BF", |
1318 | .devtypes = CFI_DEVICETYPE_X8, | 1302 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1324,7 +1308,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1324 | ERASEINFO(0x40000,16), | 1308 | ERASEINFO(0x40000,16), |
1325 | } | 1309 | } |
1326 | }, { | 1310 | }, { |
1327 | .mfr_id = MANUFACTURER_SST, | 1311 | .mfr_id = CFI_MFR_SST, |
1328 | .dev_id = SST39LF512, | 1312 | .dev_id = SST39LF512, |
1329 | .name = "SST 39LF512", | 1313 | .name = "SST 39LF512", |
1330 | .devtypes = CFI_DEVICETYPE_X8, | 1314 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1336,7 +1320,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1336 | ERASEINFO(0x01000,16), | 1320 | ERASEINFO(0x01000,16), |
1337 | } | 1321 | } |
1338 | }, { | 1322 | }, { |
1339 | .mfr_id = MANUFACTURER_SST, | 1323 | .mfr_id = CFI_MFR_SST, |
1340 | .dev_id = SST39LF010, | 1324 | .dev_id = SST39LF010, |
1341 | .name = "SST 39LF010", | 1325 | .name = "SST 39LF010", |
1342 | .devtypes = CFI_DEVICETYPE_X8, | 1326 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1348,8 +1332,8 @@ static const struct amd_flash_info jedec_table[] = { | |||
1348 | ERASEINFO(0x01000,32), | 1332 | ERASEINFO(0x01000,32), |
1349 | } | 1333 | } |
1350 | }, { | 1334 | }, { |
1351 | .mfr_id = MANUFACTURER_SST, | 1335 | .mfr_id = CFI_MFR_SST, |
1352 | .dev_id = SST29EE020, | 1336 | .dev_id = SST29EE020, |
1353 | .name = "SST 29EE020", | 1337 | .name = "SST 29EE020", |
1354 | .devtypes = CFI_DEVICETYPE_X8, | 1338 | .devtypes = CFI_DEVICETYPE_X8, |
1355 | .uaddr = MTD_UADDR_0x5555_0x2AAA, | 1339 | .uaddr = MTD_UADDR_0x5555_0x2AAA, |
@@ -1359,9 +1343,9 @@ static const struct amd_flash_info jedec_table[] = { | |||
1359 | .regions = {ERASEINFO(0x01000,64), | 1343 | .regions = {ERASEINFO(0x01000,64), |
1360 | } | 1344 | } |
1361 | }, { | 1345 | }, { |
1362 | .mfr_id = MANUFACTURER_SST, | 1346 | .mfr_id = CFI_MFR_SST, |
1363 | .dev_id = SST29LE020, | 1347 | .dev_id = SST29LE020, |
1364 | .name = "SST 29LE020", | 1348 | .name = "SST 29LE020", |
1365 | .devtypes = CFI_DEVICETYPE_X8, | 1349 | .devtypes = CFI_DEVICETYPE_X8, |
1366 | .uaddr = MTD_UADDR_0x5555_0x2AAA, | 1350 | .uaddr = MTD_UADDR_0x5555_0x2AAA, |
1367 | .dev_size = SIZE_256KiB, | 1351 | .dev_size = SIZE_256KiB, |
@@ -1370,7 +1354,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1370 | .regions = {ERASEINFO(0x01000,64), | 1354 | .regions = {ERASEINFO(0x01000,64), |
1371 | } | 1355 | } |
1372 | }, { | 1356 | }, { |
1373 | .mfr_id = MANUFACTURER_SST, | 1357 | .mfr_id = CFI_MFR_SST, |
1374 | .dev_id = SST39LF020, | 1358 | .dev_id = SST39LF020, |
1375 | .name = "SST 39LF020", | 1359 | .name = "SST 39LF020", |
1376 | .devtypes = CFI_DEVICETYPE_X8, | 1360 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1382,7 +1366,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1382 | ERASEINFO(0x01000,64), | 1366 | ERASEINFO(0x01000,64), |
1383 | } | 1367 | } |
1384 | }, { | 1368 | }, { |
1385 | .mfr_id = MANUFACTURER_SST, | 1369 | .mfr_id = CFI_MFR_SST, |
1386 | .dev_id = SST39LF040, | 1370 | .dev_id = SST39LF040, |
1387 | .name = "SST 39LF040", | 1371 | .name = "SST 39LF040", |
1388 | .devtypes = CFI_DEVICETYPE_X8, | 1372 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1394,7 +1378,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1394 | ERASEINFO(0x01000,128), | 1378 | ERASEINFO(0x01000,128), |
1395 | } | 1379 | } |
1396 | }, { | 1380 | }, { |
1397 | .mfr_id = MANUFACTURER_SST, | 1381 | .mfr_id = CFI_MFR_SST, |
1398 | .dev_id = SST39SF010A, | 1382 | .dev_id = SST39SF010A, |
1399 | .name = "SST 39SF010A", | 1383 | .name = "SST 39SF010A", |
1400 | .devtypes = CFI_DEVICETYPE_X8, | 1384 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1406,7 +1390,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1406 | ERASEINFO(0x01000,32), | 1390 | ERASEINFO(0x01000,32), |
1407 | } | 1391 | } |
1408 | }, { | 1392 | }, { |
1409 | .mfr_id = MANUFACTURER_SST, | 1393 | .mfr_id = CFI_MFR_SST, |
1410 | .dev_id = SST39SF020A, | 1394 | .dev_id = SST39SF020A, |
1411 | .name = "SST 39SF020A", | 1395 | .name = "SST 39SF020A", |
1412 | .devtypes = CFI_DEVICETYPE_X8, | 1396 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1418,7 +1402,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1418 | ERASEINFO(0x01000,64), | 1402 | ERASEINFO(0x01000,64), |
1419 | } | 1403 | } |
1420 | }, { | 1404 | }, { |
1421 | .mfr_id = MANUFACTURER_SST, | 1405 | .mfr_id = CFI_MFR_SST, |
1422 | .dev_id = SST39SF040, | 1406 | .dev_id = SST39SF040, |
1423 | .name = "SST 39SF040", | 1407 | .name = "SST 39SF040", |
1424 | .devtypes = CFI_DEVICETYPE_X8, | 1408 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1430,7 +1414,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1430 | ERASEINFO(0x01000,128), | 1414 | ERASEINFO(0x01000,128), |
1431 | } | 1415 | } |
1432 | }, { | 1416 | }, { |
1433 | .mfr_id = MANUFACTURER_SST, | 1417 | .mfr_id = CFI_MFR_SST, |
1434 | .dev_id = SST49LF040B, | 1418 | .dev_id = SST49LF040B, |
1435 | .name = "SST 49LF040B", | 1419 | .name = "SST 49LF040B", |
1436 | .devtypes = CFI_DEVICETYPE_X8, | 1420 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1443,7 +1427,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1443 | } | 1427 | } |
1444 | }, { | 1428 | }, { |
1445 | 1429 | ||
1446 | .mfr_id = MANUFACTURER_SST, | 1430 | .mfr_id = CFI_MFR_SST, |
1447 | .dev_id = SST49LF004B, | 1431 | .dev_id = SST49LF004B, |
1448 | .name = "SST 49LF004B", | 1432 | .name = "SST 49LF004B", |
1449 | .devtypes = CFI_DEVICETYPE_X8, | 1433 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1455,7 +1439,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1455 | ERASEINFO(0x01000,128), | 1439 | ERASEINFO(0x01000,128), |
1456 | } | 1440 | } |
1457 | }, { | 1441 | }, { |
1458 | .mfr_id = MANUFACTURER_SST, | 1442 | .mfr_id = CFI_MFR_SST, |
1459 | .dev_id = SST49LF008A, | 1443 | .dev_id = SST49LF008A, |
1460 | .name = "SST 49LF008A", | 1444 | .name = "SST 49LF008A", |
1461 | .devtypes = CFI_DEVICETYPE_X8, | 1445 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1467,7 +1451,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1467 | ERASEINFO(0x01000,256), | 1451 | ERASEINFO(0x01000,256), |
1468 | } | 1452 | } |
1469 | }, { | 1453 | }, { |
1470 | .mfr_id = MANUFACTURER_SST, | 1454 | .mfr_id = CFI_MFR_SST, |
1471 | .dev_id = SST49LF030A, | 1455 | .dev_id = SST49LF030A, |
1472 | .name = "SST 49LF030A", | 1456 | .name = "SST 49LF030A", |
1473 | .devtypes = CFI_DEVICETYPE_X8, | 1457 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1479,7 +1463,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1479 | ERASEINFO(0x01000,96), | 1463 | ERASEINFO(0x01000,96), |
1480 | } | 1464 | } |
1481 | }, { | 1465 | }, { |
1482 | .mfr_id = MANUFACTURER_SST, | 1466 | .mfr_id = CFI_MFR_SST, |
1483 | .dev_id = SST49LF040A, | 1467 | .dev_id = SST49LF040A, |
1484 | .name = "SST 49LF040A", | 1468 | .name = "SST 49LF040A", |
1485 | .devtypes = CFI_DEVICETYPE_X8, | 1469 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1491,7 +1475,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1491 | ERASEINFO(0x01000,128), | 1475 | ERASEINFO(0x01000,128), |
1492 | } | 1476 | } |
1493 | }, { | 1477 | }, { |
1494 | .mfr_id = MANUFACTURER_SST, | 1478 | .mfr_id = CFI_MFR_SST, |
1495 | .dev_id = SST49LF080A, | 1479 | .dev_id = SST49LF080A, |
1496 | .name = "SST 49LF080A", | 1480 | .name = "SST 49LF080A", |
1497 | .devtypes = CFI_DEVICETYPE_X8, | 1481 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1503,7 +1487,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1503 | ERASEINFO(0x01000,256), | 1487 | ERASEINFO(0x01000,256), |
1504 | } | 1488 | } |
1505 | }, { | 1489 | }, { |
1506 | .mfr_id = MANUFACTURER_SST, /* should be CFI */ | 1490 | .mfr_id = CFI_MFR_SST, /* should be CFI */ |
1507 | .dev_id = SST39LF160, | 1491 | .dev_id = SST39LF160, |
1508 | .name = "SST 39LF160", | 1492 | .name = "SST 39LF160", |
1509 | .devtypes = CFI_DEVICETYPE_X16, | 1493 | .devtypes = CFI_DEVICETYPE_X16, |
@@ -1516,7 +1500,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1516 | ERASEINFO(0x1000,256) | 1500 | ERASEINFO(0x1000,256) |
1517 | } | 1501 | } |
1518 | }, { | 1502 | }, { |
1519 | .mfr_id = MANUFACTURER_SST, /* should be CFI */ | 1503 | .mfr_id = CFI_MFR_SST, /* should be CFI */ |
1520 | .dev_id = SST39VF1601, | 1504 | .dev_id = SST39VF1601, |
1521 | .name = "SST 39VF1601", | 1505 | .name = "SST 39VF1601", |
1522 | .devtypes = CFI_DEVICETYPE_X16, | 1506 | .devtypes = CFI_DEVICETYPE_X16, |
@@ -1529,7 +1513,35 @@ static const struct amd_flash_info jedec_table[] = { | |||
1529 | ERASEINFO(0x1000,256) | 1513 | ERASEINFO(0x1000,256) |
1530 | } | 1514 | } |
1531 | }, { | 1515 | }, { |
1532 | .mfr_id = MANUFACTURER_SST, /* should be CFI */ | 1516 | /* CFI is broken: reports AMD_STD, but needs custom uaddr */ |
1517 | .mfr_id = CFI_MFR_SST, | ||
1518 | .dev_id = SST39WF1601, | ||
1519 | .name = "SST 39WF1601", | ||
1520 | .devtypes = CFI_DEVICETYPE_X16, | ||
1521 | .uaddr = MTD_UADDR_0xAAAA_0x5555, | ||
1522 | .dev_size = SIZE_2MiB, | ||
1523 | .cmd_set = P_ID_AMD_STD, | ||
1524 | .nr_regions = 2, | ||
1525 | .regions = { | ||
1526 | ERASEINFO(0x1000,256), | ||
1527 | ERASEINFO(0x1000,256) | ||
1528 | } | ||
1529 | }, { | ||
1530 | /* CFI is broken: reports AMD_STD, but needs custom uaddr */ | ||
1531 | .mfr_id = CFI_MFR_SST, | ||
1532 | .dev_id = SST39WF1602, | ||
1533 | .name = "SST 39WF1602", | ||
1534 | .devtypes = CFI_DEVICETYPE_X16, | ||
1535 | .uaddr = MTD_UADDR_0xAAAA_0x5555, | ||
1536 | .dev_size = SIZE_2MiB, | ||
1537 | .cmd_set = P_ID_AMD_STD, | ||
1538 | .nr_regions = 2, | ||
1539 | .regions = { | ||
1540 | ERASEINFO(0x1000,256), | ||
1541 | ERASEINFO(0x1000,256) | ||
1542 | } | ||
1543 | }, { | ||
1544 | .mfr_id = CFI_MFR_SST, /* should be CFI */ | ||
1533 | .dev_id = SST39VF3201, | 1545 | .dev_id = SST39VF3201, |
1534 | .name = "SST 39VF3201", | 1546 | .name = "SST 39VF3201", |
1535 | .devtypes = CFI_DEVICETYPE_X16, | 1547 | .devtypes = CFI_DEVICETYPE_X16, |
@@ -1544,7 +1556,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1544 | ERASEINFO(0x1000,256) | 1556 | ERASEINFO(0x1000,256) |
1545 | } | 1557 | } |
1546 | }, { | 1558 | }, { |
1547 | .mfr_id = MANUFACTURER_SST, | 1559 | .mfr_id = CFI_MFR_SST, |
1548 | .dev_id = SST36VF3203, | 1560 | .dev_id = SST36VF3203, |
1549 | .name = "SST 36VF3203", | 1561 | .name = "SST 36VF3203", |
1550 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 1562 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -1556,7 +1568,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1556 | ERASEINFO(0x10000,64), | 1568 | ERASEINFO(0x10000,64), |
1557 | } | 1569 | } |
1558 | }, { | 1570 | }, { |
1559 | .mfr_id = MANUFACTURER_ST, | 1571 | .mfr_id = CFI_MFR_ST, |
1560 | .dev_id = M29F800AB, | 1572 | .dev_id = M29F800AB, |
1561 | .name = "ST M29F800AB", | 1573 | .name = "ST M29F800AB", |
1562 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 1574 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -1571,7 +1583,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1571 | ERASEINFO(0x10000,15), | 1583 | ERASEINFO(0x10000,15), |
1572 | } | 1584 | } |
1573 | }, { | 1585 | }, { |
1574 | .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ | 1586 | .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */ |
1575 | .dev_id = M29W800DT, | 1587 | .dev_id = M29W800DT, |
1576 | .name = "ST M29W800DT", | 1588 | .name = "ST M29W800DT", |
1577 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 1589 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -1586,7 +1598,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1586 | ERASEINFO(0x04000,1) | 1598 | ERASEINFO(0x04000,1) |
1587 | } | 1599 | } |
1588 | }, { | 1600 | }, { |
1589 | .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ | 1601 | .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */ |
1590 | .dev_id = M29W800DB, | 1602 | .dev_id = M29W800DB, |
1591 | .name = "ST M29W800DB", | 1603 | .name = "ST M29W800DB", |
1592 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 1604 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -1601,7 +1613,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1601 | ERASEINFO(0x10000,15) | 1613 | ERASEINFO(0x10000,15) |
1602 | } | 1614 | } |
1603 | }, { | 1615 | }, { |
1604 | .mfr_id = MANUFACTURER_ST, | 1616 | .mfr_id = CFI_MFR_ST, |
1605 | .dev_id = M29W400DT, | 1617 | .dev_id = M29W400DT, |
1606 | .name = "ST M29W400DT", | 1618 | .name = "ST M29W400DT", |
1607 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 1619 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -1616,7 +1628,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1616 | ERASEINFO(0x10000,1) | 1628 | ERASEINFO(0x10000,1) |
1617 | } | 1629 | } |
1618 | }, { | 1630 | }, { |
1619 | .mfr_id = MANUFACTURER_ST, | 1631 | .mfr_id = CFI_MFR_ST, |
1620 | .dev_id = M29W400DB, | 1632 | .dev_id = M29W400DB, |
1621 | .name = "ST M29W400DB", | 1633 | .name = "ST M29W400DB", |
1622 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 1634 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -1631,7 +1643,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1631 | ERASEINFO(0x10000,7) | 1643 | ERASEINFO(0x10000,7) |
1632 | } | 1644 | } |
1633 | }, { | 1645 | }, { |
1634 | .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ | 1646 | .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */ |
1635 | .dev_id = M29W160DT, | 1647 | .dev_id = M29W160DT, |
1636 | .name = "ST M29W160DT", | 1648 | .name = "ST M29W160DT", |
1637 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 1649 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -1646,7 +1658,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1646 | ERASEINFO(0x04000,1) | 1658 | ERASEINFO(0x04000,1) |
1647 | } | 1659 | } |
1648 | }, { | 1660 | }, { |
1649 | .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ | 1661 | .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */ |
1650 | .dev_id = M29W160DB, | 1662 | .dev_id = M29W160DB, |
1651 | .name = "ST M29W160DB", | 1663 | .name = "ST M29W160DB", |
1652 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 1664 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -1661,7 +1673,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1661 | ERASEINFO(0x10000,31) | 1673 | ERASEINFO(0x10000,31) |
1662 | } | 1674 | } |
1663 | }, { | 1675 | }, { |
1664 | .mfr_id = MANUFACTURER_ST, | 1676 | .mfr_id = CFI_MFR_ST, |
1665 | .dev_id = M29W040B, | 1677 | .dev_id = M29W040B, |
1666 | .name = "ST M29W040B", | 1678 | .name = "ST M29W040B", |
1667 | .devtypes = CFI_DEVICETYPE_X8, | 1679 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1673,7 +1685,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1673 | ERASEINFO(0x10000,8), | 1685 | ERASEINFO(0x10000,8), |
1674 | } | 1686 | } |
1675 | }, { | 1687 | }, { |
1676 | .mfr_id = MANUFACTURER_ST, | 1688 | .mfr_id = CFI_MFR_ST, |
1677 | .dev_id = M50FW040, | 1689 | .dev_id = M50FW040, |
1678 | .name = "ST M50FW040", | 1690 | .name = "ST M50FW040", |
1679 | .devtypes = CFI_DEVICETYPE_X8, | 1691 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1685,7 +1697,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1685 | ERASEINFO(0x10000,8), | 1697 | ERASEINFO(0x10000,8), |
1686 | } | 1698 | } |
1687 | }, { | 1699 | }, { |
1688 | .mfr_id = MANUFACTURER_ST, | 1700 | .mfr_id = CFI_MFR_ST, |
1689 | .dev_id = M50FW080, | 1701 | .dev_id = M50FW080, |
1690 | .name = "ST M50FW080", | 1702 | .name = "ST M50FW080", |
1691 | .devtypes = CFI_DEVICETYPE_X8, | 1703 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1697,7 +1709,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1697 | ERASEINFO(0x10000,16), | 1709 | ERASEINFO(0x10000,16), |
1698 | } | 1710 | } |
1699 | }, { | 1711 | }, { |
1700 | .mfr_id = MANUFACTURER_ST, | 1712 | .mfr_id = CFI_MFR_ST, |
1701 | .dev_id = M50FW016, | 1713 | .dev_id = M50FW016, |
1702 | .name = "ST M50FW016", | 1714 | .name = "ST M50FW016", |
1703 | .devtypes = CFI_DEVICETYPE_X8, | 1715 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1709,7 +1721,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1709 | ERASEINFO(0x10000,32), | 1721 | ERASEINFO(0x10000,32), |
1710 | } | 1722 | } |
1711 | }, { | 1723 | }, { |
1712 | .mfr_id = MANUFACTURER_ST, | 1724 | .mfr_id = CFI_MFR_ST, |
1713 | .dev_id = M50LPW080, | 1725 | .dev_id = M50LPW080, |
1714 | .name = "ST M50LPW080", | 1726 | .name = "ST M50LPW080", |
1715 | .devtypes = CFI_DEVICETYPE_X8, | 1727 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1721,7 +1733,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1721 | ERASEINFO(0x10000,16), | 1733 | ERASEINFO(0x10000,16), |
1722 | }, | 1734 | }, |
1723 | }, { | 1735 | }, { |
1724 | .mfr_id = MANUFACTURER_ST, | 1736 | .mfr_id = CFI_MFR_ST, |
1725 | .dev_id = M50FLW080A, | 1737 | .dev_id = M50FLW080A, |
1726 | .name = "ST M50FLW080A", | 1738 | .name = "ST M50FLW080A", |
1727 | .devtypes = CFI_DEVICETYPE_X8, | 1739 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1736,7 +1748,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1736 | ERASEINFO(0x1000,16), | 1748 | ERASEINFO(0x1000,16), |
1737 | } | 1749 | } |
1738 | }, { | 1750 | }, { |
1739 | .mfr_id = MANUFACTURER_ST, | 1751 | .mfr_id = CFI_MFR_ST, |
1740 | .dev_id = M50FLW080B, | 1752 | .dev_id = M50FLW080B, |
1741 | .name = "ST M50FLW080B", | 1753 | .name = "ST M50FLW080B", |
1742 | .devtypes = CFI_DEVICETYPE_X8, | 1754 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1751,7 +1763,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1751 | ERASEINFO(0x1000,16), | 1763 | ERASEINFO(0x1000,16), |
1752 | } | 1764 | } |
1753 | }, { | 1765 | }, { |
1754 | .mfr_id = 0xff00 | MANUFACTURER_ST, | 1766 | .mfr_id = 0xff00 | CFI_MFR_ST, |
1755 | .dev_id = 0xff00 | PSD4256G6V, | 1767 | .dev_id = 0xff00 | PSD4256G6V, |
1756 | .name = "ST PSD4256G6V", | 1768 | .name = "ST PSD4256G6V", |
1757 | .devtypes = CFI_DEVICETYPE_X16, | 1769 | .devtypes = CFI_DEVICETYPE_X16, |
@@ -1763,7 +1775,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1763 | ERASEINFO(0x10000,16), | 1775 | ERASEINFO(0x10000,16), |
1764 | } | 1776 | } |
1765 | }, { | 1777 | }, { |
1766 | .mfr_id = MANUFACTURER_TOSHIBA, | 1778 | .mfr_id = CFI_MFR_TOSHIBA, |
1767 | .dev_id = TC58FVT160, | 1779 | .dev_id = TC58FVT160, |
1768 | .name = "Toshiba TC58FVT160", | 1780 | .name = "Toshiba TC58FVT160", |
1769 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 1781 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -1778,7 +1790,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1778 | ERASEINFO(0x04000,1) | 1790 | ERASEINFO(0x04000,1) |
1779 | } | 1791 | } |
1780 | }, { | 1792 | }, { |
1781 | .mfr_id = MANUFACTURER_TOSHIBA, | 1793 | .mfr_id = CFI_MFR_TOSHIBA, |
1782 | .dev_id = TC58FVB160, | 1794 | .dev_id = TC58FVB160, |
1783 | .name = "Toshiba TC58FVB160", | 1795 | .name = "Toshiba TC58FVB160", |
1784 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 1796 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -1793,7 +1805,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1793 | ERASEINFO(0x10000,31) | 1805 | ERASEINFO(0x10000,31) |
1794 | } | 1806 | } |
1795 | }, { | 1807 | }, { |
1796 | .mfr_id = MANUFACTURER_TOSHIBA, | 1808 | .mfr_id = CFI_MFR_TOSHIBA, |
1797 | .dev_id = TC58FVB321, | 1809 | .dev_id = TC58FVB321, |
1798 | .name = "Toshiba TC58FVB321", | 1810 | .name = "Toshiba TC58FVB321", |
1799 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 1811 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -1806,7 +1818,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1806 | ERASEINFO(0x10000,63) | 1818 | ERASEINFO(0x10000,63) |
1807 | } | 1819 | } |
1808 | }, { | 1820 | }, { |
1809 | .mfr_id = MANUFACTURER_TOSHIBA, | 1821 | .mfr_id = CFI_MFR_TOSHIBA, |
1810 | .dev_id = TC58FVT321, | 1822 | .dev_id = TC58FVT321, |
1811 | .name = "Toshiba TC58FVT321", | 1823 | .name = "Toshiba TC58FVT321", |
1812 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 1824 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -1819,7 +1831,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1819 | ERASEINFO(0x02000,8) | 1831 | ERASEINFO(0x02000,8) |
1820 | } | 1832 | } |
1821 | }, { | 1833 | }, { |
1822 | .mfr_id = MANUFACTURER_TOSHIBA, | 1834 | .mfr_id = CFI_MFR_TOSHIBA, |
1823 | .dev_id = TC58FVB641, | 1835 | .dev_id = TC58FVB641, |
1824 | .name = "Toshiba TC58FVB641", | 1836 | .name = "Toshiba TC58FVB641", |
1825 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 1837 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -1832,7 +1844,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1832 | ERASEINFO(0x10000,127) | 1844 | ERASEINFO(0x10000,127) |
1833 | } | 1845 | } |
1834 | }, { | 1846 | }, { |
1835 | .mfr_id = MANUFACTURER_TOSHIBA, | 1847 | .mfr_id = CFI_MFR_TOSHIBA, |
1836 | .dev_id = TC58FVT641, | 1848 | .dev_id = TC58FVT641, |
1837 | .name = "Toshiba TC58FVT641", | 1849 | .name = "Toshiba TC58FVT641", |
1838 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, | 1850 | .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, |
@@ -1845,7 +1857,7 @@ static const struct amd_flash_info jedec_table[] = { | |||
1845 | ERASEINFO(0x02000,8) | 1857 | ERASEINFO(0x02000,8) |
1846 | } | 1858 | } |
1847 | }, { | 1859 | }, { |
1848 | .mfr_id = MANUFACTURER_WINBOND, | 1860 | .mfr_id = CFI_MFR_WINBOND, |
1849 | .dev_id = W49V002A, | 1861 | .dev_id = W49V002A, |
1850 | .name = "Winbond W49V002A", | 1862 | .name = "Winbond W49V002A", |
1851 | .devtypes = CFI_DEVICETYPE_X8, | 1863 | .devtypes = CFI_DEVICETYPE_X8, |
@@ -1878,7 +1890,7 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base, | |||
1878 | mask = (1 << (cfi->device_type * 8)) - 1; | 1890 | mask = (1 << (cfi->device_type * 8)) - 1; |
1879 | result = map_read(map, base + ofs); | 1891 | result = map_read(map, base + ofs); |
1880 | bank++; | 1892 | bank++; |
1881 | } while ((result.x[0] & mask) == CONTINUATION_CODE); | 1893 | } while ((result.x[0] & mask) == CFI_MFR_CONTINUATION); |
1882 | 1894 | ||
1883 | return result.x[0] & mask; | 1895 | return result.x[0] & mask; |
1884 | } | 1896 | } |
@@ -1969,7 +1981,7 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index) | |||
1969 | p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type; | 1981 | p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type; |
1970 | p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type; | 1982 | p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type; |
1971 | 1983 | ||
1972 | return 1; /* ok */ | 1984 | return 1; /* ok */ |
1973 | } | 1985 | } |
1974 | 1986 | ||
1975 | 1987 | ||
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile index ab5c9b92ac82..f3226b1d38fc 100644 --- a/drivers/mtd/devices/Makefile +++ b/drivers/mtd/devices/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | # | 1 | # |
2 | # linux/drivers/devices/Makefile | 2 | # linux/drivers/mtd/devices/Makefile |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_MTD_DOC2000) += doc2000.o | 5 | obj-$(CONFIG_MTD_DOC2000) += doc2000.o |
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c index ce6424008ed9..93651865ddbe 100644 --- a/drivers/mtd/devices/block2mtd.c +++ b/drivers/mtd/devices/block2mtd.c | |||
@@ -276,12 +276,10 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size) | |||
276 | 276 | ||
277 | /* Setup the MTD structure */ | 277 | /* Setup the MTD structure */ |
278 | /* make the name contain the block device in */ | 278 | /* make the name contain the block device in */ |
279 | name = kmalloc(sizeof("block2mtd: ") + strlen(devname) + 1, | 279 | name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname); |
280 | GFP_KERNEL); | ||
281 | if (!name) | 280 | if (!name) |
282 | goto devinit_err; | 281 | goto devinit_err; |
283 | 282 | ||
284 | sprintf(name, "block2mtd: %s", devname); | ||
285 | dev->mtd.name = name; | 283 | dev->mtd.name = name; |
286 | 284 | ||
287 | dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK; | 285 | dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK; |
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c index d2fd550f7e09..fc8ea0a57ac2 100644 --- a/drivers/mtd/devices/pmc551.c +++ b/drivers/mtd/devices/pmc551.c | |||
@@ -668,7 +668,7 @@ static int __init init_pmc551(void) | |||
668 | { | 668 | { |
669 | struct pci_dev *PCI_Device = NULL; | 669 | struct pci_dev *PCI_Device = NULL; |
670 | struct mypriv *priv; | 670 | struct mypriv *priv; |
671 | int count, found = 0; | 671 | int found = 0; |
672 | struct mtd_info *mtd; | 672 | struct mtd_info *mtd; |
673 | u32 length = 0; | 673 | u32 length = 0; |
674 | 674 | ||
@@ -695,7 +695,7 @@ static int __init init_pmc551(void) | |||
695 | /* | 695 | /* |
696 | * PCU-bus chipset probe. | 696 | * PCU-bus chipset probe. |
697 | */ | 697 | */ |
698 | for (count = 0; count < MAX_MTD_DEVICES; count++) { | 698 | for (;;) { |
699 | 699 | ||
700 | if ((PCI_Device = pci_get_device(PCI_VENDOR_ID_V3_SEMI, | 700 | if ((PCI_Device = pci_get_device(PCI_VENDOR_ID_V3_SEMI, |
701 | PCI_DEVICE_ID_V3_SEMI_V370PDC, | 701 | PCI_DEVICE_ID_V3_SEMI_V370PDC, |
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c index fe17054ee2fe..ab5d8cd02a15 100644 --- a/drivers/mtd/devices/sst25l.c +++ b/drivers/mtd/devices/sst25l.c | |||
@@ -73,15 +73,25 @@ static struct flash_info __initdata sst25l_flash_info[] = { | |||
73 | 73 | ||
74 | static int sst25l_status(struct sst25l_flash *flash, int *status) | 74 | static int sst25l_status(struct sst25l_flash *flash, int *status) |
75 | { | 75 | { |
76 | unsigned char command, response; | 76 | struct spi_message m; |
77 | struct spi_transfer t; | ||
78 | unsigned char cmd_resp[2]; | ||
77 | int err; | 79 | int err; |
78 | 80 | ||
79 | command = SST25L_CMD_RDSR; | 81 | spi_message_init(&m); |
80 | err = spi_write_then_read(flash->spi, &command, 1, &response, 1); | 82 | memset(&t, 0, sizeof(struct spi_transfer)); |
83 | |||
84 | cmd_resp[0] = SST25L_CMD_RDSR; | ||
85 | cmd_resp[1] = 0xff; | ||
86 | t.tx_buf = cmd_resp; | ||
87 | t.rx_buf = cmd_resp; | ||
88 | t.len = sizeof(cmd_resp); | ||
89 | spi_message_add_tail(&t, &m); | ||
90 | err = spi_sync(flash->spi, &m); | ||
81 | if (err < 0) | 91 | if (err < 0) |
82 | return err; | 92 | return err; |
83 | 93 | ||
84 | *status = response; | 94 | *status = cmd_resp[1]; |
85 | return 0; | 95 | return 0; |
86 | } | 96 | } |
87 | 97 | ||
@@ -328,33 +338,32 @@ out: | |||
328 | static struct flash_info *__init sst25l_match_device(struct spi_device *spi) | 338 | static struct flash_info *__init sst25l_match_device(struct spi_device *spi) |
329 | { | 339 | { |
330 | struct flash_info *flash_info = NULL; | 340 | struct flash_info *flash_info = NULL; |
331 | unsigned char command[4], response; | 341 | struct spi_message m; |
342 | struct spi_transfer t; | ||
343 | unsigned char cmd_resp[6]; | ||
332 | int i, err; | 344 | int i, err; |
333 | uint16_t id; | 345 | uint16_t id; |
334 | 346 | ||
335 | command[0] = SST25L_CMD_READ_ID; | 347 | spi_message_init(&m); |
336 | command[1] = 0; | 348 | memset(&t, 0, sizeof(struct spi_transfer)); |
337 | command[2] = 0; | 349 | |
338 | command[3] = 0; | 350 | cmd_resp[0] = SST25L_CMD_READ_ID; |
339 | err = spi_write_then_read(spi, command, sizeof(command), &response, 1); | 351 | cmd_resp[1] = 0; |
352 | cmd_resp[2] = 0; | ||
353 | cmd_resp[3] = 0; | ||
354 | cmd_resp[4] = 0xff; | ||
355 | cmd_resp[5] = 0xff; | ||
356 | t.tx_buf = cmd_resp; | ||
357 | t.rx_buf = cmd_resp; | ||
358 | t.len = sizeof(cmd_resp); | ||
359 | spi_message_add_tail(&t, &m); | ||
360 | err = spi_sync(spi, &m); | ||
340 | if (err < 0) { | 361 | if (err < 0) { |
341 | dev_err(&spi->dev, "error reading device id msb\n"); | 362 | dev_err(&spi->dev, "error reading device id\n"); |
342 | return NULL; | 363 | return NULL; |
343 | } | 364 | } |
344 | 365 | ||
345 | id = response << 8; | 366 | id = (cmd_resp[4] << 8) | cmd_resp[5]; |
346 | |||
347 | command[0] = SST25L_CMD_READ_ID; | ||
348 | command[1] = 0; | ||
349 | command[2] = 0; | ||
350 | command[3] = 1; | ||
351 | err = spi_write_then_read(spi, command, sizeof(command), &response, 1); | ||
352 | if (err < 0) { | ||
353 | dev_err(&spi->dev, "error reading device id lsb\n"); | ||
354 | return NULL; | ||
355 | } | ||
356 | |||
357 | id |= response; | ||
358 | 367 | ||
359 | for (i = 0; i < ARRAY_SIZE(sst25l_flash_info); i++) | 368 | for (i = 0; i < ARRAY_SIZE(sst25l_flash_info); i++) |
360 | if (sst25l_flash_info[i].device_id == id) | 369 | if (sst25l_flash_info[i].device_id == id) |
@@ -411,17 +420,6 @@ static int __init sst25l_probe(struct spi_device *spi) | |||
411 | flash->mtd.erasesize, flash->mtd.erasesize / 1024, | 420 | flash->mtd.erasesize, flash->mtd.erasesize / 1024, |
412 | flash->mtd.numeraseregions); | 421 | flash->mtd.numeraseregions); |
413 | 422 | ||
414 | if (flash->mtd.numeraseregions) | ||
415 | for (i = 0; i < flash->mtd.numeraseregions; i++) | ||
416 | DEBUG(MTD_DEBUG_LEVEL2, | ||
417 | "mtd.eraseregions[%d] = { .offset = 0x%llx, " | ||
418 | ".erasesize = 0x%.8x (%uKiB), " | ||
419 | ".numblocks = %d }\n", | ||
420 | i, (long long)flash->mtd.eraseregions[i].offset, | ||
421 | flash->mtd.eraseregions[i].erasesize, | ||
422 | flash->mtd.eraseregions[i].erasesize / 1024, | ||
423 | flash->mtd.eraseregions[i].numblocks); | ||
424 | |||
425 | if (mtd_has_partitions()) { | 423 | if (mtd_has_partitions()) { |
426 | struct mtd_partition *parts = NULL; | 424 | struct mtd_partition *parts = NULL; |
427 | int nr_parts = 0; | 425 | int nr_parts = 0; |
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c index e56d6b42f020..62da9eb7032b 100644 --- a/drivers/mtd/ftl.c +++ b/drivers/mtd/ftl.c | |||
@@ -1082,7 +1082,6 @@ static void ftl_remove_dev(struct mtd_blktrans_dev *dev) | |||
1082 | { | 1082 | { |
1083 | del_mtd_blktrans_dev(dev); | 1083 | del_mtd_blktrans_dev(dev); |
1084 | ftl_freepart((partition_t *)dev); | 1084 | ftl_freepart((partition_t *)dev); |
1085 | kfree(dev); | ||
1086 | } | 1085 | } |
1087 | 1086 | ||
1088 | static struct mtd_blktrans_ops ftl_tr = { | 1087 | static struct mtd_blktrans_ops ftl_tr = { |
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c index 8aca5523a337..015a7fe1b6ee 100644 --- a/drivers/mtd/inftlcore.c +++ b/drivers/mtd/inftlcore.c | |||
@@ -139,7 +139,6 @@ static void inftl_remove_dev(struct mtd_blktrans_dev *dev) | |||
139 | 139 | ||
140 | kfree(inftl->PUtable); | 140 | kfree(inftl->PUtable); |
141 | kfree(inftl->VUtable); | 141 | kfree(inftl->VUtable); |
142 | kfree(inftl); | ||
143 | } | 142 | } |
144 | 143 | ||
145 | /* | 144 | /* |
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c index 32e82aef3e53..8f988d7d3c5c 100644 --- a/drivers/mtd/inftlmount.c +++ b/drivers/mtd/inftlmount.c | |||
@@ -100,9 +100,10 @@ static int find_boot_record(struct INFTLrecord *inftl) | |||
100 | } | 100 | } |
101 | 101 | ||
102 | /* To be safer with BIOS, also use erase mark as discriminant */ | 102 | /* To be safer with BIOS, also use erase mark as discriminant */ |
103 | if ((ret = inftl_read_oob(mtd, block * inftl->EraseSize + | 103 | ret = inftl_read_oob(mtd, |
104 | SECTORSIZE + 8, 8, &retlen, | 104 | block * inftl->EraseSize + SECTORSIZE + 8, |
105 | (char *)&h1) < 0)) { | 105 | 8, &retlen,(char *)&h1); |
106 | if (ret < 0) { | ||
106 | printk(KERN_WARNING "INFTL: ANAND header found at " | 107 | printk(KERN_WARNING "INFTL: ANAND header found at " |
107 | "0x%x in mtd%d, but OOB data read failed " | 108 | "0x%x in mtd%d, but OOB data read failed " |
108 | "(err %d)\n", block * inftl->EraseSize, | 109 | "(err %d)\n", block * inftl->EraseSize, |
diff --git a/drivers/mtd/internal.h b/drivers/mtd/internal.h deleted file mode 100644 index c658fe7216b5..000000000000 --- a/drivers/mtd/internal.h +++ /dev/null | |||
@@ -1,17 +0,0 @@ | |||
1 | /* Internal MTD definitions | ||
2 | * | ||
3 | * Copyright © 2006 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | /* | ||
13 | * mtdbdi.c | ||
14 | */ | ||
15 | extern struct backing_dev_info mtd_bdi_unmappable; | ||
16 | extern struct backing_dev_info mtd_bdi_ro_mappable; | ||
17 | extern struct backing_dev_info mtd_bdi_rw_mappable; | ||
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c index a73ee12aad81..fece5be58715 100644 --- a/drivers/mtd/lpddr/lpddr_cmds.c +++ b/drivers/mtd/lpddr/lpddr_cmds.c | |||
@@ -107,8 +107,7 @@ struct mtd_info *lpddr_cmdset(struct map_info *map) | |||
107 | /* those should be reset too since | 107 | /* those should be reset too since |
108 | they create memory references. */ | 108 | they create memory references. */ |
109 | init_waitqueue_head(&chip->wq); | 109 | init_waitqueue_head(&chip->wq); |
110 | spin_lock_init(&chip->_spinlock); | 110 | mutex_init(&chip->mutex); |
111 | chip->mutex = &chip->_spinlock; | ||
112 | chip++; | 111 | chip++; |
113 | } | 112 | } |
114 | } | 113 | } |
@@ -144,7 +143,7 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip, | |||
144 | } | 143 | } |
145 | 144 | ||
146 | /* OK Still waiting. Drop the lock, wait a while and retry. */ | 145 | /* OK Still waiting. Drop the lock, wait a while and retry. */ |
147 | spin_unlock(chip->mutex); | 146 | mutex_unlock(&chip->mutex); |
148 | if (sleep_time >= 1000000/HZ) { | 147 | if (sleep_time >= 1000000/HZ) { |
149 | /* | 148 | /* |
150 | * Half of the normal delay still remaining | 149 | * Half of the normal delay still remaining |
@@ -159,17 +158,17 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip, | |||
159 | cond_resched(); | 158 | cond_resched(); |
160 | timeo--; | 159 | timeo--; |
161 | } | 160 | } |
162 | spin_lock(chip->mutex); | 161 | mutex_lock(&chip->mutex); |
163 | 162 | ||
164 | while (chip->state != chip_state) { | 163 | while (chip->state != chip_state) { |
165 | /* Someone's suspended the operation: sleep */ | 164 | /* Someone's suspended the operation: sleep */ |
166 | DECLARE_WAITQUEUE(wait, current); | 165 | DECLARE_WAITQUEUE(wait, current); |
167 | set_current_state(TASK_UNINTERRUPTIBLE); | 166 | set_current_state(TASK_UNINTERRUPTIBLE); |
168 | add_wait_queue(&chip->wq, &wait); | 167 | add_wait_queue(&chip->wq, &wait); |
169 | spin_unlock(chip->mutex); | 168 | mutex_unlock(&chip->mutex); |
170 | schedule(); | 169 | schedule(); |
171 | remove_wait_queue(&chip->wq, &wait); | 170 | remove_wait_queue(&chip->wq, &wait); |
172 | spin_lock(chip->mutex); | 171 | mutex_lock(&chip->mutex); |
173 | } | 172 | } |
174 | if (chip->erase_suspended || chip->write_suspended) { | 173 | if (chip->erase_suspended || chip->write_suspended) { |
175 | /* Suspend has occured while sleep: reset timeout */ | 174 | /* Suspend has occured while sleep: reset timeout */ |
@@ -230,20 +229,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) | |||
230 | * it'll happily send us to sleep. In any case, when | 229 | * it'll happily send us to sleep. In any case, when |
231 | * get_chip returns success we're clear to go ahead. | 230 | * get_chip returns success we're clear to go ahead. |
232 | */ | 231 | */ |
233 | ret = spin_trylock(contender->mutex); | 232 | ret = mutex_trylock(&contender->mutex); |
234 | spin_unlock(&shared->lock); | 233 | spin_unlock(&shared->lock); |
235 | if (!ret) | 234 | if (!ret) |
236 | goto retry; | 235 | goto retry; |
237 | spin_unlock(chip->mutex); | 236 | mutex_unlock(&chip->mutex); |
238 | ret = chip_ready(map, contender, mode); | 237 | ret = chip_ready(map, contender, mode); |
239 | spin_lock(chip->mutex); | 238 | mutex_lock(&chip->mutex); |
240 | 239 | ||
241 | if (ret == -EAGAIN) { | 240 | if (ret == -EAGAIN) { |
242 | spin_unlock(contender->mutex); | 241 | mutex_unlock(&contender->mutex); |
243 | goto retry; | 242 | goto retry; |
244 | } | 243 | } |
245 | if (ret) { | 244 | if (ret) { |
246 | spin_unlock(contender->mutex); | 245 | mutex_unlock(&contender->mutex); |
247 | return ret; | 246 | return ret; |
248 | } | 247 | } |
249 | spin_lock(&shared->lock); | 248 | spin_lock(&shared->lock); |
@@ -252,10 +251,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) | |||
252 | * state. Put contender and retry. */ | 251 | * state. Put contender and retry. */ |
253 | if (chip->state == FL_SYNCING) { | 252 | if (chip->state == FL_SYNCING) { |
254 | put_chip(map, contender); | 253 | put_chip(map, contender); |
255 | spin_unlock(contender->mutex); | 254 | mutex_unlock(&contender->mutex); |
256 | goto retry; | 255 | goto retry; |
257 | } | 256 | } |
258 | spin_unlock(contender->mutex); | 257 | mutex_unlock(&contender->mutex); |
259 | } | 258 | } |
260 | 259 | ||
261 | /* Check if we have suspended erase on this chip. | 260 | /* Check if we have suspended erase on this chip. |
@@ -265,10 +264,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) | |||
265 | spin_unlock(&shared->lock); | 264 | spin_unlock(&shared->lock); |
266 | set_current_state(TASK_UNINTERRUPTIBLE); | 265 | set_current_state(TASK_UNINTERRUPTIBLE); |
267 | add_wait_queue(&chip->wq, &wait); | 266 | add_wait_queue(&chip->wq, &wait); |
268 | spin_unlock(chip->mutex); | 267 | mutex_unlock(&chip->mutex); |
269 | schedule(); | 268 | schedule(); |
270 | remove_wait_queue(&chip->wq, &wait); | 269 | remove_wait_queue(&chip->wq, &wait); |
271 | spin_lock(chip->mutex); | 270 | mutex_lock(&chip->mutex); |
272 | goto retry; | 271 | goto retry; |
273 | } | 272 | } |
274 | 273 | ||
@@ -337,10 +336,10 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode) | |||
337 | sleep: | 336 | sleep: |
338 | set_current_state(TASK_UNINTERRUPTIBLE); | 337 | set_current_state(TASK_UNINTERRUPTIBLE); |
339 | add_wait_queue(&chip->wq, &wait); | 338 | add_wait_queue(&chip->wq, &wait); |
340 | spin_unlock(chip->mutex); | 339 | mutex_unlock(&chip->mutex); |
341 | schedule(); | 340 | schedule(); |
342 | remove_wait_queue(&chip->wq, &wait); | 341 | remove_wait_queue(&chip->wq, &wait); |
343 | spin_lock(chip->mutex); | 342 | mutex_lock(&chip->mutex); |
344 | return -EAGAIN; | 343 | return -EAGAIN; |
345 | } | 344 | } |
346 | } | 345 | } |
@@ -356,12 +355,12 @@ static void put_chip(struct map_info *map, struct flchip *chip) | |||
356 | if (shared->writing && shared->writing != chip) { | 355 | if (shared->writing && shared->writing != chip) { |
357 | /* give back the ownership */ | 356 | /* give back the ownership */ |
358 | struct flchip *loaner = shared->writing; | 357 | struct flchip *loaner = shared->writing; |
359 | spin_lock(loaner->mutex); | 358 | mutex_lock(&loaner->mutex); |
360 | spin_unlock(&shared->lock); | 359 | spin_unlock(&shared->lock); |
361 | spin_unlock(chip->mutex); | 360 | mutex_unlock(&chip->mutex); |
362 | put_chip(map, loaner); | 361 | put_chip(map, loaner); |
363 | spin_lock(chip->mutex); | 362 | mutex_lock(&chip->mutex); |
364 | spin_unlock(loaner->mutex); | 363 | mutex_unlock(&loaner->mutex); |
365 | wake_up(&chip->wq); | 364 | wake_up(&chip->wq); |
366 | return; | 365 | return; |
367 | } | 366 | } |
@@ -414,10 +413,10 @@ int do_write_buffer(struct map_info *map, struct flchip *chip, | |||
414 | 413 | ||
415 | wbufsize = 1 << lpddr->qinfo->BufSizeShift; | 414 | wbufsize = 1 << lpddr->qinfo->BufSizeShift; |
416 | 415 | ||
417 | spin_lock(chip->mutex); | 416 | mutex_lock(&chip->mutex); |
418 | ret = get_chip(map, chip, FL_WRITING); | 417 | ret = get_chip(map, chip, FL_WRITING); |
419 | if (ret) { | 418 | if (ret) { |
420 | spin_unlock(chip->mutex); | 419 | mutex_unlock(&chip->mutex); |
421 | return ret; | 420 | return ret; |
422 | } | 421 | } |
423 | /* Figure out the number of words to write */ | 422 | /* Figure out the number of words to write */ |
@@ -478,7 +477,7 @@ int do_write_buffer(struct map_info *map, struct flchip *chip, | |||
478 | } | 477 | } |
479 | 478 | ||
480 | out: put_chip(map, chip); | 479 | out: put_chip(map, chip); |
481 | spin_unlock(chip->mutex); | 480 | mutex_unlock(&chip->mutex); |
482 | return ret; | 481 | return ret; |
483 | } | 482 | } |
484 | 483 | ||
@@ -490,10 +489,10 @@ int do_erase_oneblock(struct mtd_info *mtd, loff_t adr) | |||
490 | struct flchip *chip = &lpddr->chips[chipnum]; | 489 | struct flchip *chip = &lpddr->chips[chipnum]; |
491 | int ret; | 490 | int ret; |
492 | 491 | ||
493 | spin_lock(chip->mutex); | 492 | mutex_lock(&chip->mutex); |
494 | ret = get_chip(map, chip, FL_ERASING); | 493 | ret = get_chip(map, chip, FL_ERASING); |
495 | if (ret) { | 494 | if (ret) { |
496 | spin_unlock(chip->mutex); | 495 | mutex_unlock(&chip->mutex); |
497 | return ret; | 496 | return ret; |
498 | } | 497 | } |
499 | send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL); | 498 | send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL); |
@@ -505,7 +504,7 @@ int do_erase_oneblock(struct mtd_info *mtd, loff_t adr) | |||
505 | goto out; | 504 | goto out; |
506 | } | 505 | } |
507 | out: put_chip(map, chip); | 506 | out: put_chip(map, chip); |
508 | spin_unlock(chip->mutex); | 507 | mutex_unlock(&chip->mutex); |
509 | return ret; | 508 | return ret; |
510 | } | 509 | } |
511 | 510 | ||
@@ -518,10 +517,10 @@ static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len, | |||
518 | struct flchip *chip = &lpddr->chips[chipnum]; | 517 | struct flchip *chip = &lpddr->chips[chipnum]; |
519 | int ret = 0; | 518 | int ret = 0; |
520 | 519 | ||
521 | spin_lock(chip->mutex); | 520 | mutex_lock(&chip->mutex); |
522 | ret = get_chip(map, chip, FL_READY); | 521 | ret = get_chip(map, chip, FL_READY); |
523 | if (ret) { | 522 | if (ret) { |
524 | spin_unlock(chip->mutex); | 523 | mutex_unlock(&chip->mutex); |
525 | return ret; | 524 | return ret; |
526 | } | 525 | } |
527 | 526 | ||
@@ -529,7 +528,7 @@ static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len, | |||
529 | *retlen = len; | 528 | *retlen = len; |
530 | 529 | ||
531 | put_chip(map, chip); | 530 | put_chip(map, chip); |
532 | spin_unlock(chip->mutex); | 531 | mutex_unlock(&chip->mutex); |
533 | return ret; | 532 | return ret; |
534 | } | 533 | } |
535 | 534 | ||
@@ -569,9 +568,9 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len, | |||
569 | else | 568 | else |
570 | thislen = len; | 569 | thislen = len; |
571 | /* get the chip */ | 570 | /* get the chip */ |
572 | spin_lock(chip->mutex); | 571 | mutex_lock(&chip->mutex); |
573 | ret = get_chip(map, chip, FL_POINT); | 572 | ret = get_chip(map, chip, FL_POINT); |
574 | spin_unlock(chip->mutex); | 573 | mutex_unlock(&chip->mutex); |
575 | if (ret) | 574 | if (ret) |
576 | break; | 575 | break; |
577 | 576 | ||
@@ -611,7 +610,7 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len) | |||
611 | else | 610 | else |
612 | thislen = len; | 611 | thislen = len; |
613 | 612 | ||
614 | spin_lock(chip->mutex); | 613 | mutex_lock(&chip->mutex); |
615 | if (chip->state == FL_POINT) { | 614 | if (chip->state == FL_POINT) { |
616 | chip->ref_point_counter--; | 615 | chip->ref_point_counter--; |
617 | if (chip->ref_point_counter == 0) | 616 | if (chip->ref_point_counter == 0) |
@@ -621,7 +620,7 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len) | |||
621 | "pointed region\n", map->name); | 620 | "pointed region\n", map->name); |
622 | 621 | ||
623 | put_chip(map, chip); | 622 | put_chip(map, chip); |
624 | spin_unlock(chip->mutex); | 623 | mutex_unlock(&chip->mutex); |
625 | 624 | ||
626 | len -= thislen; | 625 | len -= thislen; |
627 | ofs = 0; | 626 | ofs = 0; |
@@ -727,10 +726,10 @@ int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk) | |||
727 | int chipnum = adr >> lpddr->chipshift; | 726 | int chipnum = adr >> lpddr->chipshift; |
728 | struct flchip *chip = &lpddr->chips[chipnum]; | 727 | struct flchip *chip = &lpddr->chips[chipnum]; |
729 | 728 | ||
730 | spin_lock(chip->mutex); | 729 | mutex_lock(&chip->mutex); |
731 | ret = get_chip(map, chip, FL_LOCKING); | 730 | ret = get_chip(map, chip, FL_LOCKING); |
732 | if (ret) { | 731 | if (ret) { |
733 | spin_unlock(chip->mutex); | 732 | mutex_unlock(&chip->mutex); |
734 | return ret; | 733 | return ret; |
735 | } | 734 | } |
736 | 735 | ||
@@ -750,7 +749,7 @@ int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk) | |||
750 | goto out; | 749 | goto out; |
751 | } | 750 | } |
752 | out: put_chip(map, chip); | 751 | out: put_chip(map, chip); |
753 | spin_unlock(chip->mutex); | 752 | mutex_unlock(&chip->mutex); |
754 | return ret; | 753 | return ret; |
755 | } | 754 | } |
756 | 755 | ||
@@ -771,10 +770,10 @@ int word_program(struct map_info *map, loff_t adr, uint32_t curval) | |||
771 | int chipnum = adr >> lpddr->chipshift; | 770 | int chipnum = adr >> lpddr->chipshift; |
772 | struct flchip *chip = &lpddr->chips[chipnum]; | 771 | struct flchip *chip = &lpddr->chips[chipnum]; |
773 | 772 | ||
774 | spin_lock(chip->mutex); | 773 | mutex_lock(&chip->mutex); |
775 | ret = get_chip(map, chip, FL_WRITING); | 774 | ret = get_chip(map, chip, FL_WRITING); |
776 | if (ret) { | 775 | if (ret) { |
777 | spin_unlock(chip->mutex); | 776 | mutex_unlock(&chip->mutex); |
778 | return ret; | 777 | return ret; |
779 | } | 778 | } |
780 | 779 | ||
@@ -788,7 +787,7 @@ int word_program(struct map_info *map, loff_t adr, uint32_t curval) | |||
788 | } | 787 | } |
789 | 788 | ||
790 | out: put_chip(map, chip); | 789 | out: put_chip(map, chip); |
791 | spin_unlock(chip->mutex); | 790 | mutex_unlock(&chip->mutex); |
792 | return ret; | 791 | return ret; |
793 | } | 792 | } |
794 | 793 | ||
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c index 79bf40f48b75..dbfe17baf046 100644 --- a/drivers/mtd/lpddr/qinfo_probe.c +++ b/drivers/mtd/lpddr/qinfo_probe.c | |||
@@ -134,13 +134,12 @@ out: | |||
134 | static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr) | 134 | static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr) |
135 | { | 135 | { |
136 | 136 | ||
137 | lpddr->qinfo = kmalloc(sizeof(struct qinfo_chip), GFP_KERNEL); | 137 | lpddr->qinfo = kzalloc(sizeof(struct qinfo_chip), GFP_KERNEL); |
138 | if (!lpddr->qinfo) { | 138 | if (!lpddr->qinfo) { |
139 | printk(KERN_WARNING "%s: no memory for LPDDR qinfo structure\n", | 139 | printk(KERN_WARNING "%s: no memory for LPDDR qinfo structure\n", |
140 | map->name); | 140 | map->name); |
141 | return 0; | 141 | return 0; |
142 | } | 142 | } |
143 | memset(lpddr->qinfo, 0, sizeof(struct qinfo_chip)); | ||
144 | 143 | ||
145 | /* Get the ManuID */ | 144 | /* Get the ManuID */ |
146 | lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID)); | 145 | lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID)); |
@@ -185,13 +184,11 @@ static struct lpddr_private *lpddr_probe_chip(struct map_info *map) | |||
185 | lpddr.numchips = 1; | 184 | lpddr.numchips = 1; |
186 | 185 | ||
187 | numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum; | 186 | numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum; |
188 | retlpddr = kmalloc(sizeof(struct lpddr_private) + | 187 | retlpddr = kzalloc(sizeof(struct lpddr_private) + |
189 | numvirtchips * sizeof(struct flchip), GFP_KERNEL); | 188 | numvirtchips * sizeof(struct flchip), GFP_KERNEL); |
190 | if (!retlpddr) | 189 | if (!retlpddr) |
191 | return NULL; | 190 | return NULL; |
192 | 191 | ||
193 | memset(retlpddr, 0, sizeof(struct lpddr_private) + | ||
194 | numvirtchips * sizeof(struct flchip)); | ||
195 | memcpy(retlpddr, &lpddr, sizeof(struct lpddr_private)); | 192 | memcpy(retlpddr, &lpddr, sizeof(struct lpddr_private)); |
196 | 193 | ||
197 | retlpddr->numchips = numvirtchips; | 194 | retlpddr->numchips = numvirtchips; |
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index aa2807d0ce72..f22bc9f05ddb 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig | |||
@@ -435,7 +435,7 @@ config MTD_PCI | |||
435 | 435 | ||
436 | config MTD_PCMCIA | 436 | config MTD_PCMCIA |
437 | tristate "PCMCIA MTD driver" | 437 | tristate "PCMCIA MTD driver" |
438 | depends on PCMCIA && MTD_COMPLEX_MAPPINGS && BROKEN | 438 | depends on PCMCIA && MTD_COMPLEX_MAPPINGS |
439 | help | 439 | help |
440 | Map driver for accessing PCMCIA linear flash memory cards. These | 440 | Map driver for accessing PCMCIA linear flash memory cards. These |
441 | cards are usually around 4-16MiB in size. This does not include | 441 | cards are usually around 4-16MiB in size. This does not include |
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c index c0fd99b0c525..85dd18193cf2 100644 --- a/drivers/mtd/maps/bfin-async-flash.c +++ b/drivers/mtd/maps/bfin-async-flash.c | |||
@@ -70,7 +70,7 @@ static void switch_back(struct async_state *state) | |||
70 | local_irq_restore(state->irq_flags); | 70 | local_irq_restore(state->irq_flags); |
71 | } | 71 | } |
72 | 72 | ||
73 | static map_word bfin_read(struct map_info *map, unsigned long ofs) | 73 | static map_word bfin_flash_read(struct map_info *map, unsigned long ofs) |
74 | { | 74 | { |
75 | struct async_state *state = (struct async_state *)map->map_priv_1; | 75 | struct async_state *state = (struct async_state *)map->map_priv_1; |
76 | uint16_t word; | 76 | uint16_t word; |
@@ -86,7 +86,7 @@ static map_word bfin_read(struct map_info *map, unsigned long ofs) | |||
86 | return test; | 86 | return test; |
87 | } | 87 | } |
88 | 88 | ||
89 | static void bfin_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) | 89 | static void bfin_flash_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) |
90 | { | 90 | { |
91 | struct async_state *state = (struct async_state *)map->map_priv_1; | 91 | struct async_state *state = (struct async_state *)map->map_priv_1; |
92 | 92 | ||
@@ -97,7 +97,7 @@ static void bfin_copy_from(struct map_info *map, void *to, unsigned long from, s | |||
97 | switch_back(state); | 97 | switch_back(state); |
98 | } | 98 | } |
99 | 99 | ||
100 | static void bfin_write(struct map_info *map, map_word d1, unsigned long ofs) | 100 | static void bfin_flash_write(struct map_info *map, map_word d1, unsigned long ofs) |
101 | { | 101 | { |
102 | struct async_state *state = (struct async_state *)map->map_priv_1; | 102 | struct async_state *state = (struct async_state *)map->map_priv_1; |
103 | uint16_t d; | 103 | uint16_t d; |
@@ -112,7 +112,7 @@ static void bfin_write(struct map_info *map, map_word d1, unsigned long ofs) | |||
112 | switch_back(state); | 112 | switch_back(state); |
113 | } | 113 | } |
114 | 114 | ||
115 | static void bfin_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) | 115 | static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) |
116 | { | 116 | { |
117 | struct async_state *state = (struct async_state *)map->map_priv_1; | 117 | struct async_state *state = (struct async_state *)map->map_priv_1; |
118 | 118 | ||
@@ -141,10 +141,10 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev) | |||
141 | return -ENOMEM; | 141 | return -ENOMEM; |
142 | 142 | ||
143 | state->map.name = DRIVER_NAME; | 143 | state->map.name = DRIVER_NAME; |
144 | state->map.read = bfin_read; | 144 | state->map.read = bfin_flash_read; |
145 | state->map.copy_from = bfin_copy_from; | 145 | state->map.copy_from = bfin_flash_copy_from; |
146 | state->map.write = bfin_write; | 146 | state->map.write = bfin_flash_write; |
147 | state->map.copy_to = bfin_copy_to; | 147 | state->map.copy_to = bfin_flash_copy_to; |
148 | state->map.bankwidth = pdata->width; | 148 | state->map.bankwidth = pdata->width; |
149 | state->map.size = memory->end - memory->start + 1; | 149 | state->map.size = memory->end - memory->start + 1; |
150 | state->map.virt = (void __iomem *)memory->start; | 150 | state->map.virt = (void __iomem *)memory->start; |
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c index d41f34766e53..c09f4f57093e 100644 --- a/drivers/mtd/maps/ceiva.c +++ b/drivers/mtd/maps/ceiva.c | |||
@@ -253,7 +253,7 @@ static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd | |||
253 | 253 | ||
254 | static int __init clps_setup_flash(void) | 254 | static int __init clps_setup_flash(void) |
255 | { | 255 | { |
256 | int nr; | 256 | int nr = 0; |
257 | 257 | ||
258 | #ifdef CONFIG_ARCH_CEIVA | 258 | #ifdef CONFIG_ARCH_CEIVA |
259 | if (machine_is_ceiva()) { | 259 | if (machine_is_ceiva()) { |
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c index 1bdf0ee6d0b6..9639d83a9d6c 100644 --- a/drivers/mtd/maps/ixp2000.c +++ b/drivers/mtd/maps/ixp2000.c | |||
@@ -165,12 +165,11 @@ static int ixp2000_flash_probe(struct platform_device *dev) | |||
165 | return -EIO; | 165 | return -EIO; |
166 | } | 166 | } |
167 | 167 | ||
168 | info = kmalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL); | 168 | info = kzalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL); |
169 | if(!info) { | 169 | if(!info) { |
170 | err = -ENOMEM; | 170 | err = -ENOMEM; |
171 | goto Error; | 171 | goto Error; |
172 | } | 172 | } |
173 | memset(info, 0, sizeof(struct ixp2000_flash_info)); | ||
174 | 173 | ||
175 | platform_set_drvdata(dev, info); | 174 | platform_set_drvdata(dev, info); |
176 | 175 | ||
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c index 7b0515297411..e0a5e0426ead 100644 --- a/drivers/mtd/maps/ixp4xx.c +++ b/drivers/mtd/maps/ixp4xx.c | |||
@@ -107,8 +107,8 @@ static void ixp4xx_copy_from(struct map_info *map, void *to, | |||
107 | return; | 107 | return; |
108 | 108 | ||
109 | if (from & 1) { | 109 | if (from & 1) { |
110 | *dest++ = BYTE1(flash_read16(src)); | 110 | *dest++ = BYTE1(flash_read16(src-1)); |
111 | src++; | 111 | src++; |
112 | --len; | 112 | --len; |
113 | } | 113 | } |
114 | 114 | ||
@@ -196,12 +196,11 @@ static int ixp4xx_flash_probe(struct platform_device *dev) | |||
196 | return err; | 196 | return err; |
197 | } | 197 | } |
198 | 198 | ||
199 | info = kmalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL); | 199 | info = kzalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL); |
200 | if(!info) { | 200 | if(!info) { |
201 | err = -ENOMEM; | 201 | err = -ENOMEM; |
202 | goto Error; | 202 | goto Error; |
203 | } | 203 | } |
204 | memset(info, 0, sizeof(struct ixp4xx_flash_info)); | ||
205 | 204 | ||
206 | platform_set_drvdata(dev, info); | 205 | platform_set_drvdata(dev, info); |
207 | 206 | ||
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c index 689d6a79ffc0..e699e6ac23df 100644 --- a/drivers/mtd/maps/pcmciamtd.c +++ b/drivers/mtd/maps/pcmciamtd.c | |||
@@ -40,10 +40,7 @@ MODULE_PARM_DESC(debug, "Set Debug Level 0=quiet, 5=noisy"); | |||
40 | static const int debug = 0; | 40 | static const int debug = 0; |
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | #define err(format, arg...) printk(KERN_ERR "pcmciamtd: " format "\n" , ## arg) | ||
44 | #define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg) | 43 | #define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg) |
45 | #define warn(format, arg...) printk(KERN_WARNING "pcmciamtd: " format "\n" , ## arg) | ||
46 | |||
47 | 44 | ||
48 | #define DRIVER_DESC "PCMCIA Flash memory card driver" | 45 | #define DRIVER_DESC "PCMCIA Flash memory card driver" |
49 | 46 | ||
@@ -52,7 +49,6 @@ static const int debug = 0; | |||
52 | 49 | ||
53 | struct pcmciamtd_dev { | 50 | struct pcmciamtd_dev { |
54 | struct pcmcia_device *p_dev; | 51 | struct pcmcia_device *p_dev; |
55 | dev_node_t node; /* device node */ | ||
56 | caddr_t win_base; /* ioremapped address of PCMCIA window */ | 52 | caddr_t win_base; /* ioremapped address of PCMCIA window */ |
57 | unsigned int win_size; /* size of window */ | 53 | unsigned int win_size; /* size of window */ |
58 | unsigned int offset; /* offset into card the window currently points at */ | 54 | unsigned int offset; /* offset into card the window currently points at */ |
@@ -100,7 +96,9 @@ module_param(mem_type, int, 0); | |||
100 | MODULE_PARM_DESC(mem_type, "Set Memory type (0=Flash, 1=RAM, 2=ROM, default=0)"); | 96 | MODULE_PARM_DESC(mem_type, "Set Memory type (0=Flash, 1=RAM, 2=ROM, default=0)"); |
101 | 97 | ||
102 | 98 | ||
103 | /* read/write{8,16} copy_{from,to} routines with window remapping to access whole card */ | 99 | /* read/write{8,16} copy_{from,to} routines with window remapping |
100 | * to access whole card | ||
101 | */ | ||
104 | static caddr_t remap_window(struct map_info *map, unsigned long to) | 102 | static caddr_t remap_window(struct map_info *map, unsigned long to) |
105 | { | 103 | { |
106 | struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; | 104 | struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; |
@@ -137,7 +135,7 @@ static map_word pcmcia_read8_remap(struct map_info *map, unsigned long ofs) | |||
137 | return d; | 135 | return d; |
138 | 136 | ||
139 | d.x[0] = readb(addr); | 137 | d.x[0] = readb(addr); |
140 | DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, addr, d.x[0]); | 138 | DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx", ofs, addr, d.x[0]); |
141 | return d; | 139 | return d; |
142 | } | 140 | } |
143 | 141 | ||
@@ -152,7 +150,7 @@ static map_word pcmcia_read16_remap(struct map_info *map, unsigned long ofs) | |||
152 | return d; | 150 | return d; |
153 | 151 | ||
154 | d.x[0] = readw(addr); | 152 | d.x[0] = readw(addr); |
155 | DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, addr, d.x[0]); | 153 | DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx", ofs, addr, d.x[0]); |
156 | return d; | 154 | return d; |
157 | } | 155 | } |
158 | 156 | ||
@@ -162,7 +160,7 @@ static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long | |||
162 | struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; | 160 | struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; |
163 | unsigned long win_size = dev->win_size; | 161 | unsigned long win_size = dev->win_size; |
164 | 162 | ||
165 | DEBUG(3, "to = %p from = %lu len = %u", to, from, len); | 163 | DEBUG(3, "to = %p from = %lu len = %zd", to, from, len); |
166 | while(len) { | 164 | while(len) { |
167 | int toread = win_size - (from & (win_size-1)); | 165 | int toread = win_size - (from & (win_size-1)); |
168 | caddr_t addr; | 166 | caddr_t addr; |
@@ -190,7 +188,7 @@ static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long | |||
190 | if(!addr) | 188 | if(!addr) |
191 | return; | 189 | return; |
192 | 190 | ||
193 | DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02x", adr, addr, d.x[0]); | 191 | DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx", adr, addr, d.x[0]); |
194 | writeb(d.x[0], addr); | 192 | writeb(d.x[0], addr); |
195 | } | 193 | } |
196 | 194 | ||
@@ -201,7 +199,7 @@ static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long | |||
201 | if(!addr) | 199 | if(!addr) |
202 | return; | 200 | return; |
203 | 201 | ||
204 | DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04x", adr, addr, d.x[0]); | 202 | DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx", adr, addr, d.x[0]); |
205 | writew(d.x[0], addr); | 203 | writew(d.x[0], addr); |
206 | } | 204 | } |
207 | 205 | ||
@@ -211,7 +209,7 @@ static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const v | |||
211 | struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; | 209 | struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; |
212 | unsigned long win_size = dev->win_size; | 210 | unsigned long win_size = dev->win_size; |
213 | 211 | ||
214 | DEBUG(3, "to = %lu from = %p len = %u", to, from, len); | 212 | DEBUG(3, "to = %lu from = %p len = %zd", to, from, len); |
215 | while(len) { | 213 | while(len) { |
216 | int towrite = win_size - (to & (win_size-1)); | 214 | int towrite = win_size - (to & (win_size-1)); |
217 | caddr_t addr; | 215 | caddr_t addr; |
@@ -245,7 +243,8 @@ static map_word pcmcia_read8(struct map_info *map, unsigned long ofs) | |||
245 | return d; | 243 | return d; |
246 | 244 | ||
247 | d.x[0] = readb(win_base + ofs); | 245 | d.x[0] = readb(win_base + ofs); |
248 | DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, win_base + ofs, d.x[0]); | 246 | DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx", |
247 | ofs, win_base + ofs, d.x[0]); | ||
249 | return d; | 248 | return d; |
250 | } | 249 | } |
251 | 250 | ||
@@ -259,7 +258,8 @@ static map_word pcmcia_read16(struct map_info *map, unsigned long ofs) | |||
259 | return d; | 258 | return d; |
260 | 259 | ||
261 | d.x[0] = readw(win_base + ofs); | 260 | d.x[0] = readw(win_base + ofs); |
262 | DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, win_base + ofs, d.x[0]); | 261 | DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx", |
262 | ofs, win_base + ofs, d.x[0]); | ||
263 | return d; | 263 | return d; |
264 | } | 264 | } |
265 | 265 | ||
@@ -271,32 +271,34 @@ static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from, | |||
271 | if(DEV_REMOVED(map)) | 271 | if(DEV_REMOVED(map)) |
272 | return; | 272 | return; |
273 | 273 | ||
274 | DEBUG(3, "to = %p from = %lu len = %u", to, from, len); | 274 | DEBUG(3, "to = %p from = %lu len = %zd", to, from, len); |
275 | memcpy_fromio(to, win_base + from, len); | 275 | memcpy_fromio(to, win_base + from, len); |
276 | } | 276 | } |
277 | 277 | ||
278 | 278 | ||
279 | static void pcmcia_write8(struct map_info *map, u8 d, unsigned long adr) | 279 | static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr) |
280 | { | 280 | { |
281 | caddr_t win_base = (caddr_t)map->map_priv_2; | 281 | caddr_t win_base = (caddr_t)map->map_priv_2; |
282 | 282 | ||
283 | if(DEV_REMOVED(map)) | 283 | if(DEV_REMOVED(map)) |
284 | return; | 284 | return; |
285 | 285 | ||
286 | DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02x", adr, win_base + adr, d); | 286 | DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx", |
287 | writeb(d, win_base + adr); | 287 | adr, win_base + adr, d.x[0]); |
288 | writeb(d.x[0], win_base + adr); | ||
288 | } | 289 | } |
289 | 290 | ||
290 | 291 | ||
291 | static void pcmcia_write16(struct map_info *map, u16 d, unsigned long adr) | 292 | static void pcmcia_write16(struct map_info *map, map_word d, unsigned long adr) |
292 | { | 293 | { |
293 | caddr_t win_base = (caddr_t)map->map_priv_2; | 294 | caddr_t win_base = (caddr_t)map->map_priv_2; |
294 | 295 | ||
295 | if(DEV_REMOVED(map)) | 296 | if(DEV_REMOVED(map)) |
296 | return; | 297 | return; |
297 | 298 | ||
298 | DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04x", adr, win_base + adr, d); | 299 | DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx", |
299 | writew(d, win_base + adr); | 300 | adr, win_base + adr, d.x[0]); |
301 | writew(d.x[0], win_base + adr); | ||
300 | } | 302 | } |
301 | 303 | ||
302 | 304 | ||
@@ -307,7 +309,7 @@ static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *f | |||
307 | if(DEV_REMOVED(map)) | 309 | if(DEV_REMOVED(map)) |
308 | return; | 310 | return; |
309 | 311 | ||
310 | DEBUG(3, "to = %lu from = %p len = %u", to, from, len); | 312 | DEBUG(3, "to = %lu from = %p len = %zd", to, from, len); |
311 | memcpy_toio(win_base + to, from, len); | 313 | memcpy_toio(win_base + to, from, len); |
312 | } | 314 | } |
313 | 315 | ||
@@ -376,7 +378,8 @@ static int pcmciamtd_cistpl_jedec(struct pcmcia_device *p_dev, | |||
376 | if (!pcmcia_parse_tuple(tuple, &parse)) { | 378 | if (!pcmcia_parse_tuple(tuple, &parse)) { |
377 | cistpl_jedec_t *t = &parse.jedec; | 379 | cistpl_jedec_t *t = &parse.jedec; |
378 | for (i = 0; i < t->nid; i++) | 380 | for (i = 0; i < t->nid; i++) |
379 | DEBUG(2, "JEDEC: 0x%02x 0x%02x", t->id[i].mfr, t->id[i].info); | 381 | DEBUG(2, "JEDEC: 0x%02x 0x%02x", |
382 | t->id[i].mfr, t->id[i].info); | ||
380 | } | 383 | } |
381 | return -ENOSPC; | 384 | return -ENOSPC; |
382 | } | 385 | } |
@@ -432,7 +435,7 @@ static int pcmciamtd_cistpl_geo(struct pcmcia_device *p_dev, | |||
432 | } | 435 | } |
433 | 436 | ||
434 | 437 | ||
435 | static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link, int *new_name) | 438 | static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev, int *new_name) |
436 | { | 439 | { |
437 | int i; | 440 | int i; |
438 | 441 | ||
@@ -477,7 +480,8 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link, | |||
477 | } | 480 | } |
478 | 481 | ||
479 | DEBUG(1, "Device: Size: %lu Width:%d Name: %s", | 482 | DEBUG(1, "Device: Size: %lu Width:%d Name: %s", |
480 | dev->pcmcia_map.size, dev->pcmcia_map.bankwidth << 3, dev->mtd_name); | 483 | dev->pcmcia_map.size, |
484 | dev->pcmcia_map.bankwidth << 3, dev->mtd_name); | ||
481 | } | 485 | } |
482 | 486 | ||
483 | 487 | ||
@@ -490,7 +494,6 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
490 | { | 494 | { |
491 | struct pcmciamtd_dev *dev = link->priv; | 495 | struct pcmciamtd_dev *dev = link->priv; |
492 | struct mtd_info *mtd = NULL; | 496 | struct mtd_info *mtd = NULL; |
493 | cs_status_t status; | ||
494 | win_req_t req; | 497 | win_req_t req; |
495 | int ret; | 498 | int ret; |
496 | int i; | 499 | int i; |
@@ -514,9 +517,11 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
514 | if(setvpp == 1) | 517 | if(setvpp == 1) |
515 | dev->pcmcia_map.set_vpp = pcmciamtd_set_vpp; | 518 | dev->pcmcia_map.set_vpp = pcmciamtd_set_vpp; |
516 | 519 | ||
517 | /* Request a memory window for PCMCIA. Some architeures can map windows upto the maximum | 520 | /* Request a memory window for PCMCIA. Some architeures can map windows |
518 | that PCMCIA can support (64MiB) - this is ideal and we aim for a window the size of the | 521 | * upto the maximum that PCMCIA can support (64MiB) - this is ideal and |
519 | whole card - otherwise we try smaller windows until we succeed */ | 522 | * we aim for a window the size of the whole card - otherwise we try |
523 | * smaller windows until we succeed | ||
524 | */ | ||
520 | 525 | ||
521 | req.Attributes = WIN_MEMORY_TYPE_CM | WIN_ENABLE; | 526 | req.Attributes = WIN_MEMORY_TYPE_CM | WIN_ENABLE; |
522 | req.Attributes |= (dev->pcmcia_map.bankwidth == 1) ? WIN_DATA_WIDTH_8 : WIN_DATA_WIDTH_16; | 527 | req.Attributes |= (dev->pcmcia_map.bankwidth == 1) ? WIN_DATA_WIDTH_8 : WIN_DATA_WIDTH_16; |
@@ -544,7 +549,7 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
544 | DEBUG(2, "dev->win_size = %d", dev->win_size); | 549 | DEBUG(2, "dev->win_size = %d", dev->win_size); |
545 | 550 | ||
546 | if(!dev->win_size) { | 551 | if(!dev->win_size) { |
547 | err("Cant allocate memory window"); | 552 | dev_err(&dev->p_dev->dev, "Cannot allocate memory window\n"); |
548 | pcmciamtd_release(link); | 553 | pcmciamtd_release(link); |
549 | return -ENODEV; | 554 | return -ENODEV; |
550 | } | 555 | } |
@@ -554,7 +559,8 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
554 | DEBUG(2, "window handle = 0x%8.8lx", (unsigned long)link->win); | 559 | DEBUG(2, "window handle = 0x%8.8lx", (unsigned long)link->win); |
555 | dev->win_base = ioremap(req.Base, req.Size); | 560 | dev->win_base = ioremap(req.Base, req.Size); |
556 | if(!dev->win_base) { | 561 | if(!dev->win_base) { |
557 | err("ioremap(%lu, %u) failed", req.Base, req.Size); | 562 | dev_err(&dev->p_dev->dev, "ioremap(%lu, %u) failed\n", |
563 | req.Base, req.Size); | ||
558 | pcmciamtd_release(link); | 564 | pcmciamtd_release(link); |
559 | return -ENODEV; | 565 | return -ENODEV; |
560 | } | 566 | } |
@@ -565,7 +571,7 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
565 | dev->pcmcia_map.map_priv_1 = (unsigned long)dev; | 571 | dev->pcmcia_map.map_priv_1 = (unsigned long)dev; |
566 | dev->pcmcia_map.map_priv_2 = (unsigned long)link->win; | 572 | dev->pcmcia_map.map_priv_2 = (unsigned long)link->win; |
567 | 573 | ||
568 | dev->vpp = (vpp) ? vpp : link->socket.socket.Vpp; | 574 | dev->vpp = (vpp) ? vpp : link->socket->socket.Vpp; |
569 | link->conf.Attributes = 0; | 575 | link->conf.Attributes = 0; |
570 | if(setvpp == 2) { | 576 | if(setvpp == 2) { |
571 | link->conf.Vpp = dev->vpp; | 577 | link->conf.Vpp = dev->vpp; |
@@ -601,7 +607,7 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
601 | } | 607 | } |
602 | 608 | ||
603 | if(!mtd) { | 609 | if(!mtd) { |
604 | DEBUG(1, "Cant find an MTD"); | 610 | DEBUG(1, "Can not find an MTD"); |
605 | pcmciamtd_release(link); | 611 | pcmciamtd_release(link); |
606 | return -ENODEV; | 612 | return -ENODEV; |
607 | } | 613 | } |
@@ -612,8 +618,9 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
612 | if(new_name) { | 618 | if(new_name) { |
613 | int size = 0; | 619 | int size = 0; |
614 | char unit = ' '; | 620 | char unit = ' '; |
615 | /* Since we are using a default name, make it better by adding in the | 621 | /* Since we are using a default name, make it better by adding |
616 | size */ | 622 | * in the size |
623 | */ | ||
617 | if(mtd->size < 1048576) { /* <1MiB in size, show size in KiB */ | 624 | if(mtd->size < 1048576) { /* <1MiB in size, show size in KiB */ |
618 | size = mtd->size >> 10; | 625 | size = mtd->size >> 10; |
619 | unit = 'K'; | 626 | unit = 'K'; |
@@ -643,17 +650,15 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
643 | if(add_mtd_device(mtd)) { | 650 | if(add_mtd_device(mtd)) { |
644 | map_destroy(mtd); | 651 | map_destroy(mtd); |
645 | dev->mtd_info = NULL; | 652 | dev->mtd_info = NULL; |
646 | err("Couldnt register MTD device"); | 653 | dev_err(&dev->p_dev->dev, |
654 | "Could not register the MTD device\n"); | ||
647 | pcmciamtd_release(link); | 655 | pcmciamtd_release(link); |
648 | return -ENODEV; | 656 | return -ENODEV; |
649 | } | 657 | } |
650 | snprintf(dev->node.dev_name, sizeof(dev->node.dev_name), "mtd%d", mtd->index); | 658 | dev_info(&dev->p_dev->dev, "mtd%d: %s\n", mtd->index, mtd->name); |
651 | info("mtd%d: %s", mtd->index, mtd->name); | ||
652 | link->dev_node = &dev->node; | ||
653 | return 0; | 659 | return 0; |
654 | 660 | ||
655 | failed: | 661 | dev_err(&dev->p_dev->dev, "CS Error, exiting\n"); |
656 | err("CS Error, exiting"); | ||
657 | pcmciamtd_release(link); | 662 | pcmciamtd_release(link); |
658 | return -ENODEV; | 663 | return -ENODEV; |
659 | } | 664 | } |
@@ -692,8 +697,9 @@ static void pcmciamtd_detach(struct pcmcia_device *link) | |||
692 | 697 | ||
693 | if(dev->mtd_info) { | 698 | if(dev->mtd_info) { |
694 | del_mtd_device(dev->mtd_info); | 699 | del_mtd_device(dev->mtd_info); |
700 | dev_info(&dev->p_dev->dev, "mtd%d: Removing\n", | ||
701 | dev->mtd_info->index); | ||
695 | map_destroy(dev->mtd_info); | 702 | map_destroy(dev->mtd_info); |
696 | info("mtd%d: Removed", dev->mtd_info->index); | ||
697 | } | 703 | } |
698 | 704 | ||
699 | pcmciamtd_release(link); | 705 | pcmciamtd_release(link); |
@@ -737,8 +743,11 @@ static struct pcmcia_device_id pcmciamtd_ids[] = { | |||
737 | PCMCIA_DEVICE_PROD_ID12("intel", "VALUE SERIES 100 ", 0x40ade711, 0xdf8506d8), | 743 | PCMCIA_DEVICE_PROD_ID12("intel", "VALUE SERIES 100 ", 0x40ade711, 0xdf8506d8), |
738 | PCMCIA_DEVICE_PROD_ID12("KINGMAX TECHNOLOGY INC.", "SRAM 256K Bytes", 0x54d0c69c, 0xad12c29c), | 744 | PCMCIA_DEVICE_PROD_ID12("KINGMAX TECHNOLOGY INC.", "SRAM 256K Bytes", 0x54d0c69c, 0xad12c29c), |
739 | PCMCIA_DEVICE_PROD_ID12("Maxtor", "MAXFL MobileMax Flash Memory Card", 0xb68968c8, 0x2dfb47b0), | 745 | PCMCIA_DEVICE_PROD_ID12("Maxtor", "MAXFL MobileMax Flash Memory Card", 0xb68968c8, 0x2dfb47b0), |
746 | PCMCIA_DEVICE_PROD_ID123("M-Systems", "M-SYS Flash Memory Card", "(c) M-Systems", 0x7ed2ad87, 0x675dc3fb, 0x7aef3965), | ||
747 | PCMCIA_DEVICE_PROD_ID12("PRETEC", " 2MB SRAM CARD", 0xebf91155, 0x805360ca), | ||
740 | PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB101EN20", 0xf9876baf, 0xad0b207b), | 748 | PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB101EN20", 0xf9876baf, 0xad0b207b), |
741 | PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB513EN20", 0xf9876baf, 0xe8d884ad), | 749 | PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB513EN20", 0xf9876baf, 0xe8d884ad), |
750 | PCMCIA_DEVICE_PROD_ID12("SMART Modular Technologies", " 4MB FLASH Card", 0x96fd8277, 0x737a5b05), | ||
742 | PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-3000", 0x05ddca47, 0xe7d67bca), | 751 | PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-3000", 0x05ddca47, 0xe7d67bca), |
743 | PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-4100", 0x05ddca47, 0x7bc32944), | 752 | PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-4100", 0x05ddca47, 0x7bc32944), |
744 | /* the following was commented out in pcmcia-cs-3.2.7 */ | 753 | /* the following was commented out in pcmcia-cs-3.2.7 */ |
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c index d9603f7f9652..426461a5f0d4 100644 --- a/drivers/mtd/maps/physmap.c +++ b/drivers/mtd/maps/physmap.c | |||
@@ -264,8 +264,11 @@ static int __init physmap_init(void) | |||
264 | 264 | ||
265 | err = platform_driver_register(&physmap_flash_driver); | 265 | err = platform_driver_register(&physmap_flash_driver); |
266 | #ifdef CONFIG_MTD_PHYSMAP_COMPAT | 266 | #ifdef CONFIG_MTD_PHYSMAP_COMPAT |
267 | if (err == 0) | 267 | if (err == 0) { |
268 | platform_device_register(&physmap_flash); | 268 | err = platform_device_register(&physmap_flash); |
269 | if (err) | ||
270 | platform_driver_unregister(&physmap_flash_driver); | ||
271 | } | ||
269 | #endif | 272 | #endif |
270 | 273 | ||
271 | return err; | 274 | return err; |
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index 4147fe328c55..ba124baa646d 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c | |||
@@ -173,12 +173,53 @@ static struct mtd_info * __devinit obsolete_probe(struct of_device *dev, | |||
173 | } | 173 | } |
174 | } | 174 | } |
175 | 175 | ||
176 | #ifdef CONFIG_MTD_PARTITIONS | ||
177 | /* When partitions are set we look for a linux,part-probe property which | ||
178 | specifies the list of partition probers to use. If none is given then the | ||
179 | default is use. These take precedence over other device tree | ||
180 | information. */ | ||
181 | static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot", NULL }; | ||
182 | static const char ** __devinit of_get_probes(struct device_node *dp) | ||
183 | { | ||
184 | const char *cp; | ||
185 | int cplen; | ||
186 | unsigned int l; | ||
187 | unsigned int count; | ||
188 | const char **res; | ||
189 | |||
190 | cp = of_get_property(dp, "linux,part-probe", &cplen); | ||
191 | if (cp == NULL) | ||
192 | return part_probe_types_def; | ||
193 | |||
194 | count = 0; | ||
195 | for (l = 0; l != cplen; l++) | ||
196 | if (cp[l] == 0) | ||
197 | count++; | ||
198 | |||
199 | res = kzalloc((count + 1)*sizeof(*res), GFP_KERNEL); | ||
200 | count = 0; | ||
201 | while (cplen > 0) { | ||
202 | res[count] = cp; | ||
203 | l = strlen(cp) + 1; | ||
204 | cp += l; | ||
205 | cplen -= l; | ||
206 | count++; | ||
207 | } | ||
208 | return res; | ||
209 | } | ||
210 | |||
211 | static void __devinit of_free_probes(const char **probes) | ||
212 | { | ||
213 | if (probes != part_probe_types_def) | ||
214 | kfree(probes); | ||
215 | } | ||
216 | #endif | ||
217 | |||
176 | static int __devinit of_flash_probe(struct of_device *dev, | 218 | static int __devinit of_flash_probe(struct of_device *dev, |
177 | const struct of_device_id *match) | 219 | const struct of_device_id *match) |
178 | { | 220 | { |
179 | #ifdef CONFIG_MTD_PARTITIONS | 221 | #ifdef CONFIG_MTD_PARTITIONS |
180 | static const char *part_probe_types[] | 222 | const char **part_probe_types; |
181 | = { "cmdlinepart", "RedBoot", NULL }; | ||
182 | #endif | 223 | #endif |
183 | struct device_node *dp = dev->dev.of_node; | 224 | struct device_node *dp = dev->dev.of_node; |
184 | struct resource res; | 225 | struct resource res; |
@@ -218,7 +259,7 @@ static int __devinit of_flash_probe(struct of_device *dev, | |||
218 | 259 | ||
219 | dev_set_drvdata(&dev->dev, info); | 260 | dev_set_drvdata(&dev->dev, info); |
220 | 261 | ||
221 | mtd_list = kzalloc(sizeof(struct mtd_info) * count, GFP_KERNEL); | 262 | mtd_list = kzalloc(sizeof(*mtd_list) * count, GFP_KERNEL); |
222 | if (!mtd_list) | 263 | if (!mtd_list) |
223 | goto err_flash_remove; | 264 | goto err_flash_remove; |
224 | 265 | ||
@@ -307,12 +348,14 @@ static int __devinit of_flash_probe(struct of_device *dev, | |||
307 | goto err_out; | 348 | goto err_out; |
308 | 349 | ||
309 | #ifdef CONFIG_MTD_PARTITIONS | 350 | #ifdef CONFIG_MTD_PARTITIONS |
310 | /* First look for RedBoot table or partitions on the command | 351 | part_probe_types = of_get_probes(dp); |
311 | * line, these take precedence over device tree information */ | ||
312 | err = parse_mtd_partitions(info->cmtd, part_probe_types, | 352 | err = parse_mtd_partitions(info->cmtd, part_probe_types, |
313 | &info->parts, 0); | 353 | &info->parts, 0); |
314 | if (err < 0) | 354 | if (err < 0) { |
355 | of_free_probes(part_probe_types); | ||
315 | return err; | 356 | return err; |
357 | } | ||
358 | of_free_probes(part_probe_types); | ||
316 | 359 | ||
317 | #ifdef CONFIG_MTD_OF_PARTS | 360 | #ifdef CONFIG_MTD_OF_PARTS |
318 | if (err == 0) { | 361 | if (err == 0) { |
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c index 60c068db452d..eb476b7f8d11 100644 --- a/drivers/mtd/maps/pismo.c +++ b/drivers/mtd/maps/pismo.c | |||
@@ -234,6 +234,7 @@ static int __devexit pismo_remove(struct i2c_client *client) | |||
234 | /* FIXME: set_vpp needs saner arguments */ | 234 | /* FIXME: set_vpp needs saner arguments */ |
235 | pismo_setvpp_remove_fix(pismo); | 235 | pismo_setvpp_remove_fix(pismo); |
236 | 236 | ||
237 | i2c_set_clientdata(client, NULL); | ||
237 | kfree(pismo); | 238 | kfree(pismo); |
238 | 239 | ||
239 | return 0; | 240 | return 0; |
@@ -272,7 +273,7 @@ static int __devinit pismo_probe(struct i2c_client *client, | |||
272 | ret = pismo_eeprom_read(client, &eeprom, 0, sizeof(eeprom)); | 273 | ret = pismo_eeprom_read(client, &eeprom, 0, sizeof(eeprom)); |
273 | if (ret < 0) { | 274 | if (ret < 0) { |
274 | dev_err(&client->dev, "error reading EEPROM: %d\n", ret); | 275 | dev_err(&client->dev, "error reading EEPROM: %d\n", ret); |
275 | return ret; | 276 | goto exit_free; |
276 | } | 277 | } |
277 | 278 | ||
278 | dev_info(&client->dev, "%.15s board found\n", eeprom.board); | 279 | dev_info(&client->dev, "%.15s board found\n", eeprom.board); |
@@ -283,6 +284,11 @@ static int __devinit pismo_probe(struct i2c_client *client, | |||
283 | pdata->cs_addrs[i]); | 284 | pdata->cs_addrs[i]); |
284 | 285 | ||
285 | return 0; | 286 | return 0; |
287 | |||
288 | exit_free: | ||
289 | i2c_set_clientdata(client, NULL); | ||
290 | kfree(pismo); | ||
291 | return ret; | ||
286 | } | 292 | } |
287 | 293 | ||
288 | static const struct i2c_device_id pismo_id[] = { | 294 | static const struct i2c_device_id pismo_id[] = { |
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c index 91dc6331053f..dd90880048cf 100644 --- a/drivers/mtd/maps/pxa2xx-flash.c +++ b/drivers/mtd/maps/pxa2xx-flash.c | |||
@@ -63,11 +63,10 @@ static int __init pxa2xx_flash_probe(struct platform_device *pdev) | |||
63 | if (!res) | 63 | if (!res) |
64 | return -ENODEV; | 64 | return -ENODEV; |
65 | 65 | ||
66 | info = kmalloc(sizeof(struct pxa2xx_flash_info), GFP_KERNEL); | 66 | info = kzalloc(sizeof(struct pxa2xx_flash_info), GFP_KERNEL); |
67 | if (!info) | 67 | if (!info) |
68 | return -ENOMEM; | 68 | return -ENOMEM; |
69 | 69 | ||
70 | memset(info, 0, sizeof(struct pxa2xx_flash_info)); | ||
71 | info->map.name = (char *) flash->name; | 70 | info->map.name = (char *) flash->name; |
72 | info->map.bankwidth = flash->width; | 71 | info->map.bankwidth = flash->width; |
73 | info->map.phys = res->start; | 72 | info->map.phys = res->start; |
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index c82e09bbc5fd..03e19c1965cc 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c | |||
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/mtd/mtd.h> | 14 | #include <linux/mtd/mtd.h> |
15 | #include <linux/blkdev.h> | 15 | #include <linux/blkdev.h> |
16 | #include <linux/blkpg.h> | 16 | #include <linux/blkpg.h> |
17 | #include <linux/freezer.h> | ||
18 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
19 | #include <linux/hdreg.h> | 18 | #include <linux/hdreg.h> |
20 | #include <linux/init.h> | 19 | #include <linux/init.h> |
@@ -25,12 +24,42 @@ | |||
25 | #include "mtdcore.h" | 24 | #include "mtdcore.h" |
26 | 25 | ||
27 | static LIST_HEAD(blktrans_majors); | 26 | static LIST_HEAD(blktrans_majors); |
27 | static DEFINE_MUTEX(blktrans_ref_mutex); | ||
28 | |||
29 | void blktrans_dev_release(struct kref *kref) | ||
30 | { | ||
31 | struct mtd_blktrans_dev *dev = | ||
32 | container_of(kref, struct mtd_blktrans_dev, ref); | ||
33 | |||
34 | dev->disk->private_data = NULL; | ||
35 | blk_cleanup_queue(dev->rq); | ||
36 | put_disk(dev->disk); | ||
37 | list_del(&dev->list); | ||
38 | kfree(dev); | ||
39 | } | ||
40 | |||
41 | static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk) | ||
42 | { | ||
43 | struct mtd_blktrans_dev *dev; | ||
44 | |||
45 | mutex_lock(&blktrans_ref_mutex); | ||
46 | dev = disk->private_data; | ||
47 | |||
48 | if (!dev) | ||
49 | goto unlock; | ||
50 | kref_get(&dev->ref); | ||
51 | unlock: | ||
52 | mutex_unlock(&blktrans_ref_mutex); | ||
53 | return dev; | ||
54 | } | ||
55 | |||
56 | void blktrans_dev_put(struct mtd_blktrans_dev *dev) | ||
57 | { | ||
58 | mutex_lock(&blktrans_ref_mutex); | ||
59 | kref_put(&dev->ref, blktrans_dev_release); | ||
60 | mutex_unlock(&blktrans_ref_mutex); | ||
61 | } | ||
28 | 62 | ||
29 | struct mtd_blkcore_priv { | ||
30 | struct task_struct *thread; | ||
31 | struct request_queue *rq; | ||
32 | spinlock_t queue_lock; | ||
33 | }; | ||
34 | 63 | ||
35 | static int do_blktrans_request(struct mtd_blktrans_ops *tr, | 64 | static int do_blktrans_request(struct mtd_blktrans_ops *tr, |
36 | struct mtd_blktrans_dev *dev, | 65 | struct mtd_blktrans_dev *dev, |
@@ -61,7 +90,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, | |||
61 | return -EIO; | 90 | return -EIO; |
62 | rq_flush_dcache_pages(req); | 91 | rq_flush_dcache_pages(req); |
63 | return 0; | 92 | return 0; |
64 | |||
65 | case WRITE: | 93 | case WRITE: |
66 | if (!tr->writesect) | 94 | if (!tr->writesect) |
67 | return -EIO; | 95 | return -EIO; |
@@ -71,7 +99,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, | |||
71 | if (tr->writesect(dev, block, buf)) | 99 | if (tr->writesect(dev, block, buf)) |
72 | return -EIO; | 100 | return -EIO; |
73 | return 0; | 101 | return 0; |
74 | |||
75 | default: | 102 | default: |
76 | printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req)); | 103 | printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req)); |
77 | return -EIO; | 104 | return -EIO; |
@@ -80,14 +107,13 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, | |||
80 | 107 | ||
81 | static int mtd_blktrans_thread(void *arg) | 108 | static int mtd_blktrans_thread(void *arg) |
82 | { | 109 | { |
83 | struct mtd_blktrans_ops *tr = arg; | 110 | struct mtd_blktrans_dev *dev = arg; |
84 | struct request_queue *rq = tr->blkcore_priv->rq; | 111 | struct request_queue *rq = dev->rq; |
85 | struct request *req = NULL; | 112 | struct request *req = NULL; |
86 | 113 | ||
87 | spin_lock_irq(rq->queue_lock); | 114 | spin_lock_irq(rq->queue_lock); |
88 | 115 | ||
89 | while (!kthread_should_stop()) { | 116 | while (!kthread_should_stop()) { |
90 | struct mtd_blktrans_dev *dev; | ||
91 | int res; | 117 | int res; |
92 | 118 | ||
93 | if (!req && !(req = blk_fetch_request(rq))) { | 119 | if (!req && !(req = blk_fetch_request(rq))) { |
@@ -98,13 +124,10 @@ static int mtd_blktrans_thread(void *arg) | |||
98 | continue; | 124 | continue; |
99 | } | 125 | } |
100 | 126 | ||
101 | dev = req->rq_disk->private_data; | ||
102 | tr = dev->tr; | ||
103 | |||
104 | spin_unlock_irq(rq->queue_lock); | 127 | spin_unlock_irq(rq->queue_lock); |
105 | 128 | ||
106 | mutex_lock(&dev->lock); | 129 | mutex_lock(&dev->lock); |
107 | res = do_blktrans_request(tr, dev, req); | 130 | res = do_blktrans_request(dev->tr, dev, req); |
108 | mutex_unlock(&dev->lock); | 131 | mutex_unlock(&dev->lock); |
109 | 132 | ||
110 | spin_lock_irq(rq->queue_lock); | 133 | spin_lock_irq(rq->queue_lock); |
@@ -123,81 +146,112 @@ static int mtd_blktrans_thread(void *arg) | |||
123 | 146 | ||
124 | static void mtd_blktrans_request(struct request_queue *rq) | 147 | static void mtd_blktrans_request(struct request_queue *rq) |
125 | { | 148 | { |
126 | struct mtd_blktrans_ops *tr = rq->queuedata; | 149 | struct mtd_blktrans_dev *dev; |
127 | wake_up_process(tr->blkcore_priv->thread); | 150 | struct request *req = NULL; |
128 | } | 151 | |
152 | dev = rq->queuedata; | ||
129 | 153 | ||
154 | if (!dev) | ||
155 | while ((req = blk_fetch_request(rq)) != NULL) | ||
156 | __blk_end_request_all(req, -ENODEV); | ||
157 | else | ||
158 | wake_up_process(dev->thread); | ||
159 | } | ||
130 | 160 | ||
131 | static int blktrans_open(struct block_device *bdev, fmode_t mode) | 161 | static int blktrans_open(struct block_device *bdev, fmode_t mode) |
132 | { | 162 | { |
133 | struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data; | 163 | struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk); |
134 | struct mtd_blktrans_ops *tr = dev->tr; | 164 | int ret; |
135 | int ret = -ENODEV; | 165 | |
136 | 166 | if (!dev) | |
137 | if (!get_mtd_device(NULL, dev->mtd->index)) | 167 | return -ERESTARTSYS; |
138 | goto out; | 168 | |
139 | 169 | mutex_lock(&dev->lock); | |
140 | if (!try_module_get(tr->owner)) | 170 | |
141 | goto out_tr; | 171 | if (!dev->mtd) { |
142 | 172 | ret = -ENXIO; | |
143 | /* FIXME: Locking. A hot pluggable device can go away | 173 | goto unlock; |
144 | (del_mtd_device can be called for it) without its module | ||
145 | being unloaded. */ | ||
146 | dev->mtd->usecount++; | ||
147 | |||
148 | ret = 0; | ||
149 | if (tr->open && (ret = tr->open(dev))) { | ||
150 | dev->mtd->usecount--; | ||
151 | put_mtd_device(dev->mtd); | ||
152 | out_tr: | ||
153 | module_put(tr->owner); | ||
154 | } | 174 | } |
155 | out: | 175 | |
176 | ret = !dev->open++ && dev->tr->open ? dev->tr->open(dev) : 0; | ||
177 | |||
178 | /* Take another reference on the device so it won't go away till | ||
179 | last release */ | ||
180 | if (!ret) | ||
181 | kref_get(&dev->ref); | ||
182 | unlock: | ||
183 | mutex_unlock(&dev->lock); | ||
184 | blktrans_dev_put(dev); | ||
156 | return ret; | 185 | return ret; |
157 | } | 186 | } |
158 | 187 | ||
159 | static int blktrans_release(struct gendisk *disk, fmode_t mode) | 188 | static int blktrans_release(struct gendisk *disk, fmode_t mode) |
160 | { | 189 | { |
161 | struct mtd_blktrans_dev *dev = disk->private_data; | 190 | struct mtd_blktrans_dev *dev = blktrans_dev_get(disk); |
162 | struct mtd_blktrans_ops *tr = dev->tr; | 191 | int ret = -ENXIO; |
163 | int ret = 0; | ||
164 | 192 | ||
165 | if (tr->release) | 193 | if (!dev) |
166 | ret = tr->release(dev); | 194 | return ret; |
167 | 195 | ||
168 | if (!ret) { | 196 | mutex_lock(&dev->lock); |
169 | dev->mtd->usecount--; | 197 | |
170 | put_mtd_device(dev->mtd); | 198 | /* Release one reference, we sure its not the last one here*/ |
171 | module_put(tr->owner); | 199 | kref_put(&dev->ref, blktrans_dev_release); |
172 | } | ||
173 | 200 | ||
201 | if (!dev->mtd) | ||
202 | goto unlock; | ||
203 | |||
204 | ret = !--dev->open && dev->tr->release ? dev->tr->release(dev) : 0; | ||
205 | unlock: | ||
206 | mutex_unlock(&dev->lock); | ||
207 | blktrans_dev_put(dev); | ||
174 | return ret; | 208 | return ret; |
175 | } | 209 | } |
176 | 210 | ||
177 | static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo) | 211 | static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo) |
178 | { | 212 | { |
179 | struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data; | 213 | struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk); |
214 | int ret = -ENXIO; | ||
215 | |||
216 | if (!dev) | ||
217 | return ret; | ||
218 | |||
219 | mutex_lock(&dev->lock); | ||
220 | |||
221 | if (!dev->mtd) | ||
222 | goto unlock; | ||
180 | 223 | ||
181 | if (dev->tr->getgeo) | 224 | ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0; |
182 | return dev->tr->getgeo(dev, geo); | 225 | unlock: |
183 | return -ENOTTY; | 226 | mutex_unlock(&dev->lock); |
227 | blktrans_dev_put(dev); | ||
228 | return ret; | ||
184 | } | 229 | } |
185 | 230 | ||
186 | static int blktrans_ioctl(struct block_device *bdev, fmode_t mode, | 231 | static int blktrans_ioctl(struct block_device *bdev, fmode_t mode, |
187 | unsigned int cmd, unsigned long arg) | 232 | unsigned int cmd, unsigned long arg) |
188 | { | 233 | { |
189 | struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data; | 234 | struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk); |
190 | struct mtd_blktrans_ops *tr = dev->tr; | 235 | int ret = -ENXIO; |
236 | |||
237 | if (!dev) | ||
238 | return ret; | ||
239 | |||
240 | mutex_lock(&dev->lock); | ||
241 | |||
242 | if (!dev->mtd) | ||
243 | goto unlock; | ||
191 | 244 | ||
192 | switch (cmd) { | 245 | switch (cmd) { |
193 | case BLKFLSBUF: | 246 | case BLKFLSBUF: |
194 | if (tr->flush) | 247 | ret = dev->tr->flush ? dev->tr->flush(dev) : 0; |
195 | return tr->flush(dev); | ||
196 | /* The core code did the work, we had nothing to do. */ | ||
197 | return 0; | ||
198 | default: | 248 | default: |
199 | return -ENOTTY; | 249 | ret = -ENOTTY; |
200 | } | 250 | } |
251 | unlock: | ||
252 | mutex_unlock(&dev->lock); | ||
253 | blktrans_dev_put(dev); | ||
254 | return ret; | ||
201 | } | 255 | } |
202 | 256 | ||
203 | static const struct block_device_operations mtd_blktrans_ops = { | 257 | static const struct block_device_operations mtd_blktrans_ops = { |
@@ -214,12 +268,14 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) | |||
214 | struct mtd_blktrans_dev *d; | 268 | struct mtd_blktrans_dev *d; |
215 | int last_devnum = -1; | 269 | int last_devnum = -1; |
216 | struct gendisk *gd; | 270 | struct gendisk *gd; |
271 | int ret; | ||
217 | 272 | ||
218 | if (mutex_trylock(&mtd_table_mutex)) { | 273 | if (mutex_trylock(&mtd_table_mutex)) { |
219 | mutex_unlock(&mtd_table_mutex); | 274 | mutex_unlock(&mtd_table_mutex); |
220 | BUG(); | 275 | BUG(); |
221 | } | 276 | } |
222 | 277 | ||
278 | mutex_lock(&blktrans_ref_mutex); | ||
223 | list_for_each_entry(d, &tr->devs, list) { | 279 | list_for_each_entry(d, &tr->devs, list) { |
224 | if (new->devnum == -1) { | 280 | if (new->devnum == -1) { |
225 | /* Use first free number */ | 281 | /* Use first free number */ |
@@ -231,6 +287,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) | |||
231 | } | 287 | } |
232 | } else if (d->devnum == new->devnum) { | 288 | } else if (d->devnum == new->devnum) { |
233 | /* Required number taken */ | 289 | /* Required number taken */ |
290 | mutex_unlock(&blktrans_ref_mutex); | ||
234 | return -EBUSY; | 291 | return -EBUSY; |
235 | } else if (d->devnum > new->devnum) { | 292 | } else if (d->devnum > new->devnum) { |
236 | /* Required number was free */ | 293 | /* Required number was free */ |
@@ -239,24 +296,38 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) | |||
239 | } | 296 | } |
240 | last_devnum = d->devnum; | 297 | last_devnum = d->devnum; |
241 | } | 298 | } |
299 | |||
300 | ret = -EBUSY; | ||
242 | if (new->devnum == -1) | 301 | if (new->devnum == -1) |
243 | new->devnum = last_devnum+1; | 302 | new->devnum = last_devnum+1; |
244 | 303 | ||
245 | if ((new->devnum << tr->part_bits) > 256) { | 304 | /* Check that the device and any partitions will get valid |
246 | return -EBUSY; | 305 | * minor numbers and that the disk naming code below can cope |
306 | * with this number. */ | ||
307 | if (new->devnum > (MINORMASK >> tr->part_bits) || | ||
308 | (tr->part_bits && new->devnum >= 27 * 26)) { | ||
309 | mutex_unlock(&blktrans_ref_mutex); | ||
310 | goto error1; | ||
247 | } | 311 | } |
248 | 312 | ||
249 | list_add_tail(&new->list, &tr->devs); | 313 | list_add_tail(&new->list, &tr->devs); |
250 | added: | 314 | added: |
315 | mutex_unlock(&blktrans_ref_mutex); | ||
316 | |||
251 | mutex_init(&new->lock); | 317 | mutex_init(&new->lock); |
318 | kref_init(&new->ref); | ||
252 | if (!tr->writesect) | 319 | if (!tr->writesect) |
253 | new->readonly = 1; | 320 | new->readonly = 1; |
254 | 321 | ||
322 | /* Create gendisk */ | ||
323 | ret = -ENOMEM; | ||
255 | gd = alloc_disk(1 << tr->part_bits); | 324 | gd = alloc_disk(1 << tr->part_bits); |
256 | if (!gd) { | 325 | |
257 | list_del(&new->list); | 326 | if (!gd) |
258 | return -ENOMEM; | 327 | goto error2; |
259 | } | 328 | |
329 | new->disk = gd; | ||
330 | gd->private_data = new; | ||
260 | gd->major = tr->major; | 331 | gd->major = tr->major; |
261 | gd->first_minor = (new->devnum) << tr->part_bits; | 332 | gd->first_minor = (new->devnum) << tr->part_bits; |
262 | gd->fops = &mtd_blktrans_ops; | 333 | gd->fops = &mtd_blktrans_ops; |
@@ -274,13 +345,35 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) | |||
274 | snprintf(gd->disk_name, sizeof(gd->disk_name), | 345 | snprintf(gd->disk_name, sizeof(gd->disk_name), |
275 | "%s%d", tr->name, new->devnum); | 346 | "%s%d", tr->name, new->devnum); |
276 | 347 | ||
277 | /* 2.5 has capacity in units of 512 bytes while still | ||
278 | having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */ | ||
279 | set_capacity(gd, (new->size * tr->blksize) >> 9); | 348 | set_capacity(gd, (new->size * tr->blksize) >> 9); |
280 | 349 | ||
281 | gd->private_data = new; | 350 | /* Create the request queue */ |
282 | new->blkcore_priv = gd; | 351 | spin_lock_init(&new->queue_lock); |
283 | gd->queue = tr->blkcore_priv->rq; | 352 | new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock); |
353 | |||
354 | if (!new->rq) | ||
355 | goto error3; | ||
356 | |||
357 | new->rq->queuedata = new; | ||
358 | blk_queue_logical_block_size(new->rq, tr->blksize); | ||
359 | |||
360 | if (tr->discard) | ||
361 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, | ||
362 | new->rq); | ||
363 | |||
364 | gd->queue = new->rq; | ||
365 | |||
366 | __get_mtd_device(new->mtd); | ||
367 | __module_get(tr->owner); | ||
368 | |||
369 | /* Create processing thread */ | ||
370 | /* TODO: workqueue ? */ | ||
371 | new->thread = kthread_run(mtd_blktrans_thread, new, | ||
372 | "%s%d", tr->name, new->mtd->index); | ||
373 | if (IS_ERR(new->thread)) { | ||
374 | ret = PTR_ERR(new->thread); | ||
375 | goto error4; | ||
376 | } | ||
284 | gd->driverfs_dev = &new->mtd->dev; | 377 | gd->driverfs_dev = &new->mtd->dev; |
285 | 378 | ||
286 | if (new->readonly) | 379 | if (new->readonly) |
@@ -288,21 +381,65 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) | |||
288 | 381 | ||
289 | add_disk(gd); | 382 | add_disk(gd); |
290 | 383 | ||
384 | if (new->disk_attributes) { | ||
385 | ret = sysfs_create_group(&disk_to_dev(gd)->kobj, | ||
386 | new->disk_attributes); | ||
387 | WARN_ON(ret); | ||
388 | } | ||
291 | return 0; | 389 | return 0; |
390 | error4: | ||
391 | module_put(tr->owner); | ||
392 | __put_mtd_device(new->mtd); | ||
393 | blk_cleanup_queue(new->rq); | ||
394 | error3: | ||
395 | put_disk(new->disk); | ||
396 | error2: | ||
397 | list_del(&new->list); | ||
398 | error1: | ||
399 | kfree(new); | ||
400 | return ret; | ||
292 | } | 401 | } |
293 | 402 | ||
294 | int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old) | 403 | int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old) |
295 | { | 404 | { |
405 | unsigned long flags; | ||
406 | |||
296 | if (mutex_trylock(&mtd_table_mutex)) { | 407 | if (mutex_trylock(&mtd_table_mutex)) { |
297 | mutex_unlock(&mtd_table_mutex); | 408 | mutex_unlock(&mtd_table_mutex); |
298 | BUG(); | 409 | BUG(); |
299 | } | 410 | } |
300 | 411 | ||
301 | list_del(&old->list); | 412 | /* Stop new requests to arrive */ |
413 | del_gendisk(old->disk); | ||
414 | |||
415 | if (old->disk_attributes) | ||
416 | sysfs_remove_group(&disk_to_dev(old->disk)->kobj, | ||
417 | old->disk_attributes); | ||
418 | |||
419 | /* Stop the thread */ | ||
420 | kthread_stop(old->thread); | ||
421 | |||
422 | /* Kill current requests */ | ||
423 | spin_lock_irqsave(&old->queue_lock, flags); | ||
424 | old->rq->queuedata = NULL; | ||
425 | blk_start_queue(old->rq); | ||
426 | spin_unlock_irqrestore(&old->queue_lock, flags); | ||
427 | |||
428 | /* Ask trans driver for release to the mtd device */ | ||
429 | mutex_lock(&old->lock); | ||
430 | if (old->open && old->tr->release) { | ||
431 | old->tr->release(old); | ||
432 | old->open = 0; | ||
433 | } | ||
434 | |||
435 | __put_mtd_device(old->mtd); | ||
436 | module_put(old->tr->owner); | ||
302 | 437 | ||
303 | del_gendisk(old->blkcore_priv); | 438 | /* At that point, we don't touch the mtd anymore */ |
304 | put_disk(old->blkcore_priv); | 439 | old->mtd = NULL; |
305 | 440 | ||
441 | mutex_unlock(&old->lock); | ||
442 | blktrans_dev_put(old); | ||
306 | return 0; | 443 | return 0; |
307 | } | 444 | } |
308 | 445 | ||
@@ -335,7 +472,8 @@ static struct mtd_notifier blktrans_notifier = { | |||
335 | 472 | ||
336 | int register_mtd_blktrans(struct mtd_blktrans_ops *tr) | 473 | int register_mtd_blktrans(struct mtd_blktrans_ops *tr) |
337 | { | 474 | { |
338 | int ret, i; | 475 | struct mtd_info *mtd; |
476 | int ret; | ||
339 | 477 | ||
340 | /* Register the notifier if/when the first device type is | 478 | /* Register the notifier if/when the first device type is |
341 | registered, to prevent the link/init ordering from fucking | 479 | registered, to prevent the link/init ordering from fucking |
@@ -343,9 +481,6 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr) | |||
343 | if (!blktrans_notifier.list.next) | 481 | if (!blktrans_notifier.list.next) |
344 | register_mtd_user(&blktrans_notifier); | 482 | register_mtd_user(&blktrans_notifier); |
345 | 483 | ||
346 | tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL); | ||
347 | if (!tr->blkcore_priv) | ||
348 | return -ENOMEM; | ||
349 | 484 | ||
350 | mutex_lock(&mtd_table_mutex); | 485 | mutex_lock(&mtd_table_mutex); |
351 | 486 | ||
@@ -353,49 +488,20 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr) | |||
353 | if (ret) { | 488 | if (ret) { |
354 | printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n", | 489 | printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n", |
355 | tr->name, tr->major, ret); | 490 | tr->name, tr->major, ret); |
356 | kfree(tr->blkcore_priv); | ||
357 | mutex_unlock(&mtd_table_mutex); | 491 | mutex_unlock(&mtd_table_mutex); |
358 | return ret; | 492 | return ret; |
359 | } | 493 | } |
360 | spin_lock_init(&tr->blkcore_priv->queue_lock); | ||
361 | |||
362 | tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock); | ||
363 | if (!tr->blkcore_priv->rq) { | ||
364 | unregister_blkdev(tr->major, tr->name); | ||
365 | kfree(tr->blkcore_priv); | ||
366 | mutex_unlock(&mtd_table_mutex); | ||
367 | return -ENOMEM; | ||
368 | } | ||
369 | |||
370 | tr->blkcore_priv->rq->queuedata = tr; | ||
371 | blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize); | ||
372 | if (tr->discard) | ||
373 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, | ||
374 | tr->blkcore_priv->rq); | ||
375 | 494 | ||
376 | tr->blkshift = ffs(tr->blksize) - 1; | 495 | tr->blkshift = ffs(tr->blksize) - 1; |
377 | 496 | ||
378 | tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr, | ||
379 | "%sd", tr->name); | ||
380 | if (IS_ERR(tr->blkcore_priv->thread)) { | ||
381 | ret = PTR_ERR(tr->blkcore_priv->thread); | ||
382 | blk_cleanup_queue(tr->blkcore_priv->rq); | ||
383 | unregister_blkdev(tr->major, tr->name); | ||
384 | kfree(tr->blkcore_priv); | ||
385 | mutex_unlock(&mtd_table_mutex); | ||
386 | return ret; | ||
387 | } | ||
388 | |||
389 | INIT_LIST_HEAD(&tr->devs); | 497 | INIT_LIST_HEAD(&tr->devs); |
390 | list_add(&tr->list, &blktrans_majors); | 498 | list_add(&tr->list, &blktrans_majors); |
391 | 499 | ||
392 | for (i=0; i<MAX_MTD_DEVICES; i++) { | 500 | mtd_for_each_device(mtd) |
393 | if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT) | 501 | if (mtd->type != MTD_ABSENT) |
394 | tr->add_mtd(tr, mtd_table[i]); | 502 | tr->add_mtd(tr, mtd); |
395 | } | ||
396 | 503 | ||
397 | mutex_unlock(&mtd_table_mutex); | 504 | mutex_unlock(&mtd_table_mutex); |
398 | |||
399 | return 0; | 505 | return 0; |
400 | } | 506 | } |
401 | 507 | ||
@@ -405,22 +511,15 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr) | |||
405 | 511 | ||
406 | mutex_lock(&mtd_table_mutex); | 512 | mutex_lock(&mtd_table_mutex); |
407 | 513 | ||
408 | /* Clean up the kernel thread */ | ||
409 | kthread_stop(tr->blkcore_priv->thread); | ||
410 | |||
411 | /* Remove it from the list of active majors */ | 514 | /* Remove it from the list of active majors */ |
412 | list_del(&tr->list); | 515 | list_del(&tr->list); |
413 | 516 | ||
414 | list_for_each_entry_safe(dev, next, &tr->devs, list) | 517 | list_for_each_entry_safe(dev, next, &tr->devs, list) |
415 | tr->remove_dev(dev); | 518 | tr->remove_dev(dev); |
416 | 519 | ||
417 | blk_cleanup_queue(tr->blkcore_priv->rq); | ||
418 | unregister_blkdev(tr->major, tr->name); | 520 | unregister_blkdev(tr->major, tr->name); |
419 | |||
420 | mutex_unlock(&mtd_table_mutex); | 521 | mutex_unlock(&mtd_table_mutex); |
421 | 522 | ||
422 | kfree(tr->blkcore_priv); | ||
423 | |||
424 | BUG_ON(!list_empty(&tr->devs)); | 523 | BUG_ON(!list_empty(&tr->devs)); |
425 | return 0; | 524 | return 0; |
426 | } | 525 | } |
diff --git a/drivers/mtd/mtdbdi.c b/drivers/mtd/mtdbdi.c deleted file mode 100644 index 5ca5aed0b225..000000000000 --- a/drivers/mtd/mtdbdi.c +++ /dev/null | |||
@@ -1,43 +0,0 @@ | |||
1 | /* MTD backing device capabilities | ||
2 | * | ||
3 | * Copyright © 2006 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/backing-dev.h> | ||
13 | #include <linux/mtd/mtd.h> | ||
14 | #include "internal.h" | ||
15 | |||
16 | /* | ||
17 | * backing device capabilities for non-mappable devices (such as NAND flash) | ||
18 | * - permits private mappings, copies are taken of the data | ||
19 | */ | ||
20 | struct backing_dev_info mtd_bdi_unmappable = { | ||
21 | .capabilities = BDI_CAP_MAP_COPY, | ||
22 | }; | ||
23 | |||
24 | /* | ||
25 | * backing device capabilities for R/O mappable devices (such as ROM) | ||
26 | * - permits private mappings, copies are taken of the data | ||
27 | * - permits non-writable shared mappings | ||
28 | */ | ||
29 | struct backing_dev_info mtd_bdi_ro_mappable = { | ||
30 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | | ||
31 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP), | ||
32 | }; | ||
33 | |||
34 | /* | ||
35 | * backing device capabilities for writable mappable devices (such as RAM) | ||
36 | * - permits private mappings, copies are taken of the data | ||
37 | * - permits non-writable shared mappings | ||
38 | */ | ||
39 | struct backing_dev_info mtd_bdi_rw_mappable = { | ||
40 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | | ||
41 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP | | ||
42 | BDI_CAP_WRITE_MAP), | ||
43 | }; | ||
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c index 9f41b1a853c1..e6edbec609fd 100644 --- a/drivers/mtd/mtdblock.c +++ b/drivers/mtd/mtdblock.c | |||
@@ -19,15 +19,15 @@ | |||
19 | #include <linux/mutex.h> | 19 | #include <linux/mutex.h> |
20 | 20 | ||
21 | 21 | ||
22 | static struct mtdblk_dev { | 22 | struct mtdblk_dev { |
23 | struct mtd_info *mtd; | 23 | struct mtd_blktrans_dev mbd; |
24 | int count; | 24 | int count; |
25 | struct mutex cache_mutex; | 25 | struct mutex cache_mutex; |
26 | unsigned char *cache_data; | 26 | unsigned char *cache_data; |
27 | unsigned long cache_offset; | 27 | unsigned long cache_offset; |
28 | unsigned int cache_size; | 28 | unsigned int cache_size; |
29 | enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state; | 29 | enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state; |
30 | } *mtdblks[MAX_MTD_DEVICES]; | 30 | }; |
31 | 31 | ||
32 | static struct mutex mtdblks_lock; | 32 | static struct mutex mtdblks_lock; |
33 | 33 | ||
@@ -98,7 +98,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos, | |||
98 | 98 | ||
99 | static int write_cached_data (struct mtdblk_dev *mtdblk) | 99 | static int write_cached_data (struct mtdblk_dev *mtdblk) |
100 | { | 100 | { |
101 | struct mtd_info *mtd = mtdblk->mtd; | 101 | struct mtd_info *mtd = mtdblk->mbd.mtd; |
102 | int ret; | 102 | int ret; |
103 | 103 | ||
104 | if (mtdblk->cache_state != STATE_DIRTY) | 104 | if (mtdblk->cache_state != STATE_DIRTY) |
@@ -128,7 +128,7 @@ static int write_cached_data (struct mtdblk_dev *mtdblk) | |||
128 | static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos, | 128 | static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos, |
129 | int len, const char *buf) | 129 | int len, const char *buf) |
130 | { | 130 | { |
131 | struct mtd_info *mtd = mtdblk->mtd; | 131 | struct mtd_info *mtd = mtdblk->mbd.mtd; |
132 | unsigned int sect_size = mtdblk->cache_size; | 132 | unsigned int sect_size = mtdblk->cache_size; |
133 | size_t retlen; | 133 | size_t retlen; |
134 | int ret; | 134 | int ret; |
@@ -198,7 +198,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos, | |||
198 | static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos, | 198 | static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos, |
199 | int len, char *buf) | 199 | int len, char *buf) |
200 | { | 200 | { |
201 | struct mtd_info *mtd = mtdblk->mtd; | 201 | struct mtd_info *mtd = mtdblk->mbd.mtd; |
202 | unsigned int sect_size = mtdblk->cache_size; | 202 | unsigned int sect_size = mtdblk->cache_size; |
203 | size_t retlen; | 203 | size_t retlen; |
204 | int ret; | 204 | int ret; |
@@ -244,16 +244,16 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos, | |||
244 | static int mtdblock_readsect(struct mtd_blktrans_dev *dev, | 244 | static int mtdblock_readsect(struct mtd_blktrans_dev *dev, |
245 | unsigned long block, char *buf) | 245 | unsigned long block, char *buf) |
246 | { | 246 | { |
247 | struct mtdblk_dev *mtdblk = mtdblks[dev->devnum]; | 247 | struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); |
248 | return do_cached_read(mtdblk, block<<9, 512, buf); | 248 | return do_cached_read(mtdblk, block<<9, 512, buf); |
249 | } | 249 | } |
250 | 250 | ||
251 | static int mtdblock_writesect(struct mtd_blktrans_dev *dev, | 251 | static int mtdblock_writesect(struct mtd_blktrans_dev *dev, |
252 | unsigned long block, char *buf) | 252 | unsigned long block, char *buf) |
253 | { | 253 | { |
254 | struct mtdblk_dev *mtdblk = mtdblks[dev->devnum]; | 254 | struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); |
255 | if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) { | 255 | if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) { |
256 | mtdblk->cache_data = vmalloc(mtdblk->mtd->erasesize); | 256 | mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize); |
257 | if (!mtdblk->cache_data) | 257 | if (!mtdblk->cache_data) |
258 | return -EINTR; | 258 | return -EINTR; |
259 | /* -EINTR is not really correct, but it is the best match | 259 | /* -EINTR is not really correct, but it is the best match |
@@ -266,37 +266,26 @@ static int mtdblock_writesect(struct mtd_blktrans_dev *dev, | |||
266 | 266 | ||
267 | static int mtdblock_open(struct mtd_blktrans_dev *mbd) | 267 | static int mtdblock_open(struct mtd_blktrans_dev *mbd) |
268 | { | 268 | { |
269 | struct mtdblk_dev *mtdblk; | 269 | struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); |
270 | struct mtd_info *mtd = mbd->mtd; | ||
271 | int dev = mbd->devnum; | ||
272 | 270 | ||
273 | DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n"); | 271 | DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n"); |
274 | 272 | ||
275 | mutex_lock(&mtdblks_lock); | 273 | mutex_lock(&mtdblks_lock); |
276 | if (mtdblks[dev]) { | 274 | if (mtdblk->count) { |
277 | mtdblks[dev]->count++; | 275 | mtdblk->count++; |
278 | mutex_unlock(&mtdblks_lock); | 276 | mutex_unlock(&mtdblks_lock); |
279 | return 0; | 277 | return 0; |
280 | } | 278 | } |
281 | 279 | ||
282 | /* OK, it's not open. Create cache info for it */ | 280 | /* OK, it's not open. Create cache info for it */ |
283 | mtdblk = kzalloc(sizeof(struct mtdblk_dev), GFP_KERNEL); | ||
284 | if (!mtdblk) { | ||
285 | mutex_unlock(&mtdblks_lock); | ||
286 | return -ENOMEM; | ||
287 | } | ||
288 | |||
289 | mtdblk->count = 1; | 281 | mtdblk->count = 1; |
290 | mtdblk->mtd = mtd; | ||
291 | |||
292 | mutex_init(&mtdblk->cache_mutex); | 282 | mutex_init(&mtdblk->cache_mutex); |
293 | mtdblk->cache_state = STATE_EMPTY; | 283 | mtdblk->cache_state = STATE_EMPTY; |
294 | if ( !(mtdblk->mtd->flags & MTD_NO_ERASE) && mtdblk->mtd->erasesize) { | 284 | if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) { |
295 | mtdblk->cache_size = mtdblk->mtd->erasesize; | 285 | mtdblk->cache_size = mbd->mtd->erasesize; |
296 | mtdblk->cache_data = NULL; | 286 | mtdblk->cache_data = NULL; |
297 | } | 287 | } |
298 | 288 | ||
299 | mtdblks[dev] = mtdblk; | ||
300 | mutex_unlock(&mtdblks_lock); | 289 | mutex_unlock(&mtdblks_lock); |
301 | 290 | ||
302 | DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); | 291 | DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); |
@@ -306,8 +295,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd) | |||
306 | 295 | ||
307 | static int mtdblock_release(struct mtd_blktrans_dev *mbd) | 296 | static int mtdblock_release(struct mtd_blktrans_dev *mbd) |
308 | { | 297 | { |
309 | int dev = mbd->devnum; | 298 | struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); |
310 | struct mtdblk_dev *mtdblk = mtdblks[dev]; | ||
311 | 299 | ||
312 | DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n"); | 300 | DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n"); |
313 | 301 | ||
@@ -318,12 +306,10 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd) | |||
318 | mutex_unlock(&mtdblk->cache_mutex); | 306 | mutex_unlock(&mtdblk->cache_mutex); |
319 | 307 | ||
320 | if (!--mtdblk->count) { | 308 | if (!--mtdblk->count) { |
321 | /* It was the last usage. Free the device */ | 309 | /* It was the last usage. Free the cache */ |
322 | mtdblks[dev] = NULL; | 310 | if (mbd->mtd->sync) |
323 | if (mtdblk->mtd->sync) | 311 | mbd->mtd->sync(mbd->mtd); |
324 | mtdblk->mtd->sync(mtdblk->mtd); | ||
325 | vfree(mtdblk->cache_data); | 312 | vfree(mtdblk->cache_data); |
326 | kfree(mtdblk); | ||
327 | } | 313 | } |
328 | 314 | ||
329 | mutex_unlock(&mtdblks_lock); | 315 | mutex_unlock(&mtdblks_lock); |
@@ -335,40 +321,40 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd) | |||
335 | 321 | ||
336 | static int mtdblock_flush(struct mtd_blktrans_dev *dev) | 322 | static int mtdblock_flush(struct mtd_blktrans_dev *dev) |
337 | { | 323 | { |
338 | struct mtdblk_dev *mtdblk = mtdblks[dev->devnum]; | 324 | struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); |
339 | 325 | ||
340 | mutex_lock(&mtdblk->cache_mutex); | 326 | mutex_lock(&mtdblk->cache_mutex); |
341 | write_cached_data(mtdblk); | 327 | write_cached_data(mtdblk); |
342 | mutex_unlock(&mtdblk->cache_mutex); | 328 | mutex_unlock(&mtdblk->cache_mutex); |
343 | 329 | ||
344 | if (mtdblk->mtd->sync) | 330 | if (dev->mtd->sync) |
345 | mtdblk->mtd->sync(mtdblk->mtd); | 331 | dev->mtd->sync(dev->mtd); |
346 | return 0; | 332 | return 0; |
347 | } | 333 | } |
348 | 334 | ||
349 | static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | 335 | static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) |
350 | { | 336 | { |
351 | struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL); | 337 | struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL); |
352 | 338 | ||
353 | if (!dev) | 339 | if (!dev) |
354 | return; | 340 | return; |
355 | 341 | ||
356 | dev->mtd = mtd; | 342 | dev->mbd.mtd = mtd; |
357 | dev->devnum = mtd->index; | 343 | dev->mbd.devnum = mtd->index; |
358 | 344 | ||
359 | dev->size = mtd->size >> 9; | 345 | dev->mbd.size = mtd->size >> 9; |
360 | dev->tr = tr; | 346 | dev->mbd.tr = tr; |
361 | 347 | ||
362 | if (!(mtd->flags & MTD_WRITEABLE)) | 348 | if (!(mtd->flags & MTD_WRITEABLE)) |
363 | dev->readonly = 1; | 349 | dev->mbd.readonly = 1; |
364 | 350 | ||
365 | add_mtd_blktrans_dev(dev); | 351 | if (add_mtd_blktrans_dev(&dev->mbd)) |
352 | kfree(dev); | ||
366 | } | 353 | } |
367 | 354 | ||
368 | static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev) | 355 | static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev) |
369 | { | 356 | { |
370 | del_mtd_blktrans_dev(dev); | 357 | del_mtd_blktrans_dev(dev); |
371 | kfree(dev); | ||
372 | } | 358 | } |
373 | 359 | ||
374 | static struct mtd_blktrans_ops mtdblock_tr = { | 360 | static struct mtd_blktrans_ops mtdblock_tr = { |
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c index 852165f8b1c3..d0d3f79f9d03 100644 --- a/drivers/mtd/mtdblock_ro.c +++ b/drivers/mtd/mtdblock_ro.c | |||
@@ -43,13 +43,13 @@ static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |||
43 | dev->tr = tr; | 43 | dev->tr = tr; |
44 | dev->readonly = 1; | 44 | dev->readonly = 1; |
45 | 45 | ||
46 | add_mtd_blktrans_dev(dev); | 46 | if (add_mtd_blktrans_dev(dev)) |
47 | kfree(dev); | ||
47 | } | 48 | } |
48 | 49 | ||
49 | static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev) | 50 | static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev) |
50 | { | 51 | { |
51 | del_mtd_blktrans_dev(dev); | 52 | del_mtd_blktrans_dev(dev); |
52 | kfree(dev); | ||
53 | } | 53 | } |
54 | 54 | ||
55 | static struct mtd_blktrans_ops mtdblock_tr = { | 55 | static struct mtd_blktrans_ops mtdblock_tr = { |
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 5b081cb84351..8bb5e4a66328 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c | |||
@@ -15,12 +15,15 @@ | |||
15 | #include <linux/smp_lock.h> | 15 | #include <linux/smp_lock.h> |
16 | #include <linux/backing-dev.h> | 16 | #include <linux/backing-dev.h> |
17 | #include <linux/compat.h> | 17 | #include <linux/compat.h> |
18 | #include <linux/mount.h> | ||
18 | 19 | ||
19 | #include <linux/mtd/mtd.h> | 20 | #include <linux/mtd/mtd.h> |
20 | #include <linux/mtd/compatmac.h> | 21 | #include <linux/mtd/compatmac.h> |
21 | 22 | ||
22 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
23 | 24 | ||
25 | #define MTD_INODE_FS_MAGIC 0x11307854 | ||
26 | static struct vfsmount *mtd_inode_mnt __read_mostly; | ||
24 | 27 | ||
25 | /* | 28 | /* |
26 | * Data structure to hold the pointer to the mtd device as well | 29 | * Data structure to hold the pointer to the mtd device as well |
@@ -28,6 +31,7 @@ | |||
28 | */ | 31 | */ |
29 | struct mtd_file_info { | 32 | struct mtd_file_info { |
30 | struct mtd_info *mtd; | 33 | struct mtd_info *mtd; |
34 | struct inode *ino; | ||
31 | enum mtd_file_modes mode; | 35 | enum mtd_file_modes mode; |
32 | }; | 36 | }; |
33 | 37 | ||
@@ -64,12 +68,10 @@ static int mtd_open(struct inode *inode, struct file *file) | |||
64 | int ret = 0; | 68 | int ret = 0; |
65 | struct mtd_info *mtd; | 69 | struct mtd_info *mtd; |
66 | struct mtd_file_info *mfi; | 70 | struct mtd_file_info *mfi; |
71 | struct inode *mtd_ino; | ||
67 | 72 | ||
68 | DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n"); | 73 | DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n"); |
69 | 74 | ||
70 | if (devnum >= MAX_MTD_DEVICES) | ||
71 | return -ENODEV; | ||
72 | |||
73 | /* You can't open the RO devices RW */ | 75 | /* You can't open the RO devices RW */ |
74 | if ((file->f_mode & FMODE_WRITE) && (minor & 1)) | 76 | if ((file->f_mode & FMODE_WRITE) && (minor & 1)) |
75 | return -EACCES; | 77 | return -EACCES; |
@@ -88,11 +90,23 @@ static int mtd_open(struct inode *inode, struct file *file) | |||
88 | goto out; | 90 | goto out; |
89 | } | 91 | } |
90 | 92 | ||
91 | if (mtd->backing_dev_info) | 93 | mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum); |
92 | file->f_mapping->backing_dev_info = mtd->backing_dev_info; | 94 | if (!mtd_ino) { |
95 | put_mtd_device(mtd); | ||
96 | ret = -ENOMEM; | ||
97 | goto out; | ||
98 | } | ||
99 | if (mtd_ino->i_state & I_NEW) { | ||
100 | mtd_ino->i_private = mtd; | ||
101 | mtd_ino->i_mode = S_IFCHR; | ||
102 | mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info; | ||
103 | unlock_new_inode(mtd_ino); | ||
104 | } | ||
105 | file->f_mapping = mtd_ino->i_mapping; | ||
93 | 106 | ||
94 | /* You can't open it RW if it's not a writeable device */ | 107 | /* You can't open it RW if it's not a writeable device */ |
95 | if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) { | 108 | if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) { |
109 | iput(mtd_ino); | ||
96 | put_mtd_device(mtd); | 110 | put_mtd_device(mtd); |
97 | ret = -EACCES; | 111 | ret = -EACCES; |
98 | goto out; | 112 | goto out; |
@@ -100,10 +114,12 @@ static int mtd_open(struct inode *inode, struct file *file) | |||
100 | 114 | ||
101 | mfi = kzalloc(sizeof(*mfi), GFP_KERNEL); | 115 | mfi = kzalloc(sizeof(*mfi), GFP_KERNEL); |
102 | if (!mfi) { | 116 | if (!mfi) { |
117 | iput(mtd_ino); | ||
103 | put_mtd_device(mtd); | 118 | put_mtd_device(mtd); |
104 | ret = -ENOMEM; | 119 | ret = -ENOMEM; |
105 | goto out; | 120 | goto out; |
106 | } | 121 | } |
122 | mfi->ino = mtd_ino; | ||
107 | mfi->mtd = mtd; | 123 | mfi->mtd = mtd; |
108 | file->private_data = mfi; | 124 | file->private_data = mfi; |
109 | 125 | ||
@@ -125,6 +141,8 @@ static int mtd_close(struct inode *inode, struct file *file) | |||
125 | if ((file->f_mode & FMODE_WRITE) && mtd->sync) | 141 | if ((file->f_mode & FMODE_WRITE) && mtd->sync) |
126 | mtd->sync(mtd); | 142 | mtd->sync(mtd); |
127 | 143 | ||
144 | iput(mfi->ino); | ||
145 | |||
128 | put_mtd_device(mtd); | 146 | put_mtd_device(mtd); |
129 | file->private_data = NULL; | 147 | file->private_data = NULL; |
130 | kfree(mfi); | 148 | kfree(mfi); |
@@ -373,7 +391,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd, | |||
373 | if (!mtd->write_oob) | 391 | if (!mtd->write_oob) |
374 | ret = -EOPNOTSUPP; | 392 | ret = -EOPNOTSUPP; |
375 | else | 393 | else |
376 | ret = access_ok(VERIFY_READ, ptr, length) ? 0 : EFAULT; | 394 | ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT; |
377 | 395 | ||
378 | if (ret) | 396 | if (ret) |
379 | return ret; | 397 | return ret; |
@@ -482,7 +500,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file, | |||
482 | { | 500 | { |
483 | uint32_t ur_idx; | 501 | uint32_t ur_idx; |
484 | struct mtd_erase_region_info *kr; | 502 | struct mtd_erase_region_info *kr; |
485 | struct region_info_user *ur = (struct region_info_user *) argp; | 503 | struct region_info_user __user *ur = argp; |
486 | 504 | ||
487 | if (get_user(ur_idx, &(ur->regionindex))) | 505 | if (get_user(ur_idx, &(ur->regionindex))) |
488 | return -EFAULT; | 506 | return -EFAULT; |
@@ -954,22 +972,81 @@ static const struct file_operations mtd_fops = { | |||
954 | #endif | 972 | #endif |
955 | }; | 973 | }; |
956 | 974 | ||
975 | static int mtd_inodefs_get_sb(struct file_system_type *fs_type, int flags, | ||
976 | const char *dev_name, void *data, | ||
977 | struct vfsmount *mnt) | ||
978 | { | ||
979 | return get_sb_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC, | ||
980 | mnt); | ||
981 | } | ||
982 | |||
983 | static struct file_system_type mtd_inodefs_type = { | ||
984 | .name = "mtd_inodefs", | ||
985 | .get_sb = mtd_inodefs_get_sb, | ||
986 | .kill_sb = kill_anon_super, | ||
987 | }; | ||
988 | |||
989 | static void mtdchar_notify_add(struct mtd_info *mtd) | ||
990 | { | ||
991 | } | ||
992 | |||
993 | static void mtdchar_notify_remove(struct mtd_info *mtd) | ||
994 | { | ||
995 | struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index); | ||
996 | |||
997 | if (mtd_ino) { | ||
998 | /* Destroy the inode if it exists */ | ||
999 | mtd_ino->i_nlink = 0; | ||
1000 | iput(mtd_ino); | ||
1001 | } | ||
1002 | } | ||
1003 | |||
1004 | static struct mtd_notifier mtdchar_notifier = { | ||
1005 | .add = mtdchar_notify_add, | ||
1006 | .remove = mtdchar_notify_remove, | ||
1007 | }; | ||
1008 | |||
957 | static int __init init_mtdchar(void) | 1009 | static int __init init_mtdchar(void) |
958 | { | 1010 | { |
959 | int status; | 1011 | int ret; |
960 | 1012 | ||
961 | status = register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops); | 1013 | ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, |
962 | if (status < 0) { | 1014 | "mtd", &mtd_fops); |
963 | printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n", | 1015 | if (ret < 0) { |
964 | MTD_CHAR_MAJOR); | 1016 | pr_notice("Can't allocate major number %d for " |
1017 | "Memory Technology Devices.\n", MTD_CHAR_MAJOR); | ||
1018 | return ret; | ||
965 | } | 1019 | } |
966 | 1020 | ||
967 | return status; | 1021 | ret = register_filesystem(&mtd_inodefs_type); |
1022 | if (ret) { | ||
1023 | pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret); | ||
1024 | goto err_unregister_chdev; | ||
1025 | } | ||
1026 | |||
1027 | mtd_inode_mnt = kern_mount(&mtd_inodefs_type); | ||
1028 | if (IS_ERR(mtd_inode_mnt)) { | ||
1029 | ret = PTR_ERR(mtd_inode_mnt); | ||
1030 | pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret); | ||
1031 | goto err_unregister_filesystem; | ||
1032 | } | ||
1033 | register_mtd_user(&mtdchar_notifier); | ||
1034 | |||
1035 | return ret; | ||
1036 | |||
1037 | err_unregister_filesystem: | ||
1038 | unregister_filesystem(&mtd_inodefs_type); | ||
1039 | err_unregister_chdev: | ||
1040 | __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); | ||
1041 | return ret; | ||
968 | } | 1042 | } |
969 | 1043 | ||
970 | static void __exit cleanup_mtdchar(void) | 1044 | static void __exit cleanup_mtdchar(void) |
971 | { | 1045 | { |
972 | unregister_chrdev(MTD_CHAR_MAJOR, "mtd"); | 1046 | unregister_mtd_user(&mtdchar_notifier); |
1047 | mntput(mtd_inode_mnt); | ||
1048 | unregister_filesystem(&mtd_inodefs_type); | ||
1049 | __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); | ||
973 | } | 1050 | } |
974 | 1051 | ||
975 | module_init(init_mtdchar); | 1052 | module_init(init_mtdchar); |
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c index db6de74082ad..7e075621bbf4 100644 --- a/drivers/mtd/mtdconcat.c +++ b/drivers/mtd/mtdconcat.c | |||
@@ -183,10 +183,9 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs, | |||
183 | } | 183 | } |
184 | 184 | ||
185 | /* make a copy of vecs */ | 185 | /* make a copy of vecs */ |
186 | vecs_copy = kmalloc(sizeof(struct kvec) * count, GFP_KERNEL); | 186 | vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL); |
187 | if (!vecs_copy) | 187 | if (!vecs_copy) |
188 | return -ENOMEM; | 188 | return -ENOMEM; |
189 | memcpy(vecs_copy, vecs, sizeof(struct kvec) * count); | ||
190 | 189 | ||
191 | entry_low = 0; | 190 | entry_low = 0; |
192 | for (i = 0; i < concat->num_subdev; i++) { | 191 | for (i = 0; i < concat->num_subdev; i++) { |
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 5b38b17d2229..a1b8b70d2d0a 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c | |||
@@ -2,6 +2,9 @@ | |||
2 | * Core registration and callback routines for MTD | 2 | * Core registration and callback routines for MTD |
3 | * drivers and users. | 3 | * drivers and users. |
4 | * | 4 | * |
5 | * bdi bits are: | ||
6 | * Copyright © 2006 Red Hat, Inc. All Rights Reserved. | ||
7 | * Written by David Howells (dhowells@redhat.com) | ||
5 | */ | 8 | */ |
6 | 9 | ||
7 | #include <linux/module.h> | 10 | #include <linux/module.h> |
@@ -16,11 +19,41 @@ | |||
16 | #include <linux/init.h> | 19 | #include <linux/init.h> |
17 | #include <linux/mtd/compatmac.h> | 20 | #include <linux/mtd/compatmac.h> |
18 | #include <linux/proc_fs.h> | 21 | #include <linux/proc_fs.h> |
22 | #include <linux/idr.h> | ||
23 | #include <linux/backing-dev.h> | ||
24 | #include <linux/gfp.h> | ||
19 | 25 | ||
20 | #include <linux/mtd/mtd.h> | 26 | #include <linux/mtd/mtd.h> |
21 | #include "internal.h" | ||
22 | 27 | ||
23 | #include "mtdcore.h" | 28 | #include "mtdcore.h" |
29 | /* | ||
30 | * backing device capabilities for non-mappable devices (such as NAND flash) | ||
31 | * - permits private mappings, copies are taken of the data | ||
32 | */ | ||
33 | struct backing_dev_info mtd_bdi_unmappable = { | ||
34 | .capabilities = BDI_CAP_MAP_COPY, | ||
35 | }; | ||
36 | |||
37 | /* | ||
38 | * backing device capabilities for R/O mappable devices (such as ROM) | ||
39 | * - permits private mappings, copies are taken of the data | ||
40 | * - permits non-writable shared mappings | ||
41 | */ | ||
42 | struct backing_dev_info mtd_bdi_ro_mappable = { | ||
43 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | | ||
44 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP), | ||
45 | }; | ||
46 | |||
47 | /* | ||
48 | * backing device capabilities for writable mappable devices (such as RAM) | ||
49 | * - permits private mappings, copies are taken of the data | ||
50 | * - permits non-writable shared mappings | ||
51 | */ | ||
52 | struct backing_dev_info mtd_bdi_rw_mappable = { | ||
53 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | | ||
54 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP | | ||
55 | BDI_CAP_WRITE_MAP), | ||
56 | }; | ||
24 | 57 | ||
25 | static int mtd_cls_suspend(struct device *dev, pm_message_t state); | 58 | static int mtd_cls_suspend(struct device *dev, pm_message_t state); |
26 | static int mtd_cls_resume(struct device *dev); | 59 | static int mtd_cls_resume(struct device *dev); |
@@ -32,13 +65,18 @@ static struct class mtd_class = { | |||
32 | .resume = mtd_cls_resume, | 65 | .resume = mtd_cls_resume, |
33 | }; | 66 | }; |
34 | 67 | ||
68 | static DEFINE_IDR(mtd_idr); | ||
69 | |||
35 | /* These are exported solely for the purpose of mtd_blkdevs.c. You | 70 | /* These are exported solely for the purpose of mtd_blkdevs.c. You |
36 | should not use them for _anything_ else */ | 71 | should not use them for _anything_ else */ |
37 | DEFINE_MUTEX(mtd_table_mutex); | 72 | DEFINE_MUTEX(mtd_table_mutex); |
38 | struct mtd_info *mtd_table[MAX_MTD_DEVICES]; | ||
39 | |||
40 | EXPORT_SYMBOL_GPL(mtd_table_mutex); | 73 | EXPORT_SYMBOL_GPL(mtd_table_mutex); |
41 | EXPORT_SYMBOL_GPL(mtd_table); | 74 | |
75 | struct mtd_info *__mtd_next_device(int i) | ||
76 | { | ||
77 | return idr_get_next(&mtd_idr, &i); | ||
78 | } | ||
79 | EXPORT_SYMBOL_GPL(__mtd_next_device); | ||
42 | 80 | ||
43 | static LIST_HEAD(mtd_notifiers); | 81 | static LIST_HEAD(mtd_notifiers); |
44 | 82 | ||
@@ -234,13 +272,13 @@ static struct device_type mtd_devtype = { | |||
234 | * Add a device to the list of MTD devices present in the system, and | 272 | * Add a device to the list of MTD devices present in the system, and |
235 | * notify each currently active MTD 'user' of its arrival. Returns | 273 | * notify each currently active MTD 'user' of its arrival. Returns |
236 | * zero on success or 1 on failure, which currently will only happen | 274 | * zero on success or 1 on failure, which currently will only happen |
237 | * if the number of present devices exceeds MAX_MTD_DEVICES (i.e. 16) | 275 | * if there is insufficient memory or a sysfs error. |
238 | * or there's a sysfs error. | ||
239 | */ | 276 | */ |
240 | 277 | ||
241 | int add_mtd_device(struct mtd_info *mtd) | 278 | int add_mtd_device(struct mtd_info *mtd) |
242 | { | 279 | { |
243 | int i; | 280 | struct mtd_notifier *not; |
281 | int i, error; | ||
244 | 282 | ||
245 | if (!mtd->backing_dev_info) { | 283 | if (!mtd->backing_dev_info) { |
246 | switch (mtd->type) { | 284 | switch (mtd->type) { |
@@ -259,70 +297,73 @@ int add_mtd_device(struct mtd_info *mtd) | |||
259 | BUG_ON(mtd->writesize == 0); | 297 | BUG_ON(mtd->writesize == 0); |
260 | mutex_lock(&mtd_table_mutex); | 298 | mutex_lock(&mtd_table_mutex); |
261 | 299 | ||
262 | for (i=0; i < MAX_MTD_DEVICES; i++) | 300 | do { |
263 | if (!mtd_table[i]) { | 301 | if (!idr_pre_get(&mtd_idr, GFP_KERNEL)) |
264 | struct mtd_notifier *not; | 302 | goto fail_locked; |
265 | 303 | error = idr_get_new(&mtd_idr, mtd, &i); | |
266 | mtd_table[i] = mtd; | 304 | } while (error == -EAGAIN); |
267 | mtd->index = i; | ||
268 | mtd->usecount = 0; | ||
269 | |||
270 | if (is_power_of_2(mtd->erasesize)) | ||
271 | mtd->erasesize_shift = ffs(mtd->erasesize) - 1; | ||
272 | else | ||
273 | mtd->erasesize_shift = 0; | ||
274 | |||
275 | if (is_power_of_2(mtd->writesize)) | ||
276 | mtd->writesize_shift = ffs(mtd->writesize) - 1; | ||
277 | else | ||
278 | mtd->writesize_shift = 0; | ||
279 | |||
280 | mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1; | ||
281 | mtd->writesize_mask = (1 << mtd->writesize_shift) - 1; | ||
282 | |||
283 | /* Some chips always power up locked. Unlock them now */ | ||
284 | if ((mtd->flags & MTD_WRITEABLE) | ||
285 | && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) { | ||
286 | if (mtd->unlock(mtd, 0, mtd->size)) | ||
287 | printk(KERN_WARNING | ||
288 | "%s: unlock failed, " | ||
289 | "writes may not work\n", | ||
290 | mtd->name); | ||
291 | } | ||
292 | 305 | ||
293 | /* Caller should have set dev.parent to match the | 306 | if (error) |
294 | * physical device. | 307 | goto fail_locked; |
295 | */ | ||
296 | mtd->dev.type = &mtd_devtype; | ||
297 | mtd->dev.class = &mtd_class; | ||
298 | mtd->dev.devt = MTD_DEVT(i); | ||
299 | dev_set_name(&mtd->dev, "mtd%d", i); | ||
300 | dev_set_drvdata(&mtd->dev, mtd); | ||
301 | if (device_register(&mtd->dev) != 0) { | ||
302 | mtd_table[i] = NULL; | ||
303 | break; | ||
304 | } | ||
305 | 308 | ||
306 | if (MTD_DEVT(i)) | 309 | mtd->index = i; |
307 | device_create(&mtd_class, mtd->dev.parent, | 310 | mtd->usecount = 0; |
308 | MTD_DEVT(i) + 1, | 311 | |
309 | NULL, "mtd%dro", i); | 312 | if (is_power_of_2(mtd->erasesize)) |
310 | 313 | mtd->erasesize_shift = ffs(mtd->erasesize) - 1; | |
311 | DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name); | 314 | else |
312 | /* No need to get a refcount on the module containing | 315 | mtd->erasesize_shift = 0; |
313 | the notifier, since we hold the mtd_table_mutex */ | 316 | |
314 | list_for_each_entry(not, &mtd_notifiers, list) | 317 | if (is_power_of_2(mtd->writesize)) |
315 | not->add(mtd); | 318 | mtd->writesize_shift = ffs(mtd->writesize) - 1; |
316 | 319 | else | |
317 | mutex_unlock(&mtd_table_mutex); | 320 | mtd->writesize_shift = 0; |
318 | /* We _know_ we aren't being removed, because | 321 | |
319 | our caller is still holding us here. So none | 322 | mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1; |
320 | of this try_ nonsense, and no bitching about it | 323 | mtd->writesize_mask = (1 << mtd->writesize_shift) - 1; |
321 | either. :) */ | 324 | |
322 | __module_get(THIS_MODULE); | 325 | /* Some chips always power up locked. Unlock them now */ |
323 | return 0; | 326 | if ((mtd->flags & MTD_WRITEABLE) |
324 | } | 327 | && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) { |
328 | if (mtd->unlock(mtd, 0, mtd->size)) | ||
329 | printk(KERN_WARNING | ||
330 | "%s: unlock failed, writes may not work\n", | ||
331 | mtd->name); | ||
332 | } | ||
333 | |||
334 | /* Caller should have set dev.parent to match the | ||
335 | * physical device. | ||
336 | */ | ||
337 | mtd->dev.type = &mtd_devtype; | ||
338 | mtd->dev.class = &mtd_class; | ||
339 | mtd->dev.devt = MTD_DEVT(i); | ||
340 | dev_set_name(&mtd->dev, "mtd%d", i); | ||
341 | dev_set_drvdata(&mtd->dev, mtd); | ||
342 | if (device_register(&mtd->dev) != 0) | ||
343 | goto fail_added; | ||
344 | |||
345 | if (MTD_DEVT(i)) | ||
346 | device_create(&mtd_class, mtd->dev.parent, | ||
347 | MTD_DEVT(i) + 1, | ||
348 | NULL, "mtd%dro", i); | ||
349 | |||
350 | DEBUG(0, "mtd: Giving out device %d to %s\n", i, mtd->name); | ||
351 | /* No need to get a refcount on the module containing | ||
352 | the notifier, since we hold the mtd_table_mutex */ | ||
353 | list_for_each_entry(not, &mtd_notifiers, list) | ||
354 | not->add(mtd); | ||
355 | |||
356 | mutex_unlock(&mtd_table_mutex); | ||
357 | /* We _know_ we aren't being removed, because | ||
358 | our caller is still holding us here. So none | ||
359 | of this try_ nonsense, and no bitching about it | ||
360 | either. :) */ | ||
361 | __module_get(THIS_MODULE); | ||
362 | return 0; | ||
325 | 363 | ||
364 | fail_added: | ||
365 | idr_remove(&mtd_idr, i); | ||
366 | fail_locked: | ||
326 | mutex_unlock(&mtd_table_mutex); | 367 | mutex_unlock(&mtd_table_mutex); |
327 | return 1; | 368 | return 1; |
328 | } | 369 | } |
@@ -340,31 +381,34 @@ int add_mtd_device(struct mtd_info *mtd) | |||
340 | int del_mtd_device (struct mtd_info *mtd) | 381 | int del_mtd_device (struct mtd_info *mtd) |
341 | { | 382 | { |
342 | int ret; | 383 | int ret; |
384 | struct mtd_notifier *not; | ||
343 | 385 | ||
344 | mutex_lock(&mtd_table_mutex); | 386 | mutex_lock(&mtd_table_mutex); |
345 | 387 | ||
346 | if (mtd_table[mtd->index] != mtd) { | 388 | if (idr_find(&mtd_idr, mtd->index) != mtd) { |
347 | ret = -ENODEV; | 389 | ret = -ENODEV; |
348 | } else if (mtd->usecount) { | 390 | goto out_error; |
391 | } | ||
392 | |||
393 | /* No need to get a refcount on the module containing | ||
394 | the notifier, since we hold the mtd_table_mutex */ | ||
395 | list_for_each_entry(not, &mtd_notifiers, list) | ||
396 | not->remove(mtd); | ||
397 | |||
398 | if (mtd->usecount) { | ||
349 | printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n", | 399 | printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n", |
350 | mtd->index, mtd->name, mtd->usecount); | 400 | mtd->index, mtd->name, mtd->usecount); |
351 | ret = -EBUSY; | 401 | ret = -EBUSY; |
352 | } else { | 402 | } else { |
353 | struct mtd_notifier *not; | ||
354 | |||
355 | device_unregister(&mtd->dev); | 403 | device_unregister(&mtd->dev); |
356 | 404 | ||
357 | /* No need to get a refcount on the module containing | 405 | idr_remove(&mtd_idr, mtd->index); |
358 | the notifier, since we hold the mtd_table_mutex */ | ||
359 | list_for_each_entry(not, &mtd_notifiers, list) | ||
360 | not->remove(mtd); | ||
361 | |||
362 | mtd_table[mtd->index] = NULL; | ||
363 | 406 | ||
364 | module_put(THIS_MODULE); | 407 | module_put(THIS_MODULE); |
365 | ret = 0; | 408 | ret = 0; |
366 | } | 409 | } |
367 | 410 | ||
411 | out_error: | ||
368 | mutex_unlock(&mtd_table_mutex); | 412 | mutex_unlock(&mtd_table_mutex); |
369 | return ret; | 413 | return ret; |
370 | } | 414 | } |
@@ -380,7 +424,7 @@ int del_mtd_device (struct mtd_info *mtd) | |||
380 | 424 | ||
381 | void register_mtd_user (struct mtd_notifier *new) | 425 | void register_mtd_user (struct mtd_notifier *new) |
382 | { | 426 | { |
383 | int i; | 427 | struct mtd_info *mtd; |
384 | 428 | ||
385 | mutex_lock(&mtd_table_mutex); | 429 | mutex_lock(&mtd_table_mutex); |
386 | 430 | ||
@@ -388,9 +432,8 @@ void register_mtd_user (struct mtd_notifier *new) | |||
388 | 432 | ||
389 | __module_get(THIS_MODULE); | 433 | __module_get(THIS_MODULE); |
390 | 434 | ||
391 | for (i=0; i< MAX_MTD_DEVICES; i++) | 435 | mtd_for_each_device(mtd) |
392 | if (mtd_table[i]) | 436 | new->add(mtd); |
393 | new->add(mtd_table[i]); | ||
394 | 437 | ||
395 | mutex_unlock(&mtd_table_mutex); | 438 | mutex_unlock(&mtd_table_mutex); |
396 | } | 439 | } |
@@ -407,15 +450,14 @@ void register_mtd_user (struct mtd_notifier *new) | |||
407 | 450 | ||
408 | int unregister_mtd_user (struct mtd_notifier *old) | 451 | int unregister_mtd_user (struct mtd_notifier *old) |
409 | { | 452 | { |
410 | int i; | 453 | struct mtd_info *mtd; |
411 | 454 | ||
412 | mutex_lock(&mtd_table_mutex); | 455 | mutex_lock(&mtd_table_mutex); |
413 | 456 | ||
414 | module_put(THIS_MODULE); | 457 | module_put(THIS_MODULE); |
415 | 458 | ||
416 | for (i=0; i< MAX_MTD_DEVICES; i++) | 459 | mtd_for_each_device(mtd) |
417 | if (mtd_table[i]) | 460 | old->remove(mtd); |
418 | old->remove(mtd_table[i]); | ||
419 | 461 | ||
420 | list_del(&old->list); | 462 | list_del(&old->list); |
421 | mutex_unlock(&mtd_table_mutex); | 463 | mutex_unlock(&mtd_table_mutex); |
@@ -437,42 +479,56 @@ int unregister_mtd_user (struct mtd_notifier *old) | |||
437 | 479 | ||
438 | struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num) | 480 | struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num) |
439 | { | 481 | { |
440 | struct mtd_info *ret = NULL; | 482 | struct mtd_info *ret = NULL, *other; |
441 | int i, err = -ENODEV; | 483 | int err = -ENODEV; |
442 | 484 | ||
443 | mutex_lock(&mtd_table_mutex); | 485 | mutex_lock(&mtd_table_mutex); |
444 | 486 | ||
445 | if (num == -1) { | 487 | if (num == -1) { |
446 | for (i=0; i< MAX_MTD_DEVICES; i++) | 488 | mtd_for_each_device(other) { |
447 | if (mtd_table[i] == mtd) | 489 | if (other == mtd) { |
448 | ret = mtd_table[i]; | 490 | ret = mtd; |
449 | } else if (num >= 0 && num < MAX_MTD_DEVICES) { | 491 | break; |
450 | ret = mtd_table[num]; | 492 | } |
493 | } | ||
494 | } else if (num >= 0) { | ||
495 | ret = idr_find(&mtd_idr, num); | ||
451 | if (mtd && mtd != ret) | 496 | if (mtd && mtd != ret) |
452 | ret = NULL; | 497 | ret = NULL; |
453 | } | 498 | } |
454 | 499 | ||
455 | if (!ret) | 500 | if (!ret) { |
456 | goto out_unlock; | 501 | ret = ERR_PTR(err); |
457 | 502 | goto out; | |
458 | if (!try_module_get(ret->owner)) | ||
459 | goto out_unlock; | ||
460 | |||
461 | if (ret->get_device) { | ||
462 | err = ret->get_device(ret); | ||
463 | if (err) | ||
464 | goto out_put; | ||
465 | } | 503 | } |
466 | 504 | ||
467 | ret->usecount++; | 505 | err = __get_mtd_device(ret); |
506 | if (err) | ||
507 | ret = ERR_PTR(err); | ||
508 | out: | ||
468 | mutex_unlock(&mtd_table_mutex); | 509 | mutex_unlock(&mtd_table_mutex); |
469 | return ret; | 510 | return ret; |
511 | } | ||
470 | 512 | ||
471 | out_put: | 513 | |
472 | module_put(ret->owner); | 514 | int __get_mtd_device(struct mtd_info *mtd) |
473 | out_unlock: | 515 | { |
474 | mutex_unlock(&mtd_table_mutex); | 516 | int err; |
475 | return ERR_PTR(err); | 517 | |
518 | if (!try_module_get(mtd->owner)) | ||
519 | return -ENODEV; | ||
520 | |||
521 | if (mtd->get_device) { | ||
522 | |||
523 | err = mtd->get_device(mtd); | ||
524 | |||
525 | if (err) { | ||
526 | module_put(mtd->owner); | ||
527 | return err; | ||
528 | } | ||
529 | } | ||
530 | mtd->usecount++; | ||
531 | return 0; | ||
476 | } | 532 | } |
477 | 533 | ||
478 | /** | 534 | /** |
@@ -486,14 +542,14 @@ out_unlock: | |||
486 | 542 | ||
487 | struct mtd_info *get_mtd_device_nm(const char *name) | 543 | struct mtd_info *get_mtd_device_nm(const char *name) |
488 | { | 544 | { |
489 | int i, err = -ENODEV; | 545 | int err = -ENODEV; |
490 | struct mtd_info *mtd = NULL; | 546 | struct mtd_info *mtd = NULL, *other; |
491 | 547 | ||
492 | mutex_lock(&mtd_table_mutex); | 548 | mutex_lock(&mtd_table_mutex); |
493 | 549 | ||
494 | for (i = 0; i < MAX_MTD_DEVICES; i++) { | 550 | mtd_for_each_device(other) { |
495 | if (mtd_table[i] && !strcmp(name, mtd_table[i]->name)) { | 551 | if (!strcmp(name, other->name)) { |
496 | mtd = mtd_table[i]; | 552 | mtd = other; |
497 | break; | 553 | break; |
498 | } | 554 | } |
499 | } | 555 | } |
@@ -523,14 +579,19 @@ out_unlock: | |||
523 | 579 | ||
524 | void put_mtd_device(struct mtd_info *mtd) | 580 | void put_mtd_device(struct mtd_info *mtd) |
525 | { | 581 | { |
526 | int c; | ||
527 | |||
528 | mutex_lock(&mtd_table_mutex); | 582 | mutex_lock(&mtd_table_mutex); |
529 | c = --mtd->usecount; | 583 | __put_mtd_device(mtd); |
584 | mutex_unlock(&mtd_table_mutex); | ||
585 | |||
586 | } | ||
587 | |||
588 | void __put_mtd_device(struct mtd_info *mtd) | ||
589 | { | ||
590 | --mtd->usecount; | ||
591 | BUG_ON(mtd->usecount < 0); | ||
592 | |||
530 | if (mtd->put_device) | 593 | if (mtd->put_device) |
531 | mtd->put_device(mtd); | 594 | mtd->put_device(mtd); |
532 | mutex_unlock(&mtd_table_mutex); | ||
533 | BUG_ON(c < 0); | ||
534 | 595 | ||
535 | module_put(mtd->owner); | 596 | module_put(mtd->owner); |
536 | } | 597 | } |
@@ -568,7 +629,9 @@ EXPORT_SYMBOL_GPL(add_mtd_device); | |||
568 | EXPORT_SYMBOL_GPL(del_mtd_device); | 629 | EXPORT_SYMBOL_GPL(del_mtd_device); |
569 | EXPORT_SYMBOL_GPL(get_mtd_device); | 630 | EXPORT_SYMBOL_GPL(get_mtd_device); |
570 | EXPORT_SYMBOL_GPL(get_mtd_device_nm); | 631 | EXPORT_SYMBOL_GPL(get_mtd_device_nm); |
632 | EXPORT_SYMBOL_GPL(__get_mtd_device); | ||
571 | EXPORT_SYMBOL_GPL(put_mtd_device); | 633 | EXPORT_SYMBOL_GPL(put_mtd_device); |
634 | EXPORT_SYMBOL_GPL(__put_mtd_device); | ||
572 | EXPORT_SYMBOL_GPL(register_mtd_user); | 635 | EXPORT_SYMBOL_GPL(register_mtd_user); |
573 | EXPORT_SYMBOL_GPL(unregister_mtd_user); | 636 | EXPORT_SYMBOL_GPL(unregister_mtd_user); |
574 | EXPORT_SYMBOL_GPL(default_mtd_writev); | 637 | EXPORT_SYMBOL_GPL(default_mtd_writev); |
@@ -580,14 +643,9 @@ EXPORT_SYMBOL_GPL(default_mtd_writev); | |||
580 | 643 | ||
581 | static struct proc_dir_entry *proc_mtd; | 644 | static struct proc_dir_entry *proc_mtd; |
582 | 645 | ||
583 | static inline int mtd_proc_info (char *buf, int i) | 646 | static inline int mtd_proc_info(char *buf, struct mtd_info *this) |
584 | { | 647 | { |
585 | struct mtd_info *this = mtd_table[i]; | 648 | return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", this->index, |
586 | |||
587 | if (!this) | ||
588 | return 0; | ||
589 | |||
590 | return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", i, | ||
591 | (unsigned long long)this->size, | 649 | (unsigned long long)this->size, |
592 | this->erasesize, this->name); | 650 | this->erasesize, this->name); |
593 | } | 651 | } |
@@ -595,15 +653,15 @@ static inline int mtd_proc_info (char *buf, int i) | |||
595 | static int mtd_read_proc (char *page, char **start, off_t off, int count, | 653 | static int mtd_read_proc (char *page, char **start, off_t off, int count, |
596 | int *eof, void *data_unused) | 654 | int *eof, void *data_unused) |
597 | { | 655 | { |
598 | int len, l, i; | 656 | struct mtd_info *mtd; |
657 | int len, l; | ||
599 | off_t begin = 0; | 658 | off_t begin = 0; |
600 | 659 | ||
601 | mutex_lock(&mtd_table_mutex); | 660 | mutex_lock(&mtd_table_mutex); |
602 | 661 | ||
603 | len = sprintf(page, "dev: size erasesize name\n"); | 662 | len = sprintf(page, "dev: size erasesize name\n"); |
604 | for (i=0; i< MAX_MTD_DEVICES; i++) { | 663 | mtd_for_each_device(mtd) { |
605 | 664 | l = mtd_proc_info(page + len, mtd); | |
606 | l = mtd_proc_info(page + len, i); | ||
607 | len += l; | 665 | len += l; |
608 | if (len+begin > off+count) | 666 | if (len+begin > off+count) |
609 | goto done; | 667 | goto done; |
@@ -628,20 +686,55 @@ done: | |||
628 | /*====================================================================*/ | 686 | /*====================================================================*/ |
629 | /* Init code */ | 687 | /* Init code */ |
630 | 688 | ||
689 | static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name) | ||
690 | { | ||
691 | int ret; | ||
692 | |||
693 | ret = bdi_init(bdi); | ||
694 | if (!ret) | ||
695 | ret = bdi_register(bdi, NULL, name); | ||
696 | |||
697 | if (ret) | ||
698 | bdi_destroy(bdi); | ||
699 | |||
700 | return ret; | ||
701 | } | ||
702 | |||
631 | static int __init init_mtd(void) | 703 | static int __init init_mtd(void) |
632 | { | 704 | { |
633 | int ret; | 705 | int ret; |
706 | |||
634 | ret = class_register(&mtd_class); | 707 | ret = class_register(&mtd_class); |
708 | if (ret) | ||
709 | goto err_reg; | ||
710 | |||
711 | ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap"); | ||
712 | if (ret) | ||
713 | goto err_bdi1; | ||
714 | |||
715 | ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap"); | ||
716 | if (ret) | ||
717 | goto err_bdi2; | ||
718 | |||
719 | ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap"); | ||
720 | if (ret) | ||
721 | goto err_bdi3; | ||
635 | 722 | ||
636 | if (ret) { | ||
637 | pr_err("Error registering mtd class: %d\n", ret); | ||
638 | return ret; | ||
639 | } | ||
640 | #ifdef CONFIG_PROC_FS | 723 | #ifdef CONFIG_PROC_FS |
641 | if ((proc_mtd = create_proc_entry( "mtd", 0, NULL ))) | 724 | if ((proc_mtd = create_proc_entry( "mtd", 0, NULL ))) |
642 | proc_mtd->read_proc = mtd_read_proc; | 725 | proc_mtd->read_proc = mtd_read_proc; |
643 | #endif /* CONFIG_PROC_FS */ | 726 | #endif /* CONFIG_PROC_FS */ |
644 | return 0; | 727 | return 0; |
728 | |||
729 | err_bdi3: | ||
730 | bdi_destroy(&mtd_bdi_ro_mappable); | ||
731 | err_bdi2: | ||
732 | bdi_destroy(&mtd_bdi_unmappable); | ||
733 | err_bdi1: | ||
734 | class_unregister(&mtd_class); | ||
735 | err_reg: | ||
736 | pr_err("Error registering mtd class or bdi: %d\n", ret); | ||
737 | return ret; | ||
645 | } | 738 | } |
646 | 739 | ||
647 | static void __exit cleanup_mtd(void) | 740 | static void __exit cleanup_mtd(void) |
@@ -651,6 +744,9 @@ static void __exit cleanup_mtd(void) | |||
651 | remove_proc_entry( "mtd", NULL); | 744 | remove_proc_entry( "mtd", NULL); |
652 | #endif /* CONFIG_PROC_FS */ | 745 | #endif /* CONFIG_PROC_FS */ |
653 | class_unregister(&mtd_class); | 746 | class_unregister(&mtd_class); |
747 | bdi_destroy(&mtd_bdi_unmappable); | ||
748 | bdi_destroy(&mtd_bdi_ro_mappable); | ||
749 | bdi_destroy(&mtd_bdi_rw_mappable); | ||
654 | } | 750 | } |
655 | 751 | ||
656 | module_init(init_mtd); | 752 | module_init(init_mtd); |
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h index a33251f4b872..6a64fdebc898 100644 --- a/drivers/mtd/mtdcore.h +++ b/drivers/mtd/mtdcore.h | |||
@@ -8,4 +8,9 @@ | |||
8 | should not use them for _anything_ else */ | 8 | should not use them for _anything_ else */ |
9 | 9 | ||
10 | extern struct mutex mtd_table_mutex; | 10 | extern struct mutex mtd_table_mutex; |
11 | extern struct mtd_info *mtd_table[MAX_MTD_DEVICES]; | 11 | extern struct mtd_info *__mtd_next_device(int i); |
12 | |||
13 | #define mtd_for_each_device(mtd) \ | ||
14 | for ((mtd) = __mtd_next_device(0); \ | ||
15 | (mtd) != NULL; \ | ||
16 | (mtd) = __mtd_next_device(mtd->index + 1)) | ||
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index 92e12df0917f..328313c3dccb 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c | |||
@@ -429,11 +429,6 @@ static int __init mtdoops_init(void) | |||
429 | mtd_index = simple_strtoul(mtddev, &endp, 0); | 429 | mtd_index = simple_strtoul(mtddev, &endp, 0); |
430 | if (*endp == '\0') | 430 | if (*endp == '\0') |
431 | cxt->mtd_index = mtd_index; | 431 | cxt->mtd_index = mtd_index; |
432 | if (cxt->mtd_index > MAX_MTD_DEVICES) { | ||
433 | printk(KERN_ERR "mtdoops: invalid mtd device number (%u) given\n", | ||
434 | mtd_index); | ||
435 | return -EINVAL; | ||
436 | } | ||
437 | 432 | ||
438 | cxt->oops_buf = vmalloc(record_size); | 433 | cxt->oops_buf = vmalloc(record_size); |
439 | if (!cxt->oops_buf) { | 434 | if (!cxt->oops_buf) { |
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c index af8b42e0a55b..bd9a443ccf69 100644 --- a/drivers/mtd/mtdsuper.c +++ b/drivers/mtd/mtdsuper.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/mtd/super.h> | 13 | #include <linux/mtd/super.h> |
14 | #include <linux/namei.h> | 14 | #include <linux/namei.h> |
15 | #include <linux/ctype.h> | 15 | #include <linux/ctype.h> |
16 | #include <linux/slab.h> | ||
16 | 17 | ||
17 | /* | 18 | /* |
18 | * compare superblocks to see if they're equivalent | 19 | * compare superblocks to see if they're equivalent |
@@ -44,6 +45,7 @@ static int get_sb_mtd_set(struct super_block *sb, void *_mtd) | |||
44 | 45 | ||
45 | sb->s_mtd = mtd; | 46 | sb->s_mtd = mtd; |
46 | sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, mtd->index); | 47 | sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, mtd->index); |
48 | sb->s_bdi = mtd->backing_dev_info; | ||
47 | return 0; | 49 | return 0; |
48 | } | 50 | } |
49 | 51 | ||
@@ -150,18 +152,12 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags, | |||
150 | DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n", | 152 | DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n", |
151 | dev_name + 4); | 153 | dev_name + 4); |
152 | 154 | ||
153 | for (mtdnr = 0; mtdnr < MAX_MTD_DEVICES; mtdnr++) { | 155 | mtd = get_mtd_device_nm(dev_name + 4); |
154 | mtd = get_mtd_device(NULL, mtdnr); | 156 | if (!IS_ERR(mtd)) |
155 | if (!IS_ERR(mtd)) { | 157 | return get_sb_mtd_aux( |
156 | if (!strcmp(mtd->name, dev_name + 4)) | 158 | fs_type, flags, |
157 | return get_sb_mtd_aux( | 159 | dev_name, data, mtd, |
158 | fs_type, flags, | 160 | fill_super, mnt); |
159 | dev_name, data, mtd, | ||
160 | fill_super, mnt); | ||
161 | |||
162 | put_mtd_device(mtd); | ||
163 | } | ||
164 | } | ||
165 | 161 | ||
166 | printk(KERN_NOTICE "MTD:" | 162 | printk(KERN_NOTICE "MTD:" |
167 | " MTD device with name \"%s\" not found.\n", | 163 | " MTD device with name \"%s\" not found.\n", |
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 42e5ea49e975..98a04b3c9526 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig | |||
@@ -2,11 +2,23 @@ menuconfig MTD_NAND | |||
2 | tristate "NAND Device Support" | 2 | tristate "NAND Device Support" |
3 | depends on MTD | 3 | depends on MTD |
4 | select MTD_NAND_IDS | 4 | select MTD_NAND_IDS |
5 | select MTD_NAND_ECC | ||
5 | help | 6 | help |
6 | This enables support for accessing all type of NAND flash | 7 | This enables support for accessing all type of NAND flash |
7 | devices. For further information see | 8 | devices. For further information see |
8 | <http://www.linux-mtd.infradead.org/doc/nand.html>. | 9 | <http://www.linux-mtd.infradead.org/doc/nand.html>. |
9 | 10 | ||
11 | config MTD_NAND_ECC | ||
12 | tristate | ||
13 | |||
14 | config MTD_NAND_ECC_SMC | ||
15 | bool "NAND ECC Smart Media byte order" | ||
16 | depends on MTD_NAND_ECC | ||
17 | default n | ||
18 | help | ||
19 | Software ECC according to the Smart Media Specification. | ||
20 | The original Linux implementation had byte 0 and 1 swapped. | ||
21 | |||
10 | if MTD_NAND | 22 | if MTD_NAND |
11 | 23 | ||
12 | config MTD_NAND_VERIFY_WRITE | 24 | config MTD_NAND_VERIFY_WRITE |
@@ -18,12 +30,9 @@ config MTD_NAND_VERIFY_WRITE | |||
18 | device thinks the write was successful, a bit could have been | 30 | device thinks the write was successful, a bit could have been |
19 | flipped accidentally due to device wear or something else. | 31 | flipped accidentally due to device wear or something else. |
20 | 32 | ||
21 | config MTD_NAND_ECC_SMC | 33 | config MTD_SM_COMMON |
22 | bool "NAND ECC Smart Media byte order" | 34 | tristate |
23 | default n | 35 | default n |
24 | help | ||
25 | Software ECC according to the Smart Media Specification. | ||
26 | The original Linux implementation had byte 0 and 1 swapped. | ||
27 | 36 | ||
28 | config MTD_NAND_MUSEUM_IDS | 37 | config MTD_NAND_MUSEUM_IDS |
29 | bool "Enable chip ids for obsolete ancient NAND devices" | 38 | bool "Enable chip ids for obsolete ancient NAND devices" |
@@ -41,6 +50,23 @@ config MTD_NAND_AUTCPU12 | |||
41 | This enables the driver for the autronix autcpu12 board to | 50 | This enables the driver for the autronix autcpu12 board to |
42 | access the SmartMediaCard. | 51 | access the SmartMediaCard. |
43 | 52 | ||
53 | config MTD_NAND_DENALI | ||
54 | depends on PCI | ||
55 | tristate "Support Denali NAND controller on Intel Moorestown" | ||
56 | help | ||
57 | Enable the driver for NAND flash on Intel Moorestown, using the | ||
58 | Denali NAND controller core. | ||
59 | |||
60 | config MTD_NAND_DENALI_SCRATCH_REG_ADDR | ||
61 | hex "Denali NAND size scratch register address" | ||
62 | default "0xFF108018" | ||
63 | help | ||
64 | Some platforms place the NAND chip size in a scratch register | ||
65 | because (some versions of) the driver aren't able to automatically | ||
66 | determine the size of certain chips. Set the address of the | ||
67 | scratch register here to enable this feature. On Intel Moorestown | ||
68 | boards, the scratch register is at 0xFF108018. | ||
69 | |||
44 | config MTD_NAND_EDB7312 | 70 | config MTD_NAND_EDB7312 |
45 | tristate "Support for Cirrus Logic EBD7312 evaluation board" | 71 | tristate "Support for Cirrus Logic EBD7312 evaluation board" |
46 | depends on ARCH_EDB7312 | 72 | depends on ARCH_EDB7312 |
@@ -95,15 +121,21 @@ config MTD_NAND_OMAP_PREFETCH_DMA | |||
95 | or in DMA interrupt mode. | 121 | or in DMA interrupt mode. |
96 | Say y for DMA mode or MPU mode will be used | 122 | Say y for DMA mode or MPU mode will be used |
97 | 123 | ||
98 | config MTD_NAND_TS7250 | ||
99 | tristate "NAND Flash device on TS-7250 board" | ||
100 | depends on MACH_TS72XX | ||
101 | help | ||
102 | Support for NAND flash on Technologic Systems TS-7250 platform. | ||
103 | |||
104 | config MTD_NAND_IDS | 124 | config MTD_NAND_IDS |
105 | tristate | 125 | tristate |
106 | 126 | ||
127 | config MTD_NAND_RICOH | ||
128 | tristate "Ricoh xD card reader" | ||
129 | default n | ||
130 | depends on PCI | ||
131 | select MTD_SM_COMMON | ||
132 | help | ||
133 | Enable support for Ricoh R5C852 xD card reader | ||
134 | You also need to enable ether | ||
135 | NAND SSFDC (SmartMedia) read only translation layer' or new | ||
136 | expermental, readwrite | ||
137 | 'SmartMedia/xD new translation layer' | ||
138 | |||
107 | config MTD_NAND_AU1550 | 139 | config MTD_NAND_AU1550 |
108 | tristate "Au1550/1200 NAND support" | 140 | tristate "Au1550/1200 NAND support" |
109 | depends on SOC_AU1200 || SOC_AU1550 | 141 | depends on SOC_AU1200 || SOC_AU1550 |
@@ -358,8 +390,6 @@ config MTD_NAND_ATMEL_ECC_NONE | |||
358 | 390 | ||
359 | If unsure, say N | 391 | If unsure, say N |
360 | 392 | ||
361 | endchoice | ||
362 | |||
363 | endchoice | 393 | endchoice |
364 | 394 | ||
365 | config MTD_NAND_PXA3xx | 395 | config MTD_NAND_PXA3xx |
@@ -442,6 +472,13 @@ config MTD_NAND_FSL_UPM | |||
442 | Enables support for NAND Flash chips wired onto Freescale PowerPC | 472 | Enables support for NAND Flash chips wired onto Freescale PowerPC |
443 | processor localbus with User-Programmable Machine support. | 473 | processor localbus with User-Programmable Machine support. |
444 | 474 | ||
475 | config MTD_NAND_MPC5121_NFC | ||
476 | tristate "MPC5121 built-in NAND Flash Controller support" | ||
477 | depends on PPC_MPC512x | ||
478 | help | ||
479 | This enables the driver for the NAND flash controller on the | ||
480 | MPC5121 SoC. | ||
481 | |||
445 | config MTD_NAND_MXC | 482 | config MTD_NAND_MXC |
446 | tristate "MXC NAND support" | 483 | tristate "MXC NAND support" |
447 | depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3 | 484 | depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3 |
@@ -481,11 +518,11 @@ config MTD_NAND_SOCRATES | |||
481 | help | 518 | help |
482 | Enables support for NAND Flash chips wired onto Socrates board. | 519 | Enables support for NAND Flash chips wired onto Socrates board. |
483 | 520 | ||
484 | config MTD_NAND_W90P910 | 521 | config MTD_NAND_NUC900 |
485 | tristate "Support for NAND on w90p910 evaluation board." | 522 | tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards." |
486 | depends on ARCH_W90X900 && MTD_PARTITIONS | 523 | depends on ARCH_W90X900 && MTD_PARTITIONS |
487 | help | 524 | help |
488 | This enables the driver for the NAND Flash on evaluation board based | 525 | This enables the driver for the NAND Flash on evaluation board based |
489 | on w90p910. | 526 | on w90p910 / NUC9xx. |
490 | 527 | ||
491 | endif # MTD_NAND | 528 | endif # MTD_NAND |
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index 1407bd144015..e8ab884ba47b 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile | |||
@@ -2,13 +2,16 @@ | |||
2 | # linux/drivers/nand/Makefile | 2 | # linux/drivers/nand/Makefile |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_MTD_NAND) += nand.o nand_ecc.o | 5 | obj-$(CONFIG_MTD_NAND) += nand.o |
6 | obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o | ||
6 | obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o | 7 | obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o |
8 | obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o | ||
7 | 9 | ||
8 | obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o | 10 | obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o |
9 | obj-$(CONFIG_MTD_NAND_SPIA) += spia.o | 11 | obj-$(CONFIG_MTD_NAND_SPIA) += spia.o |
10 | obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o | 12 | obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o |
11 | obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o | 13 | obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o |
14 | obj-$(CONFIG_MTD_NAND_DENALI) += denali.o | ||
12 | obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o | 15 | obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o |
13 | obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o | 16 | obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o |
14 | obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o | 17 | obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o |
@@ -19,7 +22,6 @@ obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o | |||
19 | obj-$(CONFIG_MTD_NAND_H1900) += h1910.o | 22 | obj-$(CONFIG_MTD_NAND_H1900) += h1910.o |
20 | obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o | 23 | obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o |
21 | obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o | 24 | obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o |
22 | obj-$(CONFIG_MTD_NAND_TS7250) += ts7250.o | ||
23 | obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o | 25 | obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o |
24 | obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o | 26 | obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o |
25 | obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o | 27 | obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o |
@@ -39,8 +41,10 @@ obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o | |||
39 | obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o | 41 | obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o |
40 | obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o | 42 | obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o |
41 | obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o | 43 | obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o |
42 | obj-$(CONFIG_MTD_NAND_W90P910) += w90p910_nand.o | 44 | obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o |
43 | obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o | 45 | obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o |
44 | obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o | 46 | obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o |
47 | obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o | ||
48 | obj-$(CONFIG_MTD_NAND_RICOH) += r852.o | ||
45 | 49 | ||
46 | nand-objs := nand_base.o nand_bbt.o | 50 | nand-objs := nand_base.o nand_bbt.o |
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c index 2d6773281fd9..8691e0482ed2 100644 --- a/drivers/mtd/nand/alauda.c +++ b/drivers/mtd/nand/alauda.c | |||
@@ -49,7 +49,7 @@ | |||
49 | 49 | ||
50 | #define TIMEOUT HZ | 50 | #define TIMEOUT HZ |
51 | 51 | ||
52 | static struct usb_device_id alauda_table [] = { | 52 | static const struct usb_device_id alauda_table[] = { |
53 | { USB_DEVICE(0x0584, 0x0008) }, /* Fujifilm DPC-R1 */ | 53 | { USB_DEVICE(0x0584, 0x0008) }, /* Fujifilm DPC-R1 */ |
54 | { USB_DEVICE(0x07b4, 0x010a) }, /* Olympus MAUSB-10 */ | 54 | { USB_DEVICE(0x07b4, 0x010a) }, /* Olympus MAUSB-10 */ |
55 | { } | 55 | { } |
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index 524e6c9e0672..04d30887ca7f 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c | |||
@@ -474,7 +474,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
474 | } | 474 | } |
475 | 475 | ||
476 | /* first scan to find the device and get the page size */ | 476 | /* first scan to find the device and get the page size */ |
477 | if (nand_scan_ident(mtd, 1)) { | 477 | if (nand_scan_ident(mtd, 1, NULL)) { |
478 | res = -ENXIO; | 478 | res = -ENXIO; |
479 | goto err_scan_ident; | 479 | goto err_scan_ident; |
480 | } | 480 | } |
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c index 43d46e424040..3ffe05db4923 100644 --- a/drivers/mtd/nand/au1550nd.c +++ b/drivers/mtd/nand/au1550nd.c | |||
@@ -451,7 +451,7 @@ static int __init au1xxx_nand_init(void) | |||
451 | u32 nand_phys; | 451 | u32 nand_phys; |
452 | 452 | ||
453 | /* Allocate memory for MTD device structure and private data */ | 453 | /* Allocate memory for MTD device structure and private data */ |
454 | au1550_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); | 454 | au1550_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); |
455 | if (!au1550_mtd) { | 455 | if (!au1550_mtd) { |
456 | printk("Unable to allocate NAND MTD dev structure.\n"); | 456 | printk("Unable to allocate NAND MTD dev structure.\n"); |
457 | return -ENOMEM; | 457 | return -ENOMEM; |
@@ -460,10 +460,6 @@ static int __init au1xxx_nand_init(void) | |||
460 | /* Get pointer to private data */ | 460 | /* Get pointer to private data */ |
461 | this = (struct nand_chip *)(&au1550_mtd[1]); | 461 | this = (struct nand_chip *)(&au1550_mtd[1]); |
462 | 462 | ||
463 | /* Initialize structures */ | ||
464 | memset(au1550_mtd, 0, sizeof(struct mtd_info)); | ||
465 | memset(this, 0, sizeof(struct nand_chip)); | ||
466 | |||
467 | /* Link the private data with the MTD structure */ | 463 | /* Link the private data with the MTD structure */ |
468 | au1550_mtd->priv = this; | 464 | au1550_mtd->priv = this; |
469 | au1550_mtd->owner = THIS_MODULE; | 465 | au1550_mtd->owner = THIS_MODULE; |
@@ -544,7 +540,7 @@ static int __init au1xxx_nand_init(void) | |||
544 | } | 540 | } |
545 | nand_phys = (mem_staddr << 4) & 0xFFFC0000; | 541 | nand_phys = (mem_staddr << 4) & 0xFFFC0000; |
546 | 542 | ||
547 | p_nand = (void __iomem *)ioremap(nand_phys, 0x1000); | 543 | p_nand = ioremap(nand_phys, 0x1000); |
548 | 544 | ||
549 | /* make controller and MTD agree */ | 545 | /* make controller and MTD agree */ |
550 | if (NAND_CS == 0) | 546 | if (NAND_CS == 0) |
@@ -589,7 +585,7 @@ static int __init au1xxx_nand_init(void) | |||
589 | return 0; | 585 | return 0; |
590 | 586 | ||
591 | outio: | 587 | outio: |
592 | iounmap((void *)p_nand); | 588 | iounmap(p_nand); |
593 | 589 | ||
594 | outmem: | 590 | outmem: |
595 | kfree(au1550_mtd); | 591 | kfree(au1550_mtd); |
@@ -610,7 +606,7 @@ static void __exit au1550_cleanup(void) | |||
610 | kfree(au1550_mtd); | 606 | kfree(au1550_mtd); |
611 | 607 | ||
612 | /* Unmap */ | 608 | /* Unmap */ |
613 | iounmap((void *)p_nand); | 609 | iounmap(p_nand); |
614 | } | 610 | } |
615 | 611 | ||
616 | module_exit(au1550_cleanup); | 612 | module_exit(au1550_cleanup); |
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c index c997f98eeb3d..dfe262c726fb 100644 --- a/drivers/mtd/nand/bcm_umi_nand.c +++ b/drivers/mtd/nand/bcm_umi_nand.c | |||
@@ -13,7 +13,6 @@ | |||
13 | *****************************************************************************/ | 13 | *****************************************************************************/ |
14 | 14 | ||
15 | /* ---- Include Files ---------------------------------------------------- */ | 15 | /* ---- Include Files ---------------------------------------------------- */ |
16 | #include <linux/version.h> | ||
17 | #include <linux/module.h> | 16 | #include <linux/module.h> |
18 | #include <linux/types.h> | 17 | #include <linux/types.h> |
19 | #include <linux/init.h> | 18 | #include <linux/init.h> |
@@ -447,7 +446,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev) | |||
447 | * layout we'll be using. | 446 | * layout we'll be using. |
448 | */ | 447 | */ |
449 | 448 | ||
450 | err = nand_scan_ident(board_mtd, 1); | 449 | err = nand_scan_ident(board_mtd, 1, NULL); |
451 | if (err) { | 450 | if (err) { |
452 | printk(KERN_ERR "nand_scan failed: %d\n", err); | 451 | printk(KERN_ERR "nand_scan failed: %d\n", err); |
453 | iounmap(bcm_umi_io_base); | 452 | iounmap(bcm_umi_io_base); |
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c index 8506e7e606fd..2974995e194d 100644 --- a/drivers/mtd/nand/bf5xx_nand.c +++ b/drivers/mtd/nand/bf5xx_nand.c | |||
@@ -68,6 +68,27 @@ | |||
68 | #define DRV_AUTHOR "Bryan Wu <bryan.wu@analog.com>" | 68 | #define DRV_AUTHOR "Bryan Wu <bryan.wu@analog.com>" |
69 | #define DRV_DESC "BF5xx on-chip NAND FLash Controller Driver" | 69 | #define DRV_DESC "BF5xx on-chip NAND FLash Controller Driver" |
70 | 70 | ||
71 | /* NFC_STAT Masks */ | ||
72 | #define NBUSY 0x01 /* Not Busy */ | ||
73 | #define WB_FULL 0x02 /* Write Buffer Full */ | ||
74 | #define PG_WR_STAT 0x04 /* Page Write Pending */ | ||
75 | #define PG_RD_STAT 0x08 /* Page Read Pending */ | ||
76 | #define WB_EMPTY 0x10 /* Write Buffer Empty */ | ||
77 | |||
78 | /* NFC_IRQSTAT Masks */ | ||
79 | #define NBUSYIRQ 0x01 /* Not Busy IRQ */ | ||
80 | #define WB_OVF 0x02 /* Write Buffer Overflow */ | ||
81 | #define WB_EDGE 0x04 /* Write Buffer Edge Detect */ | ||
82 | #define RD_RDY 0x08 /* Read Data Ready */ | ||
83 | #define WR_DONE 0x10 /* Page Write Done */ | ||
84 | |||
85 | /* NFC_RST Masks */ | ||
86 | #define ECC_RST 0x01 /* ECC (and NFC counters) Reset */ | ||
87 | |||
88 | /* NFC_PGCTL Masks */ | ||
89 | #define PG_RD_START 0x01 /* Page Read Start */ | ||
90 | #define PG_WR_START 0x02 /* Page Write Start */ | ||
91 | |||
71 | #ifdef CONFIG_MTD_NAND_BF5XX_HWECC | 92 | #ifdef CONFIG_MTD_NAND_BF5XX_HWECC |
72 | static int hardware_ecc = 1; | 93 | static int hardware_ecc = 1; |
73 | #else | 94 | #else |
@@ -487,7 +508,7 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd, | |||
487 | * transferred to generate the correct ECC register | 508 | * transferred to generate the correct ECC register |
488 | * values. | 509 | * values. |
489 | */ | 510 | */ |
490 | bfin_write_NFC_RST(0x1); | 511 | bfin_write_NFC_RST(ECC_RST); |
491 | SSYNC(); | 512 | SSYNC(); |
492 | 513 | ||
493 | disable_dma(CH_NFC); | 514 | disable_dma(CH_NFC); |
@@ -497,7 +518,7 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd, | |||
497 | set_dma_config(CH_NFC, 0x0); | 518 | set_dma_config(CH_NFC, 0x0); |
498 | set_dma_start_addr(CH_NFC, (unsigned long) buf); | 519 | set_dma_start_addr(CH_NFC, (unsigned long) buf); |
499 | 520 | ||
500 | /* The DMAs have different size on BF52x and BF54x */ | 521 | /* The DMAs have different size on BF52x and BF54x */ |
501 | #ifdef CONFIG_BF52x | 522 | #ifdef CONFIG_BF52x |
502 | set_dma_x_count(CH_NFC, (page_size >> 1)); | 523 | set_dma_x_count(CH_NFC, (page_size >> 1)); |
503 | set_dma_x_modify(CH_NFC, 2); | 524 | set_dma_x_modify(CH_NFC, 2); |
@@ -517,9 +538,9 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd, | |||
517 | 538 | ||
518 | /* Start PAGE read/write operation */ | 539 | /* Start PAGE read/write operation */ |
519 | if (is_read) | 540 | if (is_read) |
520 | bfin_write_NFC_PGCTL(0x1); | 541 | bfin_write_NFC_PGCTL(PG_RD_START); |
521 | else | 542 | else |
522 | bfin_write_NFC_PGCTL(0x2); | 543 | bfin_write_NFC_PGCTL(PG_WR_START); |
523 | wait_for_completion(&info->dma_completion); | 544 | wait_for_completion(&info->dma_completion); |
524 | } | 545 | } |
525 | 546 | ||
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c index e5a9f9ccea60..db1dfc5a1b11 100644 --- a/drivers/mtd/nand/cafe_nand.c +++ b/drivers/mtd/nand/cafe_nand.c | |||
@@ -762,7 +762,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev, | |||
762 | cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK)); | 762 | cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK)); |
763 | 763 | ||
764 | /* Scan to find existence of the device */ | 764 | /* Scan to find existence of the device */ |
765 | if (nand_scan_ident(mtd, 2)) { | 765 | if (nand_scan_ident(mtd, 2, NULL)) { |
766 | err = -ENXIO; | 766 | err = -ENXIO; |
767 | goto out_irq; | 767 | goto out_irq; |
768 | } | 768 | } |
@@ -849,7 +849,7 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev) | |||
849 | kfree(mtd); | 849 | kfree(mtd); |
850 | } | 850 | } |
851 | 851 | ||
852 | static struct pci_device_id cafe_nand_tbl[] = { | 852 | static const struct pci_device_id cafe_nand_tbl[] = { |
853 | { PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_88ALP01_NAND, | 853 | { PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_88ALP01_NAND, |
854 | PCI_ANY_ID, PCI_ANY_ID }, | 854 | PCI_ANY_ID, PCI_ANY_ID }, |
855 | { } | 855 | { } |
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index 76e2dc8e62f7..9c9d893affeb 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c | |||
@@ -567,8 +567,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev) | |||
567 | goto err_nomem; | 567 | goto err_nomem; |
568 | } | 568 | } |
569 | 569 | ||
570 | vaddr = ioremap(res1->start, res1->end - res1->start); | 570 | vaddr = ioremap(res1->start, resource_size(res1)); |
571 | base = ioremap(res2->start, res2->end - res2->start); | 571 | base = ioremap(res2->start, resource_size(res2)); |
572 | if (!vaddr || !base) { | 572 | if (!vaddr || !base) { |
573 | dev_err(&pdev->dev, "ioremap failed\n"); | 573 | dev_err(&pdev->dev, "ioremap failed\n"); |
574 | ret = -EINVAL; | 574 | ret = -EINVAL; |
@@ -691,7 +691,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev) | |||
691 | spin_unlock_irq(&davinci_nand_lock); | 691 | spin_unlock_irq(&davinci_nand_lock); |
692 | 692 | ||
693 | /* Scan to find existence of the device(s) */ | 693 | /* Scan to find existence of the device(s) */ |
694 | ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1); | 694 | ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1, NULL); |
695 | if (ret < 0) { | 695 | if (ret < 0) { |
696 | dev_dbg(&pdev->dev, "no NAND chip(s) found\n"); | 696 | dev_dbg(&pdev->dev, "no NAND chip(s) found\n"); |
697 | goto err_scan; | 697 | goto err_scan; |
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c new file mode 100644 index 000000000000..ca03428b59cc --- /dev/null +++ b/drivers/mtd/nand/denali.c | |||
@@ -0,0 +1,2134 @@ | |||
1 | /* | ||
2 | * NAND Flash Controller Device Driver | ||
3 | * Copyright © 2009-2010, Intel Corporation and its suppliers. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/wait.h> | ||
23 | #include <linux/mutex.h> | ||
24 | #include <linux/pci.h> | ||
25 | #include <linux/mtd/mtd.h> | ||
26 | #include <linux/module.h> | ||
27 | |||
28 | #include "denali.h" | ||
29 | |||
30 | MODULE_LICENSE("GPL"); | ||
31 | |||
32 | /* We define a module parameter that allows the user to override | ||
33 | * the hardware and decide what timing mode should be used. | ||
34 | */ | ||
35 | #define NAND_DEFAULT_TIMINGS -1 | ||
36 | |||
37 | static int onfi_timing_mode = NAND_DEFAULT_TIMINGS; | ||
38 | module_param(onfi_timing_mode, int, S_IRUGO); | ||
39 | MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting. -1 indicates" | ||
40 | " use default timings"); | ||
41 | |||
42 | #define DENALI_NAND_NAME "denali-nand" | ||
43 | |||
44 | /* We define a macro here that combines all interrupts this driver uses into | ||
45 | * a single constant value, for convenience. */ | ||
46 | #define DENALI_IRQ_ALL (INTR_STATUS0__DMA_CMD_COMP | \ | ||
47 | INTR_STATUS0__ECC_TRANSACTION_DONE | \ | ||
48 | INTR_STATUS0__ECC_ERR | \ | ||
49 | INTR_STATUS0__PROGRAM_FAIL | \ | ||
50 | INTR_STATUS0__LOAD_COMP | \ | ||
51 | INTR_STATUS0__PROGRAM_COMP | \ | ||
52 | INTR_STATUS0__TIME_OUT | \ | ||
53 | INTR_STATUS0__ERASE_FAIL | \ | ||
54 | INTR_STATUS0__RST_COMP | \ | ||
55 | INTR_STATUS0__ERASE_COMP) | ||
56 | |||
57 | /* indicates whether or not the internal value for the flash bank is | ||
58 | valid or not */ | ||
59 | #define CHIP_SELECT_INVALID -1 | ||
60 | |||
61 | #define SUPPORT_8BITECC 1 | ||
62 | |||
63 | /* This macro divides two integers and rounds fractional values up | ||
64 | * to the nearest integer value. */ | ||
65 | #define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y))) | ||
66 | |||
67 | /* this macro allows us to convert from an MTD structure to our own | ||
68 | * device context (denali) structure. | ||
69 | */ | ||
70 | #define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd) | ||
71 | |||
72 | /* These constants are defined by the driver to enable common driver | ||
73 | configuration options. */ | ||
74 | #define SPARE_ACCESS 0x41 | ||
75 | #define MAIN_ACCESS 0x42 | ||
76 | #define MAIN_SPARE_ACCESS 0x43 | ||
77 | |||
78 | #define DENALI_READ 0 | ||
79 | #define DENALI_WRITE 0x100 | ||
80 | |||
81 | /* types of device accesses. We can issue commands and get status */ | ||
82 | #define COMMAND_CYCLE 0 | ||
83 | #define ADDR_CYCLE 1 | ||
84 | #define STATUS_CYCLE 2 | ||
85 | |||
86 | /* this is a helper macro that allows us to | ||
87 | * format the bank into the proper bits for the controller */ | ||
88 | #define BANK(x) ((x) << 24) | ||
89 | |||
90 | /* List of platforms this NAND controller has be integrated into */ | ||
91 | static const struct pci_device_id denali_pci_ids[] = { | ||
92 | { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 }, | ||
93 | { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST }, | ||
94 | { /* end: all zeroes */ } | ||
95 | }; | ||
96 | |||
97 | |||
98 | /* these are static lookup tables that give us easy access to | ||
99 | registers in the NAND controller. | ||
100 | */ | ||
101 | static const uint32_t intr_status_addresses[4] = {INTR_STATUS0, | ||
102 | INTR_STATUS1, | ||
103 | INTR_STATUS2, | ||
104 | INTR_STATUS3}; | ||
105 | |||
106 | static const uint32_t device_reset_banks[4] = {DEVICE_RESET__BANK0, | ||
107 | DEVICE_RESET__BANK1, | ||
108 | DEVICE_RESET__BANK2, | ||
109 | DEVICE_RESET__BANK3}; | ||
110 | |||
111 | static const uint32_t operation_timeout[4] = {INTR_STATUS0__TIME_OUT, | ||
112 | INTR_STATUS1__TIME_OUT, | ||
113 | INTR_STATUS2__TIME_OUT, | ||
114 | INTR_STATUS3__TIME_OUT}; | ||
115 | |||
116 | static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP, | ||
117 | INTR_STATUS1__RST_COMP, | ||
118 | INTR_STATUS2__RST_COMP, | ||
119 | INTR_STATUS3__RST_COMP}; | ||
120 | |||
121 | /* specifies the debug level of the driver */ | ||
122 | static int nand_debug_level = 0; | ||
123 | |||
124 | /* forward declarations */ | ||
125 | static void clear_interrupts(struct denali_nand_info *denali); | ||
126 | static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask); | ||
127 | static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask); | ||
128 | static uint32_t read_interrupt_status(struct denali_nand_info *denali); | ||
129 | |||
130 | #define DEBUG_DENALI 0 | ||
131 | |||
132 | /* This is a wrapper for writing to the denali registers. | ||
133 | * this allows us to create debug information so we can | ||
134 | * observe how the driver is programming the device. | ||
135 | * it uses standard linux convention for (val, addr) */ | ||
136 | static void denali_write32(uint32_t value, void *addr) | ||
137 | { | ||
138 | iowrite32(value, addr); | ||
139 | |||
140 | #if DEBUG_DENALI | ||
141 | printk(KERN_ERR "wrote: 0x%x -> 0x%x\n", value, (uint32_t)((uint32_t)addr & 0x1fff)); | ||
142 | #endif | ||
143 | } | ||
144 | |||
145 | /* Certain operations for the denali NAND controller use an indexed mode to read/write | ||
146 | data. The operation is performed by writing the address value of the command to | ||
147 | the device memory followed by the data. This function abstracts this common | ||
148 | operation. | ||
149 | */ | ||
150 | static void index_addr(struct denali_nand_info *denali, uint32_t address, uint32_t data) | ||
151 | { | ||
152 | denali_write32(address, denali->flash_mem); | ||
153 | denali_write32(data, denali->flash_mem + 0x10); | ||
154 | } | ||
155 | |||
156 | /* Perform an indexed read of the device */ | ||
157 | static void index_addr_read_data(struct denali_nand_info *denali, | ||
158 | uint32_t address, uint32_t *pdata) | ||
159 | { | ||
160 | denali_write32(address, denali->flash_mem); | ||
161 | *pdata = ioread32(denali->flash_mem + 0x10); | ||
162 | } | ||
163 | |||
164 | /* We need to buffer some data for some of the NAND core routines. | ||
165 | * The operations manage buffering that data. */ | ||
166 | static void reset_buf(struct denali_nand_info *denali) | ||
167 | { | ||
168 | denali->buf.head = denali->buf.tail = 0; | ||
169 | } | ||
170 | |||
171 | static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte) | ||
172 | { | ||
173 | BUG_ON(denali->buf.tail >= sizeof(denali->buf.buf)); | ||
174 | denali->buf.buf[denali->buf.tail++] = byte; | ||
175 | } | ||
176 | |||
177 | /* reads the status of the device */ | ||
178 | static void read_status(struct denali_nand_info *denali) | ||
179 | { | ||
180 | uint32_t cmd = 0x0; | ||
181 | |||
182 | /* initialize the data buffer to store status */ | ||
183 | reset_buf(denali); | ||
184 | |||
185 | /* initiate a device status read */ | ||
186 | cmd = MODE_11 | BANK(denali->flash_bank); | ||
187 | index_addr(denali, cmd | COMMAND_CYCLE, 0x70); | ||
188 | denali_write32(cmd | STATUS_CYCLE, denali->flash_mem); | ||
189 | |||
190 | /* update buffer with status value */ | ||
191 | write_byte_to_buf(denali, ioread32(denali->flash_mem + 0x10)); | ||
192 | |||
193 | #if DEBUG_DENALI | ||
194 | printk("device reporting status value of 0x%2x\n", denali->buf.buf[0]); | ||
195 | #endif | ||
196 | } | ||
197 | |||
198 | /* resets a specific device connected to the core */ | ||
199 | static void reset_bank(struct denali_nand_info *denali) | ||
200 | { | ||
201 | uint32_t irq_status = 0; | ||
202 | uint32_t irq_mask = reset_complete[denali->flash_bank] | | ||
203 | operation_timeout[denali->flash_bank]; | ||
204 | int bank = 0; | ||
205 | |||
206 | clear_interrupts(denali); | ||
207 | |||
208 | bank = device_reset_banks[denali->flash_bank]; | ||
209 | denali_write32(bank, denali->flash_reg + DEVICE_RESET); | ||
210 | |||
211 | irq_status = wait_for_irq(denali, irq_mask); | ||
212 | |||
213 | if (irq_status & operation_timeout[denali->flash_bank]) | ||
214 | { | ||
215 | printk(KERN_ERR "reset bank failed.\n"); | ||
216 | } | ||
217 | } | ||
218 | |||
219 | /* Reset the flash controller */ | ||
220 | static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali) | ||
221 | { | ||
222 | uint32_t i; | ||
223 | |||
224 | nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", | ||
225 | __FILE__, __LINE__, __func__); | ||
226 | |||
227 | for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) | ||
228 | denali_write32(reset_complete[i] | operation_timeout[i], | ||
229 | denali->flash_reg + intr_status_addresses[i]); | ||
230 | |||
231 | for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) { | ||
232 | denali_write32(device_reset_banks[i], denali->flash_reg + DEVICE_RESET); | ||
233 | while (!(ioread32(denali->flash_reg + intr_status_addresses[i]) & | ||
234 | (reset_complete[i] | operation_timeout[i]))) | ||
235 | ; | ||
236 | if (ioread32(denali->flash_reg + intr_status_addresses[i]) & | ||
237 | operation_timeout[i]) | ||
238 | nand_dbg_print(NAND_DBG_WARN, | ||
239 | "NAND Reset operation timed out on bank %d\n", i); | ||
240 | } | ||
241 | |||
242 | for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) | ||
243 | denali_write32(reset_complete[i] | operation_timeout[i], | ||
244 | denali->flash_reg + intr_status_addresses[i]); | ||
245 | |||
246 | return PASS; | ||
247 | } | ||
248 | |||
249 | /* this routine calculates the ONFI timing values for a given mode and programs | ||
250 | * the clocking register accordingly. The mode is determined by the get_onfi_nand_para | ||
251 | routine. | ||
252 | */ | ||
253 | static void NAND_ONFi_Timing_Mode(struct denali_nand_info *denali, uint16_t mode) | ||
254 | { | ||
255 | uint16_t Trea[6] = {40, 30, 25, 20, 20, 16}; | ||
256 | uint16_t Trp[6] = {50, 25, 17, 15, 12, 10}; | ||
257 | uint16_t Treh[6] = {30, 15, 15, 10, 10, 7}; | ||
258 | uint16_t Trc[6] = {100, 50, 35, 30, 25, 20}; | ||
259 | uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15}; | ||
260 | uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5}; | ||
261 | uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25}; | ||
262 | uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70}; | ||
263 | uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100}; | ||
264 | uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100}; | ||
265 | uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60}; | ||
266 | uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15}; | ||
267 | |||
268 | uint16_t TclsRising = 1; | ||
269 | uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid; | ||
270 | uint16_t dv_window = 0; | ||
271 | uint16_t en_lo, en_hi; | ||
272 | uint16_t acc_clks; | ||
273 | uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt; | ||
274 | |||
275 | nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", | ||
276 | __FILE__, __LINE__, __func__); | ||
277 | |||
278 | en_lo = CEIL_DIV(Trp[mode], CLK_X); | ||
279 | en_hi = CEIL_DIV(Treh[mode], CLK_X); | ||
280 | #if ONFI_BLOOM_TIME | ||
281 | if ((en_hi * CLK_X) < (Treh[mode] + 2)) | ||
282 | en_hi++; | ||
283 | #endif | ||
284 | |||
285 | if ((en_lo + en_hi) * CLK_X < Trc[mode]) | ||
286 | en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X); | ||
287 | |||
288 | if ((en_lo + en_hi) < CLK_MULTI) | ||
289 | en_lo += CLK_MULTI - en_lo - en_hi; | ||
290 | |||
291 | while (dv_window < 8) { | ||
292 | data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode]; | ||
293 | |||
294 | data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode]; | ||
295 | |||
296 | data_invalid = | ||
297 | data_invalid_rhoh < | ||
298 | data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh; | ||
299 | |||
300 | dv_window = data_invalid - Trea[mode]; | ||
301 | |||
302 | if (dv_window < 8) | ||
303 | en_lo++; | ||
304 | } | ||
305 | |||
306 | acc_clks = CEIL_DIV(Trea[mode], CLK_X); | ||
307 | |||
308 | while (((acc_clks * CLK_X) - Trea[mode]) < 3) | ||
309 | acc_clks++; | ||
310 | |||
311 | if ((data_invalid - acc_clks * CLK_X) < 2) | ||
312 | nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n", | ||
313 | __FILE__, __LINE__); | ||
314 | |||
315 | addr_2_data = CEIL_DIV(Tadl[mode], CLK_X); | ||
316 | re_2_we = CEIL_DIV(Trhw[mode], CLK_X); | ||
317 | re_2_re = CEIL_DIV(Trhz[mode], CLK_X); | ||
318 | we_2_re = CEIL_DIV(Twhr[mode], CLK_X); | ||
319 | cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X); | ||
320 | if (!TclsRising) | ||
321 | cs_cnt = CEIL_DIV(Tcs[mode], CLK_X); | ||
322 | if (cs_cnt == 0) | ||
323 | cs_cnt = 1; | ||
324 | |||
325 | if (Tcea[mode]) { | ||
326 | while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode]) | ||
327 | cs_cnt++; | ||
328 | } | ||
329 | |||
330 | #if MODE5_WORKAROUND | ||
331 | if (mode == 5) | ||
332 | acc_clks = 5; | ||
333 | #endif | ||
334 | |||
335 | /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */ | ||
336 | if ((ioread32(denali->flash_reg + MANUFACTURER_ID) == 0) && | ||
337 | (ioread32(denali->flash_reg + DEVICE_ID) == 0x88)) | ||
338 | acc_clks = 6; | ||
339 | |||
340 | denali_write32(acc_clks, denali->flash_reg + ACC_CLKS); | ||
341 | denali_write32(re_2_we, denali->flash_reg + RE_2_WE); | ||
342 | denali_write32(re_2_re, denali->flash_reg + RE_2_RE); | ||
343 | denali_write32(we_2_re, denali->flash_reg + WE_2_RE); | ||
344 | denali_write32(addr_2_data, denali->flash_reg + ADDR_2_DATA); | ||
345 | denali_write32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT); | ||
346 | denali_write32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT); | ||
347 | denali_write32(cs_cnt, denali->flash_reg + CS_SETUP_CNT); | ||
348 | } | ||
349 | |||
350 | /* configures the initial ECC settings for the controller */ | ||
351 | static void set_ecc_config(struct denali_nand_info *denali) | ||
352 | { | ||
353 | #if SUPPORT_8BITECC | ||
354 | if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) < 4096) || | ||
355 | (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) <= 128)) | ||
356 | denali_write32(8, denali->flash_reg + ECC_CORRECTION); | ||
357 | #endif | ||
358 | |||
359 | if ((ioread32(denali->flash_reg + ECC_CORRECTION) & ECC_CORRECTION__VALUE) | ||
360 | == 1) { | ||
361 | denali->dev_info.wECCBytesPerSector = 4; | ||
362 | denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected; | ||
363 | denali->dev_info.wNumPageSpareFlag = | ||
364 | denali->dev_info.wPageSpareSize - | ||
365 | denali->dev_info.wPageDataSize / | ||
366 | (ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) * | ||
367 | denali->dev_info.wECCBytesPerSector | ||
368 | - denali->dev_info.wSpareSkipBytes; | ||
369 | } else { | ||
370 | denali->dev_info.wECCBytesPerSector = | ||
371 | (ioread32(denali->flash_reg + ECC_CORRECTION) & | ||
372 | ECC_CORRECTION__VALUE) * 13 / 8; | ||
373 | if ((denali->dev_info.wECCBytesPerSector) % 2 == 0) | ||
374 | denali->dev_info.wECCBytesPerSector += 2; | ||
375 | else | ||
376 | denali->dev_info.wECCBytesPerSector += 1; | ||
377 | |||
378 | denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected; | ||
379 | denali->dev_info.wNumPageSpareFlag = denali->dev_info.wPageSpareSize - | ||
380 | denali->dev_info.wPageDataSize / | ||
381 | (ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) * | ||
382 | denali->dev_info.wECCBytesPerSector | ||
383 | - denali->dev_info.wSpareSkipBytes; | ||
384 | } | ||
385 | } | ||
386 | |||
387 | /* queries the NAND device to see what ONFI modes it supports. */ | ||
388 | static uint16_t get_onfi_nand_para(struct denali_nand_info *denali) | ||
389 | { | ||
390 | int i; | ||
391 | uint16_t blks_lun_l, blks_lun_h, n_of_luns; | ||
392 | uint32_t blockperlun, id; | ||
393 | |||
394 | denali_write32(DEVICE_RESET__BANK0, denali->flash_reg + DEVICE_RESET); | ||
395 | |||
396 | while (!((ioread32(denali->flash_reg + INTR_STATUS0) & | ||
397 | INTR_STATUS0__RST_COMP) | | ||
398 | (ioread32(denali->flash_reg + INTR_STATUS0) & | ||
399 | INTR_STATUS0__TIME_OUT))) | ||
400 | ; | ||
401 | |||
402 | if (ioread32(denali->flash_reg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) { | ||
403 | denali_write32(DEVICE_RESET__BANK1, denali->flash_reg + DEVICE_RESET); | ||
404 | while (!((ioread32(denali->flash_reg + INTR_STATUS1) & | ||
405 | INTR_STATUS1__RST_COMP) | | ||
406 | (ioread32(denali->flash_reg + INTR_STATUS1) & | ||
407 | INTR_STATUS1__TIME_OUT))) | ||
408 | ; | ||
409 | |||
410 | if (ioread32(denali->flash_reg + INTR_STATUS1) & | ||
411 | INTR_STATUS1__RST_COMP) { | ||
412 | denali_write32(DEVICE_RESET__BANK2, | ||
413 | denali->flash_reg + DEVICE_RESET); | ||
414 | while (!((ioread32(denali->flash_reg + INTR_STATUS2) & | ||
415 | INTR_STATUS2__RST_COMP) | | ||
416 | (ioread32(denali->flash_reg + INTR_STATUS2) & | ||
417 | INTR_STATUS2__TIME_OUT))) | ||
418 | ; | ||
419 | |||
420 | if (ioread32(denali->flash_reg + INTR_STATUS2) & | ||
421 | INTR_STATUS2__RST_COMP) { | ||
422 | denali_write32(DEVICE_RESET__BANK3, | ||
423 | denali->flash_reg + DEVICE_RESET); | ||
424 | while (!((ioread32(denali->flash_reg + INTR_STATUS3) & | ||
425 | INTR_STATUS3__RST_COMP) | | ||
426 | (ioread32(denali->flash_reg + INTR_STATUS3) & | ||
427 | INTR_STATUS3__TIME_OUT))) | ||
428 | ; | ||
429 | } else { | ||
430 | printk(KERN_ERR "Getting a time out for bank 2!\n"); | ||
431 | } | ||
432 | } else { | ||
433 | printk(KERN_ERR "Getting a time out for bank 1!\n"); | ||
434 | } | ||
435 | } | ||
436 | |||
437 | denali_write32(INTR_STATUS0__TIME_OUT, denali->flash_reg + INTR_STATUS0); | ||
438 | denali_write32(INTR_STATUS1__TIME_OUT, denali->flash_reg + INTR_STATUS1); | ||
439 | denali_write32(INTR_STATUS2__TIME_OUT, denali->flash_reg + INTR_STATUS2); | ||
440 | denali_write32(INTR_STATUS3__TIME_OUT, denali->flash_reg + INTR_STATUS3); | ||
441 | |||
442 | denali->dev_info.wONFIDevFeatures = | ||
443 | ioread32(denali->flash_reg + ONFI_DEVICE_FEATURES); | ||
444 | denali->dev_info.wONFIOptCommands = | ||
445 | ioread32(denali->flash_reg + ONFI_OPTIONAL_COMMANDS); | ||
446 | denali->dev_info.wONFITimingMode = | ||
447 | ioread32(denali->flash_reg + ONFI_TIMING_MODE); | ||
448 | denali->dev_info.wONFIPgmCacheTimingMode = | ||
449 | ioread32(denali->flash_reg + ONFI_PGM_CACHE_TIMING_MODE); | ||
450 | |||
451 | n_of_luns = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) & | ||
452 | ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS; | ||
453 | blks_lun_l = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L); | ||
454 | blks_lun_h = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U); | ||
455 | |||
456 | blockperlun = (blks_lun_h << 16) | blks_lun_l; | ||
457 | |||
458 | denali->dev_info.wTotalBlocks = n_of_luns * blockperlun; | ||
459 | |||
460 | if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) & | ||
461 | ONFI_TIMING_MODE__VALUE)) | ||
462 | return FAIL; | ||
463 | |||
464 | for (i = 5; i > 0; i--) { | ||
465 | if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) & (0x01 << i)) | ||
466 | break; | ||
467 | } | ||
468 | |||
469 | NAND_ONFi_Timing_Mode(denali, i); | ||
470 | |||
471 | index_addr(denali, MODE_11 | 0, 0x90); | ||
472 | index_addr(denali, MODE_11 | 1, 0); | ||
473 | |||
474 | for (i = 0; i < 3; i++) | ||
475 | index_addr_read_data(denali, MODE_11 | 2, &id); | ||
476 | |||
477 | nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id); | ||
478 | |||
479 | denali->dev_info.MLCDevice = id & 0x0C; | ||
480 | |||
481 | /* By now, all the ONFI devices we know support the page cache */ | ||
482 | /* rw feature. So here we enable the pipeline_rw_ahead feature */ | ||
483 | /* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */ | ||
484 | /* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE); */ | ||
485 | |||
486 | return PASS; | ||
487 | } | ||
488 | |||
489 | static void get_samsung_nand_para(struct denali_nand_info *denali) | ||
490 | { | ||
491 | uint8_t no_of_planes; | ||
492 | uint32_t blk_size; | ||
493 | uint64_t plane_size, capacity; | ||
494 | uint32_t id_bytes[5]; | ||
495 | int i; | ||
496 | |||
497 | index_addr(denali, (uint32_t)(MODE_11 | 0), 0x90); | ||
498 | index_addr(denali, (uint32_t)(MODE_11 | 1), 0); | ||
499 | for (i = 0; i < 5; i++) | ||
500 | index_addr_read_data(denali, (uint32_t)(MODE_11 | 2), &id_bytes[i]); | ||
501 | |||
502 | nand_dbg_print(NAND_DBG_DEBUG, | ||
503 | "ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", | ||
504 | id_bytes[0], id_bytes[1], id_bytes[2], | ||
505 | id_bytes[3], id_bytes[4]); | ||
506 | |||
507 | if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */ | ||
508 | /* Set timing register values according to datasheet */ | ||
509 | denali_write32(5, denali->flash_reg + ACC_CLKS); | ||
510 | denali_write32(20, denali->flash_reg + RE_2_WE); | ||
511 | denali_write32(12, denali->flash_reg + WE_2_RE); | ||
512 | denali_write32(14, denali->flash_reg + ADDR_2_DATA); | ||
513 | denali_write32(3, denali->flash_reg + RDWR_EN_LO_CNT); | ||
514 | denali_write32(2, denali->flash_reg + RDWR_EN_HI_CNT); | ||
515 | denali_write32(2, denali->flash_reg + CS_SETUP_CNT); | ||
516 | } | ||
517 | |||
518 | no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2); | ||
519 | plane_size = (uint64_t)64 << ((id_bytes[4] & 0x70) >> 4); | ||
520 | blk_size = 64 << ((ioread32(denali->flash_reg + DEVICE_PARAM_1) & 0x30) >> 4); | ||
521 | capacity = (uint64_t)128 * plane_size * no_of_planes; | ||
522 | |||
523 | do_div(capacity, blk_size); | ||
524 | denali->dev_info.wTotalBlocks = capacity; | ||
525 | } | ||
526 | |||
527 | static void get_toshiba_nand_para(struct denali_nand_info *denali) | ||
528 | { | ||
529 | void __iomem *scratch_reg; | ||
530 | uint32_t tmp; | ||
531 | |||
532 | /* Workaround to fix a controller bug which reports a wrong */ | ||
533 | /* spare area size for some kind of Toshiba NAND device */ | ||
534 | if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) && | ||
535 | (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) { | ||
536 | denali_write32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); | ||
537 | tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) * | ||
538 | ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE); | ||
539 | denali_write32(tmp, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); | ||
540 | #if SUPPORT_15BITECC | ||
541 | denali_write32(15, denali->flash_reg + ECC_CORRECTION); | ||
542 | #elif SUPPORT_8BITECC | ||
543 | denali_write32(8, denali->flash_reg + ECC_CORRECTION); | ||
544 | #endif | ||
545 | } | ||
546 | |||
547 | /* As Toshiba NAND can not provide it's block number, */ | ||
548 | /* so here we need user to provide the correct block */ | ||
549 | /* number in a scratch register before the Linux NAND */ | ||
550 | /* driver is loaded. If no valid value found in the scratch */ | ||
551 | /* register, then we use default block number value */ | ||
552 | scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE); | ||
553 | if (!scratch_reg) { | ||
554 | printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d", | ||
555 | __FILE__, __LINE__); | ||
556 | denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS; | ||
557 | } else { | ||
558 | nand_dbg_print(NAND_DBG_WARN, | ||
559 | "Spectra: ioremap reg address: 0x%p\n", scratch_reg); | ||
560 | denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg); | ||
561 | if (denali->dev_info.wTotalBlocks < 512) | ||
562 | denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS; | ||
563 | iounmap(scratch_reg); | ||
564 | } | ||
565 | } | ||
566 | |||
567 | static void get_hynix_nand_para(struct denali_nand_info *denali) | ||
568 | { | ||
569 | void __iomem *scratch_reg; | ||
570 | uint32_t main_size, spare_size; | ||
571 | |||
572 | switch (denali->dev_info.wDeviceID) { | ||
573 | case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */ | ||
574 | case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */ | ||
575 | denali_write32(128, denali->flash_reg + PAGES_PER_BLOCK); | ||
576 | denali_write32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE); | ||
577 | denali_write32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); | ||
578 | main_size = 4096 * ioread32(denali->flash_reg + DEVICES_CONNECTED); | ||
579 | spare_size = 224 * ioread32(denali->flash_reg + DEVICES_CONNECTED); | ||
580 | denali_write32(main_size, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE); | ||
581 | denali_write32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); | ||
582 | denali_write32(0, denali->flash_reg + DEVICE_WIDTH); | ||
583 | #if SUPPORT_15BITECC | ||
584 | denali_write32(15, denali->flash_reg + ECC_CORRECTION); | ||
585 | #elif SUPPORT_8BITECC | ||
586 | denali_write32(8, denali->flash_reg + ECC_CORRECTION); | ||
587 | #endif | ||
588 | denali->dev_info.MLCDevice = 1; | ||
589 | break; | ||
590 | default: | ||
591 | nand_dbg_print(NAND_DBG_WARN, | ||
592 | "Spectra: Unknown Hynix NAND (Device ID: 0x%x)." | ||
593 | "Will use default parameter values instead.\n", | ||
594 | denali->dev_info.wDeviceID); | ||
595 | } | ||
596 | |||
597 | scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE); | ||
598 | if (!scratch_reg) { | ||
599 | printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d", | ||
600 | __FILE__, __LINE__); | ||
601 | denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS; | ||
602 | } else { | ||
603 | nand_dbg_print(NAND_DBG_WARN, | ||
604 | "Spectra: ioremap reg address: 0x%p\n", scratch_reg); | ||
605 | denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg); | ||
606 | if (denali->dev_info.wTotalBlocks < 512) | ||
607 | denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS; | ||
608 | iounmap(scratch_reg); | ||
609 | } | ||
610 | } | ||
611 | |||
612 | /* determines how many NAND chips are connected to the controller. Note for | ||
613 | Intel CE4100 devices we don't support more than one device. | ||
614 | */ | ||
615 | static void find_valid_banks(struct denali_nand_info *denali) | ||
616 | { | ||
617 | uint32_t id[LLD_MAX_FLASH_BANKS]; | ||
618 | int i; | ||
619 | |||
620 | denali->total_used_banks = 1; | ||
621 | for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) { | ||
622 | index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90); | ||
623 | index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0); | ||
624 | index_addr_read_data(denali, (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]); | ||
625 | |||
626 | nand_dbg_print(NAND_DBG_DEBUG, | ||
627 | "Return 1st ID for bank[%d]: %x\n", i, id[i]); | ||
628 | |||
629 | if (i == 0) { | ||
630 | if (!(id[i] & 0x0ff)) | ||
631 | break; /* WTF? */ | ||
632 | } else { | ||
633 | if ((id[i] & 0x0ff) == (id[0] & 0x0ff)) | ||
634 | denali->total_used_banks++; | ||
635 | else | ||
636 | break; | ||
637 | } | ||
638 | } | ||
639 | |||
640 | if (denali->platform == INTEL_CE4100) | ||
641 | { | ||
642 | /* Platform limitations of the CE4100 device limit | ||
643 | * users to a single chip solution for NAND. | ||
644 | * Multichip support is not enabled. | ||
645 | */ | ||
646 | if (denali->total_used_banks != 1) | ||
647 | { | ||
648 | printk(KERN_ERR "Sorry, Intel CE4100 only supports " | ||
649 | "a single NAND device.\n"); | ||
650 | BUG(); | ||
651 | } | ||
652 | } | ||
653 | nand_dbg_print(NAND_DBG_DEBUG, | ||
654 | "denali->total_used_banks: %d\n", denali->total_used_banks); | ||
655 | } | ||
656 | |||
657 | static void detect_partition_feature(struct denali_nand_info *denali) | ||
658 | { | ||
659 | if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) { | ||
660 | if ((ioread32(denali->flash_reg + PERM_SRC_ID_1) & | ||
661 | PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) { | ||
662 | denali->dev_info.wSpectraStartBlock = | ||
663 | ((ioread32(denali->flash_reg + MIN_MAX_BANK_1) & | ||
664 | MIN_MAX_BANK_1__MIN_VALUE) * | ||
665 | denali->dev_info.wTotalBlocks) | ||
666 | + | ||
667 | (ioread32(denali->flash_reg + MIN_BLK_ADDR_1) & | ||
668 | MIN_BLK_ADDR_1__VALUE); | ||
669 | |||
670 | denali->dev_info.wSpectraEndBlock = | ||
671 | (((ioread32(denali->flash_reg + MIN_MAX_BANK_1) & | ||
672 | MIN_MAX_BANK_1__MAX_VALUE) >> 2) * | ||
673 | denali->dev_info.wTotalBlocks) | ||
674 | + | ||
675 | (ioread32(denali->flash_reg + MAX_BLK_ADDR_1) & | ||
676 | MAX_BLK_ADDR_1__VALUE); | ||
677 | |||
678 | denali->dev_info.wTotalBlocks *= denali->total_used_banks; | ||
679 | |||
680 | if (denali->dev_info.wSpectraEndBlock >= | ||
681 | denali->dev_info.wTotalBlocks) { | ||
682 | denali->dev_info.wSpectraEndBlock = | ||
683 | denali->dev_info.wTotalBlocks - 1; | ||
684 | } | ||
685 | |||
686 | denali->dev_info.wDataBlockNum = | ||
687 | denali->dev_info.wSpectraEndBlock - | ||
688 | denali->dev_info.wSpectraStartBlock + 1; | ||
689 | } else { | ||
690 | denali->dev_info.wTotalBlocks *= denali->total_used_banks; | ||
691 | denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK; | ||
692 | denali->dev_info.wSpectraEndBlock = | ||
693 | denali->dev_info.wTotalBlocks - 1; | ||
694 | denali->dev_info.wDataBlockNum = | ||
695 | denali->dev_info.wSpectraEndBlock - | ||
696 | denali->dev_info.wSpectraStartBlock + 1; | ||
697 | } | ||
698 | } else { | ||
699 | denali->dev_info.wTotalBlocks *= denali->total_used_banks; | ||
700 | denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK; | ||
701 | denali->dev_info.wSpectraEndBlock = denali->dev_info.wTotalBlocks - 1; | ||
702 | denali->dev_info.wDataBlockNum = | ||
703 | denali->dev_info.wSpectraEndBlock - | ||
704 | denali->dev_info.wSpectraStartBlock + 1; | ||
705 | } | ||
706 | } | ||
707 | |||
/* Dump every cached denali->dev_info field to the debug log.
 * Purely informational: reads only the in-memory structure and does
 * not touch the hardware. Intended to be called once after device
 * identification has filled dev_info in. */
static void dump_device_info(struct denali_nand_info *denali)
{
	nand_dbg_print(NAND_DBG_DEBUG, "denali->dev_info:\n");
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
			denali->dev_info.wDeviceMaker);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
			denali->dev_info.wDeviceID);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
			denali->dev_info.wDeviceType);
	nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
			denali->dev_info.wSpectraStartBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
			denali->dev_info.wSpectraEndBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
			denali->dev_info.wTotalBlocks);
	nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
			denali->dev_info.wPagesPerBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
			denali->dev_info.wPageSize);
	nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
			denali->dev_info.wPageDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
			denali->dev_info.wPageSpareSize);
	nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
			denali->dev_info.wNumPageSpareFlag);
	nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
			denali->dev_info.wECCBytesPerSector);
	nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
			denali->dev_info.wBlockSize);
	nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
			denali->dev_info.wBlockDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
			denali->dev_info.wDataBlockNum);
	nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
			denali->dev_info.bPlaneNum);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
			denali->dev_info.wDeviceMainAreaSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
			denali->dev_info.wDeviceSpareAreaSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
			denali->dev_info.wDevicesConnected);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
			denali->dev_info.wDeviceWidth);
	nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
			denali->dev_info.wHWRevision);
	nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
			denali->dev_info.wHWFeatures);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
			denali->dev_info.wONFIDevFeatures);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
			denali->dev_info.wONFIOptCommands);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
			denali->dev_info.wONFITimingMode);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
			denali->dev_info.wONFIPgmCacheTimingMode);
	nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
			denali->dev_info.MLCDevice ? "Yes" : "No");
	nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
			denali->dev_info.wSpareSkipBytes);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
			denali->dev_info.nBitsInPageNumber);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
			denali->dev_info.nBitsInPageDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
			denali->dev_info.nBitsInBlockDataSize);
}
774 | |||
/* Identify the attached NAND device and populate denali->dev_info.
 * Latches the ID registers, dispatches to vendor-specific parameter
 * discovery (ONFI probe first, then Samsung/Toshiba/Hynix by maker
 * code), then derives page/block geometry from the controller's
 * configuration registers. Returns PASS, or FAIL if the ONFI probe
 * fails or the reported plane count is unrecognised. */
static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali)
{
	uint16_t status = PASS;
	uint8_t no_of_planes;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
			__FILE__, __LINE__, __func__);

	/* raw ID bytes as latched by the controller */
	denali->dev_info.wDeviceMaker = ioread32(denali->flash_reg + MANUFACTURER_ID);
	denali->dev_info.wDeviceID = ioread32(denali->flash_reg + DEVICE_ID);
	denali->dev_info.bDeviceParam0 = ioread32(denali->flash_reg + DEVICE_PARAM_0);
	denali->dev_info.bDeviceParam1 = ioread32(denali->flash_reg + DEVICE_PARAM_1);
	denali->dev_info.bDeviceParam2 = ioread32(denali->flash_reg + DEVICE_PARAM_2);

	/* non-zero bits 2-3 of param 0 mark the part as MLC
	 * (presumably the cell-type field -- TODO confirm vs databook) */
	denali->dev_info.MLCDevice = ioread32(denali->flash_reg + DEVICE_PARAM_0) & 0x0c;

	if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
		ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
		if (FAIL == get_onfi_nand_para(denali))
			return FAIL;
	} else if (denali->dev_info.wDeviceMaker == 0xEC) { /* Samsung NAND */
		get_samsung_nand_para(denali);
	} else if (denali->dev_info.wDeviceMaker == 0x98) { /* Toshiba NAND */
		get_toshiba_nand_para(denali);
	} else if (denali->dev_info.wDeviceMaker == 0xAD) { /* Hynix NAND */
		get_hynix_nand_para(denali);
	} else {
		/* unknown vendor: fall back to a default block count */
		denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
	}

	nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
			"acc_clks: %d, re_2_we: %d, we_2_re: %d,"
			"addr_2_data: %d, rdwr_en_lo_cnt: %d, "
			"rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
			ioread32(denali->flash_reg + ACC_CLKS),
			ioread32(denali->flash_reg + RE_2_WE),
			ioread32(denali->flash_reg + WE_2_RE),
			ioread32(denali->flash_reg + ADDR_2_DATA),
			ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
			ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
			ioread32(denali->flash_reg + CS_SETUP_CNT));

	denali->dev_info.wHWRevision = ioread32(denali->flash_reg + REVISION);
	denali->dev_info.wHWFeatures = ioread32(denali->flash_reg + FEATURES);

	denali->dev_info.wDeviceMainAreaSize =
		ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
	denali->dev_info.wDeviceSpareAreaSize =
		ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);

	denali->dev_info.wPageDataSize =
		ioread32(denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);

	/* Note: When using the Micon 4K NAND device, the controller will report
	 * Page Spare Size as 216 bytes. But Micron's Spec say it's 218 bytes.
	 * And if force set it to 218 bytes, the controller can not work
	 * correctly. So just let it be. But keep in mind that this bug may
	 * cause
	 * other problems in future. - Yunpeng 2008-10-10
	 */
	denali->dev_info.wPageSpareSize =
		ioread32(denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);

	denali->dev_info.wPagesPerBlock = ioread32(denali->flash_reg + PAGES_PER_BLOCK);

	/* derived geometry: page = data + spare, block = pages * page */
	denali->dev_info.wPageSize =
		denali->dev_info.wPageDataSize + denali->dev_info.wPageSpareSize;
	denali->dev_info.wBlockSize =
		denali->dev_info.wPageSize * denali->dev_info.wPagesPerBlock;
	denali->dev_info.wBlockDataSize =
		denali->dev_info.wPagesPerBlock * denali->dev_info.wPageDataSize;

	denali->dev_info.wDeviceWidth = ioread32(denali->flash_reg + DEVICE_WIDTH);
	/* non-zero DEVICE_WIDTH => 16-bit bus, otherwise 8-bit */
	denali->dev_info.wDeviceType =
		((ioread32(denali->flash_reg + DEVICE_WIDTH) > 0) ? 16 : 8);

	denali->dev_info.wDevicesConnected = ioread32(denali->flash_reg + DEVICES_CONNECTED);

	/* skip bytes scale with the number of devices wired in parallel */
	denali->dev_info.wSpareSkipBytes =
		ioread32(denali->flash_reg + SPARE_AREA_SKIP_BYTES) *
		denali->dev_info.wDevicesConnected;

	denali->dev_info.nBitsInPageNumber =
		ilog2(denali->dev_info.wPagesPerBlock);
	denali->dev_info.nBitsInPageDataSize =
		ilog2(denali->dev_info.wPageDataSize);
	denali->dev_info.nBitsInBlockDataSize =
		ilog2(denali->dev_info.wBlockDataSize);

	set_ecc_config(denali);

	/* the register encodes plane count as (2^k - 1): 0/1/3/7 map to
	 * 1/2/4/8 planes; anything else is treated as a failure */
	no_of_planes = ioread32(denali->flash_reg + NUMBER_OF_PLANES) &
		NUMBER_OF_PLANES__VALUE;

	switch (no_of_planes) {
	case 0:
	case 1:
	case 3:
	case 7:
		denali->dev_info.bPlaneNum = no_of_planes + 1;
		break;
	default:
		status = FAIL;
		break;
	}

	find_valid_banks(denali);

	detect_partition_feature(denali);

	dump_device_info(denali);

	/* If the user specified to override the default timings
	 * with a specific ONFI mode, we apply those changes here.
	 */
	if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
	{
		NAND_ONFi_Timing_Mode(denali, onfi_timing_mode);
	}

	return status;
}
897 | |||
898 | static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali, | ||
899 | uint16_t INT_ENABLE) | ||
900 | { | ||
901 | nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", | ||
902 | __FILE__, __LINE__, __func__); | ||
903 | |||
904 | if (INT_ENABLE) | ||
905 | denali_write32(1, denali->flash_reg + GLOBAL_INT_ENABLE); | ||
906 | else | ||
907 | denali_write32(0, denali->flash_reg + GLOBAL_INT_ENABLE); | ||
908 | } | ||
909 | |||
/* Sanity-check a bank index requested by controlling software: the
 * controller exposes exactly four banks (0..3). */
static inline bool is_flash_bank_valid(int flash_bank)
{
	if (flash_bank < 0)
		return false;
	return flash_bank < 4;
}
917 | |||
918 | static void denali_irq_init(struct denali_nand_info *denali) | ||
919 | { | ||
920 | uint32_t int_mask = 0; | ||
921 | |||
922 | /* Disable global interrupts */ | ||
923 | NAND_LLD_Enable_Disable_Interrupts(denali, false); | ||
924 | |||
925 | int_mask = DENALI_IRQ_ALL; | ||
926 | |||
927 | /* Clear all status bits */ | ||
928 | denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS0); | ||
929 | denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS1); | ||
930 | denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS2); | ||
931 | denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS3); | ||
932 | |||
933 | denali_irq_enable(denali, int_mask); | ||
934 | } | ||
935 | |||
/* Teardown counterpart of denali_irq_init(): mask the controller's
 * global interrupt output, then release the IRQ line registered with
 * 'denali' as its cookie. */
static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
{
	NAND_LLD_Enable_Disable_Interrupts(denali, false);
	free_irq(irqnum, denali);
}
941 | |||
/* Program the same interrupt-enable mask into all four banks'
 * INTR_EN registers so every bank raises the same set of events. */
static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask)
{
	denali_write32(int_mask, denali->flash_reg + INTR_EN0);
	denali_write32(int_mask, denali->flash_reg + INTR_EN1);
	denali_write32(int_mask, denali->flash_reg + INTR_EN2);
	denali_write32(int_mask, denali->flash_reg + INTR_EN3);
}
949 | |||
950 | /* This function only returns when an interrupt that this driver cares about | ||
951 | * occurs. This is to reduce the overhead of servicing interrupts | ||
952 | */ | ||
953 | static inline uint32_t denali_irq_detected(struct denali_nand_info *denali) | ||
954 | { | ||
955 | return (read_interrupt_status(denali) & DENALI_IRQ_ALL); | ||
956 | } | ||
957 | |||
958 | /* Interrupts are cleared by writing a 1 to the appropriate status bit */ | ||
959 | static inline void clear_interrupt(struct denali_nand_info *denali, uint32_t irq_mask) | ||
960 | { | ||
961 | uint32_t intr_status_reg = 0; | ||
962 | |||
963 | intr_status_reg = intr_status_addresses[denali->flash_bank]; | ||
964 | |||
965 | denali_write32(irq_mask, denali->flash_reg + intr_status_reg); | ||
966 | } | ||
967 | |||
/* Reset the driver's cached interrupt state under the irq lock.
 * The hardware status register is read (and, with DEBUG_DENALI,
 * logged) but not written here -- only denali->irq_status is zeroed;
 * hardware bits are acked separately in clear_interrupt(). */
static void clear_interrupts(struct denali_nand_info *denali)
{
	uint32_t status = 0x0;	/* snapshot kept only for the debug log */
	spin_lock_irq(&denali->irq_lock);

	status = read_interrupt_status(denali);

#if DEBUG_DENALI
	denali->irq_debug_array[denali->idx++] = 0x30000000 | status;
	denali->idx %= 32;
#endif

	denali->irq_status = 0x0;
	spin_unlock_irq(&denali->irq_lock);
}
983 | |||
984 | static uint32_t read_interrupt_status(struct denali_nand_info *denali) | ||
985 | { | ||
986 | uint32_t intr_status_reg = 0; | ||
987 | |||
988 | intr_status_reg = intr_status_addresses[denali->flash_bank]; | ||
989 | |||
990 | return ioread32(denali->flash_reg + intr_status_reg); | ||
991 | } | ||
992 | |||
#if DEBUG_DENALI
/* Dump the 32-entry circular ISR debug log together with the current
 * write index. */
static void print_irq_log(struct denali_nand_info *denali)
{
	int i;

	printk("ISR debug log index = %X\n", denali->idx);
	for (i = 0; i < 32; i++)
		printk("%08X: %08X\n", i, denali->irq_debug_array[i]);
}
#endif
1005 | |||
1006 | /* This is the interrupt service routine. It handles all interrupts | ||
1007 | * sent to this device. Note that on CE4100, this is a shared | ||
1008 | * interrupt. | ||
1009 | */ | ||
1010 | static irqreturn_t denali_isr(int irq, void *dev_id) | ||
1011 | { | ||
1012 | struct denali_nand_info *denali = dev_id; | ||
1013 | uint32_t irq_status = 0x0; | ||
1014 | irqreturn_t result = IRQ_NONE; | ||
1015 | |||
1016 | spin_lock(&denali->irq_lock); | ||
1017 | |||
1018 | /* check to see if a valid NAND chip has | ||
1019 | * been selected. | ||
1020 | */ | ||
1021 | if (is_flash_bank_valid(denali->flash_bank)) | ||
1022 | { | ||
1023 | /* check to see if controller generated | ||
1024 | * the interrupt, since this is a shared interrupt */ | ||
1025 | if ((irq_status = denali_irq_detected(denali)) != 0) | ||
1026 | { | ||
1027 | #if DEBUG_DENALI | ||
1028 | denali->irq_debug_array[denali->idx++] = 0x10000000 | irq_status; | ||
1029 | denali->idx %= 32; | ||
1030 | |||
1031 | printk("IRQ status = 0x%04x\n", irq_status); | ||
1032 | #endif | ||
1033 | /* handle interrupt */ | ||
1034 | /* first acknowledge it */ | ||
1035 | clear_interrupt(denali, irq_status); | ||
1036 | /* store the status in the device context for someone | ||
1037 | to read */ | ||
1038 | denali->irq_status |= irq_status; | ||
1039 | /* notify anyone who cares that it happened */ | ||
1040 | complete(&denali->complete); | ||
1041 | /* tell the OS that we've handled this */ | ||
1042 | result = IRQ_HANDLED; | ||
1043 | } | ||
1044 | } | ||
1045 | spin_unlock(&denali->irq_lock); | ||
1046 | return result; | ||
1047 | } | ||
#define BANK(x) ((x) << 24)

/* Block until one of the interrupts in irq_mask has been recorded by
 * the ISR in denali->irq_status, or until the one-second completion
 * timeout expires. The matched bits are consumed (cleared from
 * irq_status) and returned; 0 is returned on timeout. Must not be
 * called from atomic context (sleeps on a completion). */
static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
{
	unsigned long comp_res = 0;
	uint32_t intr_status = 0;
	bool retry = false;
	unsigned long timeout = msecs_to_jiffies(1000);

	do
	{
#if DEBUG_DENALI
		printk("waiting for 0x%x\n", irq_mask);
#endif
		/* comp_res == 0 means the wait timed out */
		comp_res = wait_for_completion_timeout(&denali->complete, timeout);
		spin_lock_irq(&denali->irq_lock);
		intr_status = denali->irq_status;

#if DEBUG_DENALI
		denali->irq_debug_array[denali->idx++] = 0x20000000 | (irq_mask << 16) | intr_status;
		denali->idx %= 32;
#endif

		if (intr_status & irq_mask)
		{
			/* consume the bits we were waiting for */
			denali->irq_status &= ~irq_mask;
			spin_unlock_irq(&denali->irq_lock);
#if DEBUG_DENALI
			if (retry) printk("status on retry = 0x%x\n", intr_status);
#endif
			/* our interrupt was detected */
			break;
		}
		else
		{
			/* these are not the interrupts you are looking for -
			   need to wait again */
			spin_unlock_irq(&denali->irq_lock);
#if DEBUG_DENALI
			print_irq_log(denali);
			printk("received irq nobody cared: irq_status = 0x%x,"
				" irq_mask = 0x%x, timeout = %ld\n", intr_status, irq_mask, comp_res);
#endif
			retry = true;
		}
	} while (comp_res != 0);

	if (comp_res == 0)
	{
		/* timeout */
		printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n",
				intr_status, irq_mask);

		intr_status = 0;
	}
	return intr_status;
}
1105 | |||
1106 | /* This helper function setups the registers for ECC and whether or not | ||
1107 | the spare area will be transfered. */ | ||
1108 | static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en, | ||
1109 | bool transfer_spare) | ||
1110 | { | ||
1111 | int ecc_en_flag = 0, transfer_spare_flag = 0; | ||
1112 | |||
1113 | /* set ECC, transfer spare bits if needed */ | ||
1114 | ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0; | ||
1115 | transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0; | ||
1116 | |||
1117 | /* Enable spare area/ECC per user's request. */ | ||
1118 | denali_write32(ecc_en_flag, denali->flash_reg + ECC_ENABLE); | ||
1119 | denali_write32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG); | ||
1120 | } | ||
1121 | |||
/* sends a pipeline command operation to the controller. See the Denali NAND
   controller's user guide for more information (section 4.2.3.6).
   Targets denali->page on the active bank. Returns PASS, or FAIL when
   a non-spare read times out waiting for LOAD_COMP. */
static int denali_send_pipeline_cmd(struct denali_nand_info *denali, bool ecc_en,
					bool transfer_spare, int access_type,
					int op)
{
	int status = PASS;
	uint32_t addr = 0x0, cmd = 0x0, page_count = 1, irq_status = 0,
		 irq_mask = 0;

	/* only reads have a completion interrupt to wait on here */
	if (op == DENALI_READ) irq_mask = INTR_STATUS0__LOAD_COMP;
	else if (op == DENALI_WRITE) irq_mask = 0;
	else BUG();

	setup_ecc_for_xfer(denali, ecc_en, transfer_spare);

#if DEBUG_DENALI
	spin_lock_irq(&denali->irq_lock);
	denali->irq_debug_array[denali->idx++] = 0x40000000 | ioread32(denali->flash_reg + ECC_ENABLE) | (access_type << 4);
	denali->idx %= 32;
	spin_unlock_irq(&denali->irq_lock);
#endif


	/* clear interrupts */
	clear_interrupts(denali);

	/* bank select in bits 31:24 (see BANK()) plus the page number */
	addr = BANK(denali->flash_bank) | denali->page;

	if (op == DENALI_WRITE && access_type != SPARE_ACCESS)
	{
		cmd = MODE_01 | addr;
		denali_write32(cmd, denali->flash_mem);
	}
	else if (op == DENALI_WRITE && access_type == SPARE_ACCESS)
	{
		/* select spare access for the write, then issue it */
		cmd = MODE_10 | addr;
		index_addr(denali, (uint32_t)cmd, access_type);

		cmd = MODE_01 | addr;
		denali_write32(cmd, denali->flash_mem);
	}
	else if (op == DENALI_READ)
	{
		/* setup page read request for access type */
		cmd = MODE_10 | addr;
		index_addr(denali, (uint32_t)cmd, access_type);

		/* page 33 of the NAND controller spec indicates we should not
		   use the pipeline commands in Spare area only mode. So we
		   don't.
		 */
		if (access_type == SPARE_ACCESS)
		{
			cmd = MODE_01 | addr;
			denali_write32(cmd, denali->flash_mem);
		}
		else
		{
			index_addr(denali, (uint32_t)cmd, 0x2000 | op | page_count);

			/* wait for command to be accepted
			 * can always use status0 bit as the mask is identical for each
			 * bank. */
			irq_status = wait_for_irq(denali, irq_mask);

			if (irq_status == 0)
			{
				printk(KERN_ERR "cmd, page, addr on timeout "
					"(0x%x, 0x%x, 0x%x)\n", cmd, denali->page, addr);
				status = FAIL;
			}
			else
			{
				cmd = MODE_01 | addr;
				denali_write32(cmd, denali->flash_mem);
			}
		}
	}
	return status;
}
1205 | |||
1206 | /* helper function that simply writes a buffer to the flash */ | ||
1207 | static int write_data_to_flash_mem(struct denali_nand_info *denali, const uint8_t *buf, | ||
1208 | int len) | ||
1209 | { | ||
1210 | uint32_t i = 0, *buf32; | ||
1211 | |||
1212 | /* verify that the len is a multiple of 4. see comment in | ||
1213 | * read_data_from_flash_mem() */ | ||
1214 | BUG_ON((len % 4) != 0); | ||
1215 | |||
1216 | /* write the data to the flash memory */ | ||
1217 | buf32 = (uint32_t *)buf; | ||
1218 | for (i = 0; i < len / 4; i++) | ||
1219 | { | ||
1220 | denali_write32(*buf32++, denali->flash_mem + 0x10); | ||
1221 | } | ||
1222 | return i*4; /* intent is to return the number of bytes read */ | ||
1223 | } | ||
1224 | |||
1225 | /* helper function that simply reads a buffer from the flash */ | ||
1226 | static int read_data_from_flash_mem(struct denali_nand_info *denali, uint8_t *buf, | ||
1227 | int len) | ||
1228 | { | ||
1229 | uint32_t i = 0, *buf32; | ||
1230 | |||
1231 | /* we assume that len will be a multiple of 4, if not | ||
1232 | * it would be nice to know about it ASAP rather than | ||
1233 | * have random failures... | ||
1234 | * | ||
1235 | * This assumption is based on the fact that this | ||
1236 | * function is designed to be used to read flash pages, | ||
1237 | * which are typically multiples of 4... | ||
1238 | */ | ||
1239 | |||
1240 | BUG_ON((len % 4) != 0); | ||
1241 | |||
1242 | /* transfer the data from the flash */ | ||
1243 | buf32 = (uint32_t *)buf; | ||
1244 | for (i = 0; i < len / 4; i++) | ||
1245 | { | ||
1246 | *buf32++ = ioread32(denali->flash_mem + 0x10); | ||
1247 | } | ||
1248 | return i*4; /* intent is to return the number of bytes read */ | ||
1249 | } | ||
1250 | |||
1251 | /* writes OOB data to the device */ | ||
1252 | static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) | ||
1253 | { | ||
1254 | struct denali_nand_info *denali = mtd_to_denali(mtd); | ||
1255 | uint32_t irq_status = 0; | ||
1256 | uint32_t irq_mask = INTR_STATUS0__PROGRAM_COMP | | ||
1257 | INTR_STATUS0__PROGRAM_FAIL; | ||
1258 | int status = 0; | ||
1259 | |||
1260 | denali->page = page; | ||
1261 | |||
1262 | if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS, | ||
1263 | DENALI_WRITE) == PASS) | ||
1264 | { | ||
1265 | write_data_to_flash_mem(denali, buf, mtd->oobsize); | ||
1266 | |||
1267 | #if DEBUG_DENALI | ||
1268 | spin_lock_irq(&denali->irq_lock); | ||
1269 | denali->irq_debug_array[denali->idx++] = 0x80000000 | mtd->oobsize; | ||
1270 | denali->idx %= 32; | ||
1271 | spin_unlock_irq(&denali->irq_lock); | ||
1272 | #endif | ||
1273 | |||
1274 | |||
1275 | /* wait for operation to complete */ | ||
1276 | irq_status = wait_for_irq(denali, irq_mask); | ||
1277 | |||
1278 | if (irq_status == 0) | ||
1279 | { | ||
1280 | printk(KERN_ERR "OOB write failed\n"); | ||
1281 | status = -EIO; | ||
1282 | } | ||
1283 | } | ||
1284 | else | ||
1285 | { | ||
1286 | printk(KERN_ERR "unable to send pipeline command\n"); | ||
1287 | status = -EIO; | ||
1288 | } | ||
1289 | return status; | ||
1290 | } | ||
1291 | |||
1292 | /* reads OOB data from the device */ | ||
1293 | static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) | ||
1294 | { | ||
1295 | struct denali_nand_info *denali = mtd_to_denali(mtd); | ||
1296 | uint32_t irq_mask = INTR_STATUS0__LOAD_COMP, irq_status = 0, addr = 0x0, cmd = 0x0; | ||
1297 | |||
1298 | denali->page = page; | ||
1299 | |||
1300 | #if DEBUG_DENALI | ||
1301 | printk("read_oob %d\n", page); | ||
1302 | #endif | ||
1303 | if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS, | ||
1304 | DENALI_READ) == PASS) | ||
1305 | { | ||
1306 | read_data_from_flash_mem(denali, buf, mtd->oobsize); | ||
1307 | |||
1308 | /* wait for command to be accepted | ||
1309 | * can always use status0 bit as the mask is identical for each | ||
1310 | * bank. */ | ||
1311 | irq_status = wait_for_irq(denali, irq_mask); | ||
1312 | |||
1313 | if (irq_status == 0) | ||
1314 | { | ||
1315 | printk(KERN_ERR "page on OOB timeout %d\n", denali->page); | ||
1316 | } | ||
1317 | |||
1318 | /* We set the device back to MAIN_ACCESS here as I observed | ||
1319 | * instability with the controller if you do a block erase | ||
1320 | * and the last transaction was a SPARE_ACCESS. Block erase | ||
1321 | * is reliable (according to the MTD test infrastructure) | ||
1322 | * if you are in MAIN_ACCESS. | ||
1323 | */ | ||
1324 | addr = BANK(denali->flash_bank) | denali->page; | ||
1325 | cmd = MODE_10 | addr; | ||
1326 | index_addr(denali, (uint32_t)cmd, MAIN_ACCESS); | ||
1327 | |||
1328 | #if DEBUG_DENALI | ||
1329 | spin_lock_irq(&denali->irq_lock); | ||
1330 | denali->irq_debug_array[denali->idx++] = 0x60000000 | mtd->oobsize; | ||
1331 | denali->idx %= 32; | ||
1332 | spin_unlock_irq(&denali->irq_lock); | ||
1333 | #endif | ||
1334 | } | ||
1335 | } | ||
1336 | |||
/* this function examines buffers to see if they contain data that
 * indicate that the buffer is part of an erased region of flash
 * (every byte 0xFF). An empty buffer counts as erased.
 */
bool is_erased(uint8_t *buf, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xFF)
			return false;
	}
	return true;
}
#define ECC_SECTOR_SIZE 512

#define ECC_SECTOR(x)	(((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
#define ECC_BYTE(x)	(((x) & ECC_ERROR_ADDRESS__OFFSET))
#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
/* NOTE(review): masks with the ERR_CORRECTION_INFO register-offset
 * constant rather than an ERR_CORRECTION_INFO__* bit mask -- looks
 * wrong; confirm the intended field against the register header. */
#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO))
/* '>>' binds tighter than '&', so the previous form masked with
 * (DEVICE_NR >> 8) instead of extracting the field: mask first,
 * then shift the device number down. */
#define ECC_ERR_DEVICE(x)	(((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
#define ECC_LAST_ERR(x)	((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
1360 | |||
/* Walk the controller's ECC error report for the page just read.
 * Correctable errors are fixed up in-place in 'buf' (XOR with the
 * reported correction value) and counted in ecc_stats.corrected;
 * an uncorrectable error makes us return true so the caller can test
 * whether the page is simply erased. 'oobbuf' is accepted but not
 * used by this function. */
static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
			uint8_t *oobbuf, uint32_t irq_status)
{
	bool check_erased_page = false;

	if (irq_status & INTR_STATUS0__ECC_ERR)
	{
		/* read the ECC errors. we'll ignore them for now */
		uint32_t err_address = 0, err_correction_info = 0;
		uint32_t err_byte = 0, err_sector = 0, err_device = 0;
		uint32_t err_correction_value = 0;

		do
		{
			/* error location: sector within the page plus
			 * byte offset within that sector */
			err_address = ioread32(denali->flash_reg +
						ECC_ERROR_ADDRESS);
			err_sector = ECC_SECTOR(err_address);
			err_byte = ECC_BYTE(err_address);


			err_correction_info = ioread32(denali->flash_reg +
						ERR_CORRECTION_INFO);
			err_correction_value =
				ECC_CORRECTION_VALUE(err_correction_info);
			/* extracted but currently unused */
			err_device = ECC_ERR_DEVICE(err_correction_info);

			if (ECC_ERROR_CORRECTABLE(err_correction_info))
			{
				/* offset in our buffer is computed as:
				   sector number * sector size + offset in
				   sector
				*/
				int offset = err_sector * ECC_SECTOR_SIZE +
								err_byte;
				if (offset < denali->mtd.writesize)
				{
					/* correct the ECC error */
					buf[offset] ^= err_correction_value;
					denali->mtd.ecc_stats.corrected++;
				}
				else
				{
					/* bummer, couldn't correct the error */
					printk(KERN_ERR "ECC offset invalid\n");
					denali->mtd.ecc_stats.failed++;
				}
			}
			else
			{
				/* if the error is not correctable, need to
				 * look at the page to see if it is an erased page.
				 * if so, then it's not a real ECC error */
				check_erased_page = true;
			}

#if DEBUG_DENALI
			printk("Detected ECC error in page %d: err_addr = 0x%08x,"
				" info to fix is 0x%08x\n", denali->page, err_address,
				err_correction_info);
#endif
		} while (!ECC_LAST_ERR(err_correction_info));
	}
	return check_erased_page;
}
1425 | |||
1426 | /* programs the controller to either enable/disable DMA transfers */ | ||
1427 | static void denali_enable_dma(struct denali_nand_info *denali, bool en) | ||
1428 | { | ||
1429 | uint32_t reg_val = 0x0; | ||
1430 | |||
1431 | if (en) reg_val = DMA_ENABLE__FLAG; | ||
1432 | |||
1433 | denali_write32(reg_val, denali->flash_reg + DMA_ENABLE); | ||
1434 | ioread32(denali->flash_reg + DMA_ENABLE); | ||
1435 | } | ||
1436 | |||
/* setups the HW to perform the data DMA of one page to/from
 * denali->buf.dma_buf for denali->page on the active bank.
 * 'op' is DENALI_READ or DENALI_WRITE. Note only the low 32 bits of
 * the DMA address are programmed into the controller. */
static void denali_setup_dma(struct denali_nand_info *denali, int op)
{
	uint32_t mode = 0x0;
	const int page_count = 1;
	dma_addr_t addr = denali->buf.dma_buf;

	mode = MODE_10 | BANK(denali->flash_bank);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	index_addr(denali, mode | denali->page, 0x2000 | op | page_count);

	/* 2. set memory high address bits 23:8 */
	index_addr(denali, mode | ((uint16_t)(addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	index_addr(denali, mode | ((uint16_t)addr << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes*/
	index_addr(denali, mode | 0x14000, 0x2400);
}
1460 | |||
/* writes a page. user specifies type, and this function handles the
   configuration details:
   - !raw_xfer: hardware ECC enabled, only the main area is sent
   - raw_xfer:  ECC disabled, main area plus chip->oob_poi are sent */
static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
			const uint8_t *buf, bool raw_xfer)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct pci_dev *pci_dev = denali->dev;

	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;

	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP |
						INTR_STATUS0__PROGRAM_FAIL;

	/* if it is a raw xfer, we want to disable ecc, and send
	 * the spare area.
	 * !raw_xfer - enable ecc
	 * raw_xfer - transfer spare
	 */
	setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);

	/* copy buffer into DMA buffer */
	memcpy(denali->buf.buf, buf, mtd->writesize);

	if (raw_xfer)
	{
		/* transfer the data to the spare area */
		memcpy(denali->buf.buf + mtd->writesize,
			chip->oob_poi,
			mtd->oobsize);
	}

	pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_TODEVICE);

	clear_interrupts(denali);
	denali_enable_dma(denali, true);

	denali_setup_dma(denali, DENALI_WRITE);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	if (irq_status == 0)
	{
		printk(KERN_ERR "timeout on write_page (type = %d)\n", raw_xfer);
		/* NOTE(review): irq_status is 0 in this branch, so the
		 * PROGRAM_FAIL test below can never be true and status is
		 * always set to PASS here; the check probably belongs
		 * outside the timeout case -- confirm before changing. */
		denali->status =
			(irq_status & INTR_STATUS0__PROGRAM_FAIL) ? NAND_STATUS_FAIL :
			PASS;
	}

	denali_enable_dma(denali, false);
	pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_TODEVICE);
}
1515 | |||
/* NAND core entry points */

/* this is the callback that the NAND core calls to write a page. Since
   writing a page with ECC or without is similar, all the work is done
   by write_page above. */
static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				const uint8_t *buf)
{
	/* for regular page writes, we let HW handle all the ECC
	 * data written to the device.
	 * raw_xfer = false: ECC enabled, spare area not transferred. */
	write_page(mtd, chip, buf, false);
}
1528 | |||
/* This is the callback that the NAND core calls to write a page without ECC.
   raw access is similiar to ECC page writes, so all the work is done in the
   write_page() function above.
 */
static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
					const uint8_t *buf)
{
	/* for raw page writes, we want to disable ECC and simply write
	   whatever data is in the buffer.
	   raw_xfer = true: ECC disabled, chip->oob_poi is sent too. */
	write_page(mtd, chip, buf, true);
}
1540 | |||
/* NAND core callback: write the OOB area of 'page' from chip->oob_poi.
 * Returns 0 on success or -EIO on failure (see write_oob_data). */
static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	return write_oob_data(mtd, chip->oob_poi, page);
}
1546 | |||
/* NAND core callback: read the OOB area of 'page' into chip->oob_poi.
 * 'sndcmd' is ignored; always returns 0. */
static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page, int sndcmd)
{
	read_oob_data(mtd, chip->oob_poi, page);

	return 0; /* notify NAND core to send command to
		   * NAND device. */
}
1555 | |||
1556 | static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, | ||
1557 | uint8_t *buf, int page) | ||
1558 | { | ||
1559 | struct denali_nand_info *denali = mtd_to_denali(mtd); | ||
1560 | struct pci_dev *pci_dev = denali->dev; | ||
1561 | |||
1562 | dma_addr_t addr = denali->buf.dma_buf; | ||
1563 | size_t size = denali->mtd.writesize + denali->mtd.oobsize; | ||
1564 | |||
1565 | uint32_t irq_status = 0; | ||
1566 | uint32_t irq_mask = INTR_STATUS0__ECC_TRANSACTION_DONE | | ||
1567 | INTR_STATUS0__ECC_ERR; | ||
1568 | bool check_erased_page = false; | ||
1569 | |||
1570 | setup_ecc_for_xfer(denali, true, false); | ||
1571 | |||
1572 | denali_enable_dma(denali, true); | ||
1573 | pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE); | ||
1574 | |||
1575 | clear_interrupts(denali); | ||
1576 | denali_setup_dma(denali, DENALI_READ); | ||
1577 | |||
1578 | /* wait for operation to complete */ | ||
1579 | irq_status = wait_for_irq(denali, irq_mask); | ||
1580 | |||
1581 | pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE); | ||
1582 | |||
1583 | memcpy(buf, denali->buf.buf, mtd->writesize); | ||
1584 | |||
1585 | check_erased_page = handle_ecc(denali, buf, chip->oob_poi, irq_status); | ||
1586 | denali_enable_dma(denali, false); | ||
1587 | |||
1588 | if (check_erased_page) | ||
1589 | { | ||
1590 | read_oob_data(&denali->mtd, chip->oob_poi, denali->page); | ||
1591 | |||
1592 | /* check ECC failures that may have occurred on erased pages */ | ||
1593 | if (check_erased_page) | ||
1594 | { | ||
1595 | if (!is_erased(buf, denali->mtd.writesize)) | ||
1596 | { | ||
1597 | denali->mtd.ecc_stats.failed++; | ||
1598 | } | ||
1599 | if (!is_erased(buf, denali->mtd.oobsize)) | ||
1600 | { | ||
1601 | denali->mtd.ecc_stats.failed++; | ||
1602 | } | ||
1603 | } | ||
1604 | } | ||
1605 | return 0; | ||
1606 | } | ||
1607 | |||
/* NAND core ->ecc.read_page_raw hook: read one page plus its OOB with the
 * ECC engine bypassed, transferring both areas by DMA.
 * Note: the @page argument is unused here; the transfer targets the page
 * previously latched by denali_cmdfunc(). Always returns 0. */
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct pci_dev *pci_dev = denali->dev;

	dma_addr_t addr = denali->buf.dma_buf;
	/* the DMA transfer covers the main area plus the spare (OOB) area */
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;

	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP;

	/* presumably (ecc_en = false, transfer_spare = true) for a raw read —
	 * confirm against setup_ecc_for_xfer()'s parameter order */
	setup_ecc_for_xfer(denali, false, true);
	denali_enable_dma(denali, true);

	pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);

	clear_interrupts(denali);
	denali_setup_dma(denali, DENALI_READ);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);

	denali_enable_dma(denali, false);

	/* main data first, then OOB: laid out back to back in the bounce buffer */
	memcpy(buf, denali->buf.buf, mtd->writesize);
	memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);

	return 0;
}
1640 | |||
1641 | static uint8_t denali_read_byte(struct mtd_info *mtd) | ||
1642 | { | ||
1643 | struct denali_nand_info *denali = mtd_to_denali(mtd); | ||
1644 | uint8_t result = 0xff; | ||
1645 | |||
1646 | if (denali->buf.head < denali->buf.tail) | ||
1647 | { | ||
1648 | result = denali->buf.buf[denali->buf.head++]; | ||
1649 | } | ||
1650 | |||
1651 | #if DEBUG_DENALI | ||
1652 | printk("read byte -> 0x%02x\n", result); | ||
1653 | #endif | ||
1654 | return result; | ||
1655 | } | ||
1656 | |||
/* NAND core ->select_chip hook: record which flash bank subsequent
 * operations should target. The irq_lock is taken because flash_bank is
 * also consulted by the interrupt path. */
static void denali_select_chip(struct mtd_info *mtd, int chip)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
#if DEBUG_DENALI
	printk("denali select chip %d\n", chip);
#endif
	spin_lock_irq(&denali->irq_lock);
	denali->flash_bank = chip;
	spin_unlock_irq(&denali->irq_lock);
}
1667 | |||
1668 | static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip) | ||
1669 | { | ||
1670 | struct denali_nand_info *denali = mtd_to_denali(mtd); | ||
1671 | int status = denali->status; | ||
1672 | denali->status = 0; | ||
1673 | |||
1674 | #if DEBUG_DENALI | ||
1675 | printk("waitfunc %d\n", status); | ||
1676 | #endif | ||
1677 | return status; | ||
1678 | } | ||
1679 | |||
/* NAND core ->erase_cmd hook: issue a block erase through a MODE_10 indexed
 * command, wait for completion, and cache pass/fail in denali->status for
 * denali_waitfunc() to report. */
static void denali_erase(struct mtd_info *mtd, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	uint32_t cmd = 0x0, irq_status = 0;

#if DEBUG_DENALI
	printk("erase page: %d\n", page);
#endif
	/* clear interrupts */
	clear_interrupts(denali);

	/* setup page read request for access type */
	cmd = MODE_10 | BANK(denali->flash_bank) | page;
	index_addr(denali, (uint32_t)cmd, 0x1);

	/* wait for erase to complete or failure to occur */
	irq_status = wait_for_irq(denali, INTR_STATUS0__ERASE_COMP |
					  INTR_STATUS0__ERASE_FAIL);

	/* stash the outcome; the core reads it back via denali_waitfunc() */
	denali->status = (irq_status & INTR_STATUS0__ERASE_FAIL) ? NAND_STATUS_FAIL :
								   PASS;
}
1703 | |||
1704 | static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col, | ||
1705 | int page) | ||
1706 | { | ||
1707 | struct denali_nand_info *denali = mtd_to_denali(mtd); | ||
1708 | |||
1709 | #if DEBUG_DENALI | ||
1710 | printk("cmdfunc: 0x%x %d %d\n", cmd, col, page); | ||
1711 | #endif | ||
1712 | switch (cmd) | ||
1713 | { | ||
1714 | case NAND_CMD_PAGEPROG: | ||
1715 | break; | ||
1716 | case NAND_CMD_STATUS: | ||
1717 | read_status(denali); | ||
1718 | break; | ||
1719 | case NAND_CMD_READID: | ||
1720 | reset_buf(denali); | ||
1721 | if (denali->flash_bank < denali->total_used_banks) | ||
1722 | { | ||
1723 | /* write manufacturer information into nand | ||
1724 | buffer for NAND subsystem to fetch. | ||
1725 | */ | ||
1726 | write_byte_to_buf(denali, denali->dev_info.wDeviceMaker); | ||
1727 | write_byte_to_buf(denali, denali->dev_info.wDeviceID); | ||
1728 | write_byte_to_buf(denali, denali->dev_info.bDeviceParam0); | ||
1729 | write_byte_to_buf(denali, denali->dev_info.bDeviceParam1); | ||
1730 | write_byte_to_buf(denali, denali->dev_info.bDeviceParam2); | ||
1731 | } | ||
1732 | else | ||
1733 | { | ||
1734 | int i; | ||
1735 | for (i = 0; i < 5; i++) | ||
1736 | write_byte_to_buf(denali, 0xff); | ||
1737 | } | ||
1738 | break; | ||
1739 | case NAND_CMD_READ0: | ||
1740 | case NAND_CMD_SEQIN: | ||
1741 | denali->page = page; | ||
1742 | break; | ||
1743 | case NAND_CMD_RESET: | ||
1744 | reset_bank(denali); | ||
1745 | break; | ||
1746 | case NAND_CMD_READOOB: | ||
1747 | /* TODO: Read OOB data */ | ||
1748 | break; | ||
1749 | default: | ||
1750 | printk(KERN_ERR ": unsupported command received 0x%x\n", cmd); | ||
1751 | break; | ||
1752 | } | ||
1753 | } | ||
1754 | |||
/* stubs for ECC functions not used by the NAND core */

/* Never expected to run: ECC is done in hardware, so a call here means the
 * core was misconfigured. BUG() makes that loud; -EIO is unreachable. */
static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
				uint8_t *ecc_code)
{
	printk(KERN_ERR "denali_ecc_calculate called unexpectedly\n");
	BUG();
	return -EIO;
}
1763 | |||
/* Never expected to run: hardware performs correction; see ecc_calculate. */
static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
			      uint8_t *read_ecc, uint8_t *calc_ecc)
{
	printk(KERN_ERR "denali_ecc_correct called unexpectedly\n");
	BUG();
	return -EIO;
}
1771 | |||
/* Never expected to run: hardware ECC needs no per-operation control. */
static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
{
	printk(KERN_ERR "denali_ecc_hwctl called unexpectedly\n");
	BUG();
}
1777 | /* end NAND core entry points */ | ||
1778 | |||
/* Initialization code to bring the device up to a known good state */
static void denali_hw_init(struct denali_nand_info *denali)
{
	denali_irq_init(denali);
	NAND_Flash_Reset(denali);
	/* 0x0F = BANK0..BANK3 bits: enable the ready/busy pin on all banks */
	denali_write32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
	denali_write32(CHIP_EN_DONT_CARE__FLAG, denali->flash_reg + CHIP_ENABLE_DONT_CARE);

	/* skip no spare-area bytes; program the spare-area marker value */
	denali_write32(0x0, denali->flash_reg + SPARE_AREA_SKIP_BYTES);
	denali_write32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);

	/* Should set value for these registers when init */
	denali_write32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
	denali_write32(1, denali->flash_reg + ECC_ENABLE);
}
1794 | |||
1795 | /* ECC layout for SLC devices. Denali spec indicates SLC fixed at 4 bytes */ | ||
1796 | #define ECC_BYTES_SLC 4 * (2048 / ECC_SECTOR_SIZE) | ||
1797 | static struct nand_ecclayout nand_oob_slc = { | ||
1798 | .eccbytes = 4, | ||
1799 | .eccpos = { 0, 1, 2, 3 }, /* not used */ | ||
1800 | .oobfree = {{ | ||
1801 | .offset = ECC_BYTES_SLC, | ||
1802 | .length = 64 - ECC_BYTES_SLC | ||
1803 | }} | ||
1804 | }; | ||
1805 | |||
1806 | #define ECC_BYTES_MLC 14 * (2048 / ECC_SECTOR_SIZE) | ||
1807 | static struct nand_ecclayout nand_oob_mlc_14bit = { | ||
1808 | .eccbytes = 14, | ||
1809 | .eccpos = { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, /* not used */ | ||
1810 | .oobfree = {{ | ||
1811 | .offset = ECC_BYTES_MLC, | ||
1812 | .length = 64 - ECC_BYTES_MLC | ||
1813 | }} | ||
1814 | }; | ||
1815 | |||
/* Bad-block-table signature patterns: "Bbt0" for the main table and its
 * mirror "1tbB", matched in the OOB area to locate each copy. */
static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };

/* Main BBT: stored in the last good blocks, per chip, 2 bits per block,
 * versioned so the newer of main/mirror wins. */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = 4,
	.pattern = bbt_pattern,
};

/* Mirror BBT: identical layout, distinguished only by its pattern. */
static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = 4,
	.pattern = mirror_pattern,
};
1838 | |||
/* initialize driver data structures */
void denali_drv_init(struct denali_nand_info *denali)
{
	denali->idx = 0;

	/* setup interrupt handler */
	/* the completion object will be used to notify
	 * the callee that the interrupt is done */
	init_completion(&denali->complete);

	/* the spinlock will be used to synchronize the ISR
	 * with any element that might access shared
	 * data (interrupt status) */
	spin_lock_init(&denali->irq_lock);

	/* indicate that MTD has not selected a valid bank yet */
	denali->flash_bank = CHIP_SELECT_INVALID;

	/* initialize our irq_status variable to indicate no interrupts */
	denali->irq_status = 0;
}
1860 | |||
1861 | /* driver entry point */ | ||
1862 | static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) | ||
1863 | { | ||
1864 | int ret = -ENODEV; | ||
1865 | resource_size_t csr_base, mem_base; | ||
1866 | unsigned long csr_len, mem_len; | ||
1867 | struct denali_nand_info *denali; | ||
1868 | |||
1869 | nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", | ||
1870 | __FILE__, __LINE__, __func__); | ||
1871 | |||
1872 | denali = kzalloc(sizeof(*denali), GFP_KERNEL); | ||
1873 | if (!denali) | ||
1874 | return -ENOMEM; | ||
1875 | |||
1876 | ret = pci_enable_device(dev); | ||
1877 | if (ret) { | ||
1878 | printk(KERN_ERR "Spectra: pci_enable_device failed.\n"); | ||
1879 | goto failed_enable; | ||
1880 | } | ||
1881 | |||
1882 | if (id->driver_data == INTEL_CE4100) { | ||
1883 | /* Due to a silicon limitation, we can only support | ||
1884 | * ONFI timing mode 1 and below. | ||
1885 | */ | ||
1886 | if (onfi_timing_mode < -1 || onfi_timing_mode > 1) | ||
1887 | { | ||
1888 | printk("Intel CE4100 only supports ONFI timing mode 1 " | ||
1889 | "or below\n"); | ||
1890 | ret = -EINVAL; | ||
1891 | goto failed_enable; | ||
1892 | } | ||
1893 | denali->platform = INTEL_CE4100; | ||
1894 | mem_base = pci_resource_start(dev, 0); | ||
1895 | mem_len = pci_resource_len(dev, 1); | ||
1896 | csr_base = pci_resource_start(dev, 1); | ||
1897 | csr_len = pci_resource_len(dev, 1); | ||
1898 | } else { | ||
1899 | denali->platform = INTEL_MRST; | ||
1900 | csr_base = pci_resource_start(dev, 0); | ||
1901 | csr_len = pci_resource_start(dev, 0); | ||
1902 | mem_base = pci_resource_start(dev, 1); | ||
1903 | mem_len = pci_resource_len(dev, 1); | ||
1904 | if (!mem_len) { | ||
1905 | mem_base = csr_base + csr_len; | ||
1906 | mem_len = csr_len; | ||
1907 | nand_dbg_print(NAND_DBG_WARN, | ||
1908 | "Spectra: No second BAR for PCI device; assuming %08Lx\n", | ||
1909 | (uint64_t)csr_base); | ||
1910 | } | ||
1911 | } | ||
1912 | |||
1913 | /* Is 32-bit DMA supported? */ | ||
1914 | ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32)); | ||
1915 | |||
1916 | if (ret) | ||
1917 | { | ||
1918 | printk(KERN_ERR "Spectra: no usable DMA configuration\n"); | ||
1919 | goto failed_enable; | ||
1920 | } | ||
1921 | denali->buf.dma_buf = pci_map_single(dev, denali->buf.buf, DENALI_BUF_SIZE, | ||
1922 | PCI_DMA_BIDIRECTIONAL); | ||
1923 | |||
1924 | if (pci_dma_mapping_error(dev, denali->buf.dma_buf)) | ||
1925 | { | ||
1926 | printk(KERN_ERR "Spectra: failed to map DMA buffer\n"); | ||
1927 | goto failed_enable; | ||
1928 | } | ||
1929 | |||
1930 | pci_set_master(dev); | ||
1931 | denali->dev = dev; | ||
1932 | |||
1933 | ret = pci_request_regions(dev, DENALI_NAND_NAME); | ||
1934 | if (ret) { | ||
1935 | printk(KERN_ERR "Spectra: Unable to request memory regions\n"); | ||
1936 | goto failed_req_csr; | ||
1937 | } | ||
1938 | |||
1939 | denali->flash_reg = ioremap_nocache(csr_base, csr_len); | ||
1940 | if (!denali->flash_reg) { | ||
1941 | printk(KERN_ERR "Spectra: Unable to remap memory region\n"); | ||
1942 | ret = -ENOMEM; | ||
1943 | goto failed_remap_csr; | ||
1944 | } | ||
1945 | nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08Lx -> 0x%p (0x%lx)\n", | ||
1946 | (uint64_t)csr_base, denali->flash_reg, csr_len); | ||
1947 | |||
1948 | denali->flash_mem = ioremap_nocache(mem_base, mem_len); | ||
1949 | if (!denali->flash_mem) { | ||
1950 | printk(KERN_ERR "Spectra: ioremap_nocache failed!"); | ||
1951 | iounmap(denali->flash_reg); | ||
1952 | ret = -ENOMEM; | ||
1953 | goto failed_remap_csr; | ||
1954 | } | ||
1955 | |||
1956 | nand_dbg_print(NAND_DBG_WARN, | ||
1957 | "Spectra: Remapped flash base address: " | ||
1958 | "0x%p, len: %ld\n", | ||
1959 | denali->flash_mem, csr_len); | ||
1960 | |||
1961 | denali_hw_init(denali); | ||
1962 | denali_drv_init(denali); | ||
1963 | |||
1964 | nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq); | ||
1965 | if (request_irq(dev->irq, denali_isr, IRQF_SHARED, | ||
1966 | DENALI_NAND_NAME, denali)) { | ||
1967 | printk(KERN_ERR "Spectra: Unable to allocate IRQ\n"); | ||
1968 | ret = -ENODEV; | ||
1969 | goto failed_request_irq; | ||
1970 | } | ||
1971 | |||
1972 | /* now that our ISR is registered, we can enable interrupts */ | ||
1973 | NAND_LLD_Enable_Disable_Interrupts(denali, true); | ||
1974 | |||
1975 | pci_set_drvdata(dev, denali); | ||
1976 | |||
1977 | NAND_Read_Device_ID(denali); | ||
1978 | |||
1979 | /* MTD supported page sizes vary by kernel. We validate our | ||
1980 | kernel supports the device here. | ||
1981 | */ | ||
1982 | if (denali->dev_info.wPageSize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) | ||
1983 | { | ||
1984 | ret = -ENODEV; | ||
1985 | printk(KERN_ERR "Spectra: device size not supported by this " | ||
1986 | "version of MTD."); | ||
1987 | goto failed_nand; | ||
1988 | } | ||
1989 | |||
1990 | nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:" | ||
1991 | "acc_clks: %d, re_2_we: %d, we_2_re: %d," | ||
1992 | "addr_2_data: %d, rdwr_en_lo_cnt: %d, " | ||
1993 | "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n", | ||
1994 | ioread32(denali->flash_reg + ACC_CLKS), | ||
1995 | ioread32(denali->flash_reg + RE_2_WE), | ||
1996 | ioread32(denali->flash_reg + WE_2_RE), | ||
1997 | ioread32(denali->flash_reg + ADDR_2_DATA), | ||
1998 | ioread32(denali->flash_reg + RDWR_EN_LO_CNT), | ||
1999 | ioread32(denali->flash_reg + RDWR_EN_HI_CNT), | ||
2000 | ioread32(denali->flash_reg + CS_SETUP_CNT)); | ||
2001 | |||
2002 | denali->mtd.name = "Denali NAND"; | ||
2003 | denali->mtd.owner = THIS_MODULE; | ||
2004 | denali->mtd.priv = &denali->nand; | ||
2005 | |||
2006 | /* register the driver with the NAND core subsystem */ | ||
2007 | denali->nand.select_chip = denali_select_chip; | ||
2008 | denali->nand.cmdfunc = denali_cmdfunc; | ||
2009 | denali->nand.read_byte = denali_read_byte; | ||
2010 | denali->nand.waitfunc = denali_waitfunc; | ||
2011 | |||
2012 | /* scan for NAND devices attached to the controller | ||
2013 | * this is the first stage in a two step process to register | ||
2014 | * with the nand subsystem */ | ||
2015 | if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL)) | ||
2016 | { | ||
2017 | ret = -ENXIO; | ||
2018 | goto failed_nand; | ||
2019 | } | ||
2020 | |||
2021 | /* second stage of the NAND scan | ||
2022 | * this stage requires information regarding ECC and | ||
2023 | * bad block management. */ | ||
2024 | |||
2025 | /* Bad block management */ | ||
2026 | denali->nand.bbt_td = &bbt_main_descr; | ||
2027 | denali->nand.bbt_md = &bbt_mirror_descr; | ||
2028 | |||
2029 | /* skip the scan for now until we have OOB read and write support */ | ||
2030 | denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN; | ||
2031 | denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME; | ||
2032 | |||
2033 | if (denali->dev_info.MLCDevice) | ||
2034 | { | ||
2035 | denali->nand.ecc.layout = &nand_oob_mlc_14bit; | ||
2036 | denali->nand.ecc.bytes = ECC_BYTES_MLC; | ||
2037 | } | ||
2038 | else /* SLC */ | ||
2039 | { | ||
2040 | denali->nand.ecc.layout = &nand_oob_slc; | ||
2041 | denali->nand.ecc.bytes = ECC_BYTES_SLC; | ||
2042 | } | ||
2043 | |||
2044 | /* These functions are required by the NAND core framework, otherwise, | ||
2045 | the NAND core will assert. However, we don't need them, so we'll stub | ||
2046 | them out. */ | ||
2047 | denali->nand.ecc.calculate = denali_ecc_calculate; | ||
2048 | denali->nand.ecc.correct = denali_ecc_correct; | ||
2049 | denali->nand.ecc.hwctl = denali_ecc_hwctl; | ||
2050 | |||
2051 | /* override the default read operations */ | ||
2052 | denali->nand.ecc.size = denali->mtd.writesize; | ||
2053 | denali->nand.ecc.read_page = denali_read_page; | ||
2054 | denali->nand.ecc.read_page_raw = denali_read_page_raw; | ||
2055 | denali->nand.ecc.write_page = denali_write_page; | ||
2056 | denali->nand.ecc.write_page_raw = denali_write_page_raw; | ||
2057 | denali->nand.ecc.read_oob = denali_read_oob; | ||
2058 | denali->nand.ecc.write_oob = denali_write_oob; | ||
2059 | denali->nand.erase_cmd = denali_erase; | ||
2060 | |||
2061 | if (nand_scan_tail(&denali->mtd)) | ||
2062 | { | ||
2063 | ret = -ENXIO; | ||
2064 | goto failed_nand; | ||
2065 | } | ||
2066 | |||
2067 | ret = add_mtd_device(&denali->mtd); | ||
2068 | if (ret) { | ||
2069 | printk(KERN_ERR "Spectra: Failed to register MTD device: %d\n", ret); | ||
2070 | goto failed_nand; | ||
2071 | } | ||
2072 | return 0; | ||
2073 | |||
2074 | failed_nand: | ||
2075 | denali_irq_cleanup(dev->irq, denali); | ||
2076 | failed_request_irq: | ||
2077 | iounmap(denali->flash_reg); | ||
2078 | iounmap(denali->flash_mem); | ||
2079 | failed_remap_csr: | ||
2080 | pci_release_regions(dev); | ||
2081 | failed_req_csr: | ||
2082 | pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE, | ||
2083 | PCI_DMA_BIDIRECTIONAL); | ||
2084 | failed_enable: | ||
2085 | kfree(denali); | ||
2086 | return ret; | ||
2087 | } | ||
2088 | |||
2089 | /* driver exit point */ | ||
2090 | static void denali_pci_remove(struct pci_dev *dev) | ||
2091 | { | ||
2092 | struct denali_nand_info *denali = pci_get_drvdata(dev); | ||
2093 | |||
2094 | nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n", | ||
2095 | __FILE__, __LINE__, __func__); | ||
2096 | |||
2097 | nand_release(&denali->mtd); | ||
2098 | del_mtd_device(&denali->mtd); | ||
2099 | |||
2100 | denali_irq_cleanup(dev->irq, denali); | ||
2101 | |||
2102 | iounmap(denali->flash_reg); | ||
2103 | iounmap(denali->flash_mem); | ||
2104 | pci_release_regions(dev); | ||
2105 | pci_disable_device(dev); | ||
2106 | pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE, | ||
2107 | PCI_DMA_BIDIRECTIONAL); | ||
2108 | pci_set_drvdata(dev, NULL); | ||
2109 | kfree(denali); | ||
2110 | } | ||
2111 | |||
MODULE_DEVICE_TABLE(pci, denali_pci_ids);

/* PCI driver glue: binds denali_pci_probe/remove to the id table above */
static struct pci_driver denali_pci_driver = {
	.name = DENALI_NAND_NAME,
	.id_table = denali_pci_ids,
	.probe = denali_pci_probe,
	.remove = denali_pci_remove,
};
2120 | |||
2121 | static int __devinit denali_init(void) | ||
2122 | { | ||
2123 | printk(KERN_INFO "Spectra MTD driver built on %s @ %s\n", __DATE__, __TIME__); | ||
2124 | return pci_register_driver(&denali_pci_driver); | ||
2125 | } | ||
2126 | |||
2127 | /* Free memory */ | ||
2128 | static void __devexit denali_exit(void) | ||
2129 | { | ||
2130 | pci_unregister_driver(&denali_pci_driver); | ||
2131 | } | ||
2132 | |||
/* hook the module entry/exit points into the kernel module machinery */
module_init(denali_init);
module_exit(denali_exit);
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h new file mode 100644 index 000000000000..422a29ab2f60 --- /dev/null +++ b/drivers/mtd/nand/denali.h | |||
@@ -0,0 +1,816 @@ | |||
1 | /* | ||
2 | * NAND Flash Controller Device Driver | ||
3 | * Copyright (c) 2009 - 2010, Intel Corporation and its suppliers. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/mtd/nand.h> | ||
21 | |||
22 | #define DEVICE_RESET 0x0 | ||
23 | #define DEVICE_RESET__BANK0 0x0001 | ||
24 | #define DEVICE_RESET__BANK1 0x0002 | ||
25 | #define DEVICE_RESET__BANK2 0x0004 | ||
26 | #define DEVICE_RESET__BANK3 0x0008 | ||
27 | |||
28 | #define TRANSFER_SPARE_REG 0x10 | ||
29 | #define TRANSFER_SPARE_REG__FLAG 0x0001 | ||
30 | |||
31 | #define LOAD_WAIT_CNT 0x20 | ||
32 | #define LOAD_WAIT_CNT__VALUE 0xffff | ||
33 | |||
34 | #define PROGRAM_WAIT_CNT 0x30 | ||
35 | #define PROGRAM_WAIT_CNT__VALUE 0xffff | ||
36 | |||
37 | #define ERASE_WAIT_CNT 0x40 | ||
38 | #define ERASE_WAIT_CNT__VALUE 0xffff | ||
39 | |||
40 | #define INT_MON_CYCCNT 0x50 | ||
41 | #define INT_MON_CYCCNT__VALUE 0xffff | ||
42 | |||
43 | #define RB_PIN_ENABLED 0x60 | ||
44 | #define RB_PIN_ENABLED__BANK0 0x0001 | ||
45 | #define RB_PIN_ENABLED__BANK1 0x0002 | ||
46 | #define RB_PIN_ENABLED__BANK2 0x0004 | ||
47 | #define RB_PIN_ENABLED__BANK3 0x0008 | ||
48 | |||
49 | #define MULTIPLANE_OPERATION 0x70 | ||
50 | #define MULTIPLANE_OPERATION__FLAG 0x0001 | ||
51 | |||
52 | #define MULTIPLANE_READ_ENABLE 0x80 | ||
53 | #define MULTIPLANE_READ_ENABLE__FLAG 0x0001 | ||
54 | |||
55 | #define COPYBACK_DISABLE 0x90 | ||
56 | #define COPYBACK_DISABLE__FLAG 0x0001 | ||
57 | |||
58 | #define CACHE_WRITE_ENABLE 0xa0 | ||
59 | #define CACHE_WRITE_ENABLE__FLAG 0x0001 | ||
60 | |||
61 | #define CACHE_READ_ENABLE 0xb0 | ||
62 | #define CACHE_READ_ENABLE__FLAG 0x0001 | ||
63 | |||
64 | #define PREFETCH_MODE 0xc0 | ||
65 | #define PREFETCH_MODE__PREFETCH_EN 0x0001 | ||
66 | #define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0 | ||
67 | |||
68 | #define CHIP_ENABLE_DONT_CARE 0xd0 | ||
69 | #define CHIP_EN_DONT_CARE__FLAG 0x01 | ||
70 | |||
71 | #define ECC_ENABLE 0xe0 | ||
72 | #define ECC_ENABLE__FLAG 0x0001 | ||
73 | |||
74 | #define GLOBAL_INT_ENABLE 0xf0 | ||
75 | #define GLOBAL_INT_EN_FLAG 0x01 | ||
76 | |||
77 | #define WE_2_RE 0x100 | ||
78 | #define WE_2_RE__VALUE 0x003f | ||
79 | |||
80 | #define ADDR_2_DATA 0x110 | ||
81 | #define ADDR_2_DATA__VALUE 0x003f | ||
82 | |||
83 | #define RE_2_WE 0x120 | ||
84 | #define RE_2_WE__VALUE 0x003f | ||
85 | |||
86 | #define ACC_CLKS 0x130 | ||
87 | #define ACC_CLKS__VALUE 0x000f | ||
88 | |||
89 | #define NUMBER_OF_PLANES 0x140 | ||
90 | #define NUMBER_OF_PLANES__VALUE 0x0007 | ||
91 | |||
92 | #define PAGES_PER_BLOCK 0x150 | ||
93 | #define PAGES_PER_BLOCK__VALUE 0xffff | ||
94 | |||
95 | #define DEVICE_WIDTH 0x160 | ||
96 | #define DEVICE_WIDTH__VALUE 0x0003 | ||
97 | |||
98 | #define DEVICE_MAIN_AREA_SIZE 0x170 | ||
99 | #define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff | ||
100 | |||
101 | #define DEVICE_SPARE_AREA_SIZE 0x180 | ||
102 | #define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff | ||
103 | |||
104 | #define TWO_ROW_ADDR_CYCLES 0x190 | ||
105 | #define TWO_ROW_ADDR_CYCLES__FLAG 0x0001 | ||
106 | |||
107 | #define MULTIPLANE_ADDR_RESTRICT 0x1a0 | ||
108 | #define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001 | ||
109 | |||
110 | #define ECC_CORRECTION 0x1b0 | ||
111 | #define ECC_CORRECTION__VALUE 0x001f | ||
112 | |||
113 | #define READ_MODE 0x1c0 | ||
114 | #define READ_MODE__VALUE 0x000f | ||
115 | |||
116 | #define WRITE_MODE 0x1d0 | ||
117 | #define WRITE_MODE__VALUE 0x000f | ||
118 | |||
119 | #define COPYBACK_MODE 0x1e0 | ||
120 | #define COPYBACK_MODE__VALUE 0x000f | ||
121 | |||
122 | #define RDWR_EN_LO_CNT 0x1f0 | ||
123 | #define RDWR_EN_LO_CNT__VALUE 0x001f | ||
124 | |||
125 | #define RDWR_EN_HI_CNT 0x200 | ||
126 | #define RDWR_EN_HI_CNT__VALUE 0x001f | ||
127 | |||
128 | #define MAX_RD_DELAY 0x210 | ||
129 | #define MAX_RD_DELAY__VALUE 0x000f | ||
130 | |||
131 | #define CS_SETUP_CNT 0x220 | ||
132 | #define CS_SETUP_CNT__VALUE 0x001f | ||
133 | |||
134 | #define SPARE_AREA_SKIP_BYTES 0x230 | ||
135 | #define SPARE_AREA_SKIP_BYTES__VALUE 0x003f | ||
136 | |||
137 | #define SPARE_AREA_MARKER 0x240 | ||
138 | #define SPARE_AREA_MARKER__VALUE 0xffff | ||
139 | |||
140 | #define DEVICES_CONNECTED 0x250 | ||
141 | #define DEVICES_CONNECTED__VALUE 0x0007 | ||
142 | |||
143 | #define DIE_MASK 0x260 | ||
144 | #define DIE_MASK__VALUE 0x00ff | ||
145 | |||
146 | #define FIRST_BLOCK_OF_NEXT_PLANE 0x270 | ||
147 | #define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff | ||
148 | |||
149 | #define WRITE_PROTECT 0x280 | ||
150 | #define WRITE_PROTECT__FLAG 0x0001 | ||
151 | |||
152 | #define RE_2_RE 0x290 | ||
153 | #define RE_2_RE__VALUE 0x003f | ||
154 | |||
155 | #define MANUFACTURER_ID 0x300 | ||
156 | #define MANUFACTURER_ID__VALUE 0x00ff | ||
157 | |||
158 | #define DEVICE_ID 0x310 | ||
159 | #define DEVICE_ID__VALUE 0x00ff | ||
160 | |||
161 | #define DEVICE_PARAM_0 0x320 | ||
162 | #define DEVICE_PARAM_0__VALUE 0x00ff | ||
163 | |||
164 | #define DEVICE_PARAM_1 0x330 | ||
165 | #define DEVICE_PARAM_1__VALUE 0x00ff | ||
166 | |||
167 | #define DEVICE_PARAM_2 0x340 | ||
168 | #define DEVICE_PARAM_2__VALUE 0x00ff | ||
169 | |||
170 | #define LOGICAL_PAGE_DATA_SIZE 0x350 | ||
171 | #define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff | ||
172 | |||
173 | #define LOGICAL_PAGE_SPARE_SIZE 0x360 | ||
174 | #define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff | ||
175 | |||
176 | #define REVISION 0x370 | ||
177 | #define REVISION__VALUE 0xffff | ||
178 | |||
179 | #define ONFI_DEVICE_FEATURES 0x380 | ||
180 | #define ONFI_DEVICE_FEATURES__VALUE 0x003f | ||
181 | |||
182 | #define ONFI_OPTIONAL_COMMANDS 0x390 | ||
183 | #define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f | ||
184 | |||
185 | #define ONFI_TIMING_MODE 0x3a0 | ||
186 | #define ONFI_TIMING_MODE__VALUE 0x003f | ||
187 | |||
188 | #define ONFI_PGM_CACHE_TIMING_MODE 0x3b0 | ||
189 | #define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f | ||
190 | |||
191 | #define ONFI_DEVICE_NO_OF_LUNS 0x3c0 | ||
192 | #define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff | ||
193 | #define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100 | ||
194 | |||
195 | #define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0 | ||
196 | #define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff | ||
197 | |||
198 | #define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0 | ||
199 | #define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff | ||
200 | |||
201 | #define FEATURES 0x3f0 | ||
202 | #define FEATURES__N_BANKS 0x0003 | ||
203 | #define FEATURES__ECC_MAX_ERR 0x003c | ||
204 | #define FEATURES__DMA 0x0040 | ||
205 | #define FEATURES__CMD_DMA 0x0080 | ||
206 | #define FEATURES__PARTITION 0x0100 | ||
207 | #define FEATURES__XDMA_SIDEBAND 0x0200 | ||
208 | #define FEATURES__GPREG 0x0400 | ||
209 | #define FEATURES__INDEX_ADDR 0x0800 | ||
210 | |||
211 | #define TRANSFER_MODE 0x400 | ||
212 | #define TRANSFER_MODE__VALUE 0x0003 | ||
213 | |||
214 | #define INTR_STATUS0 0x410 | ||
215 | #define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001 | ||
216 | #define INTR_STATUS0__ECC_ERR 0x0002 | ||
217 | #define INTR_STATUS0__DMA_CMD_COMP 0x0004 | ||
218 | #define INTR_STATUS0__TIME_OUT 0x0008 | ||
219 | #define INTR_STATUS0__PROGRAM_FAIL 0x0010 | ||
220 | #define INTR_STATUS0__ERASE_FAIL 0x0020 | ||
221 | #define INTR_STATUS0__LOAD_COMP 0x0040 | ||
222 | #define INTR_STATUS0__PROGRAM_COMP 0x0080 | ||
223 | #define INTR_STATUS0__ERASE_COMP 0x0100 | ||
224 | #define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200 | ||
225 | #define INTR_STATUS0__LOCKED_BLK 0x0400 | ||
226 | #define INTR_STATUS0__UNSUP_CMD 0x0800 | ||
227 | #define INTR_STATUS0__INT_ACT 0x1000 | ||
228 | #define INTR_STATUS0__RST_COMP 0x2000 | ||
229 | #define INTR_STATUS0__PIPE_CMD_ERR 0x4000 | ||
230 | #define INTR_STATUS0__PAGE_XFER_INC 0x8000 | ||
231 | |||
232 | #define INTR_EN0 0x420 | ||
233 | #define INTR_EN0__ECC_TRANSACTION_DONE 0x0001 | ||
234 | #define INTR_EN0__ECC_ERR 0x0002 | ||
235 | #define INTR_EN0__DMA_CMD_COMP 0x0004 | ||
236 | #define INTR_EN0__TIME_OUT 0x0008 | ||
237 | #define INTR_EN0__PROGRAM_FAIL 0x0010 | ||
238 | #define INTR_EN0__ERASE_FAIL 0x0020 | ||
239 | #define INTR_EN0__LOAD_COMP 0x0040 | ||
240 | #define INTR_EN0__PROGRAM_COMP 0x0080 | ||
241 | #define INTR_EN0__ERASE_COMP 0x0100 | ||
242 | #define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200 | ||
243 | #define INTR_EN0__LOCKED_BLK 0x0400 | ||
244 | #define INTR_EN0__UNSUP_CMD 0x0800 | ||
245 | #define INTR_EN0__INT_ACT 0x1000 | ||
246 | #define INTR_EN0__RST_COMP 0x2000 | ||
247 | #define INTR_EN0__PIPE_CMD_ERR 0x4000 | ||
248 | #define INTR_EN0__PAGE_XFER_INC 0x8000 | ||
249 | |||
250 | #define PAGE_CNT0 0x430 | ||
251 | #define PAGE_CNT0__VALUE 0x00ff | ||
252 | |||
253 | #define ERR_PAGE_ADDR0 0x440 | ||
254 | #define ERR_PAGE_ADDR0__VALUE 0xffff | ||
255 | |||
256 | #define ERR_BLOCK_ADDR0 0x450 | ||
257 | #define ERR_BLOCK_ADDR0__VALUE 0xffff | ||
258 | |||
259 | #define INTR_STATUS1 0x460 | ||
260 | #define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001 | ||
261 | #define INTR_STATUS1__ECC_ERR 0x0002 | ||
262 | #define INTR_STATUS1__DMA_CMD_COMP 0x0004 | ||
263 | #define INTR_STATUS1__TIME_OUT 0x0008 | ||
264 | #define INTR_STATUS1__PROGRAM_FAIL 0x0010 | ||
265 | #define INTR_STATUS1__ERASE_FAIL 0x0020 | ||
266 | #define INTR_STATUS1__LOAD_COMP 0x0040 | ||
267 | #define INTR_STATUS1__PROGRAM_COMP 0x0080 | ||
268 | #define INTR_STATUS1__ERASE_COMP 0x0100 | ||
269 | #define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200 | ||
270 | #define INTR_STATUS1__LOCKED_BLK 0x0400 | ||
271 | #define INTR_STATUS1__UNSUP_CMD 0x0800 | ||
272 | #define INTR_STATUS1__INT_ACT 0x1000 | ||
273 | #define INTR_STATUS1__RST_COMP 0x2000 | ||
274 | #define INTR_STATUS1__PIPE_CMD_ERR 0x4000 | ||
275 | #define INTR_STATUS1__PAGE_XFER_INC 0x8000 | ||
276 | |||
277 | #define INTR_EN1 0x470 | ||
278 | #define INTR_EN1__ECC_TRANSACTION_DONE 0x0001 | ||
279 | #define INTR_EN1__ECC_ERR 0x0002 | ||
280 | #define INTR_EN1__DMA_CMD_COMP 0x0004 | ||
281 | #define INTR_EN1__TIME_OUT 0x0008 | ||
282 | #define INTR_EN1__PROGRAM_FAIL 0x0010 | ||
283 | #define INTR_EN1__ERASE_FAIL 0x0020 | ||
284 | #define INTR_EN1__LOAD_COMP 0x0040 | ||
285 | #define INTR_EN1__PROGRAM_COMP 0x0080 | ||
286 | #define INTR_EN1__ERASE_COMP 0x0100 | ||
287 | #define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200 | ||
288 | #define INTR_EN1__LOCKED_BLK 0x0400 | ||
289 | #define INTR_EN1__UNSUP_CMD 0x0800 | ||
290 | #define INTR_EN1__INT_ACT 0x1000 | ||
291 | #define INTR_EN1__RST_COMP 0x2000 | ||
292 | #define INTR_EN1__PIPE_CMD_ERR 0x4000 | ||
293 | #define INTR_EN1__PAGE_XFER_INC 0x8000 | ||
294 | |||
295 | #define PAGE_CNT1 0x480 | ||
296 | #define PAGE_CNT1__VALUE 0x00ff | ||
297 | |||
298 | #define ERR_PAGE_ADDR1 0x490 | ||
299 | #define ERR_PAGE_ADDR1__VALUE 0xffff | ||
300 | |||
301 | #define ERR_BLOCK_ADDR1 0x4a0 | ||
302 | #define ERR_BLOCK_ADDR1__VALUE 0xffff | ||
303 | |||
304 | #define INTR_STATUS2 0x4b0 | ||
305 | #define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001 | ||
306 | #define INTR_STATUS2__ECC_ERR 0x0002 | ||
307 | #define INTR_STATUS2__DMA_CMD_COMP 0x0004 | ||
308 | #define INTR_STATUS2__TIME_OUT 0x0008 | ||
309 | #define INTR_STATUS2__PROGRAM_FAIL 0x0010 | ||
310 | #define INTR_STATUS2__ERASE_FAIL 0x0020 | ||
311 | #define INTR_STATUS2__LOAD_COMP 0x0040 | ||
312 | #define INTR_STATUS2__PROGRAM_COMP 0x0080 | ||
313 | #define INTR_STATUS2__ERASE_COMP 0x0100 | ||
314 | #define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200 | ||
315 | #define INTR_STATUS2__LOCKED_BLK 0x0400 | ||
316 | #define INTR_STATUS2__UNSUP_CMD 0x0800 | ||
317 | #define INTR_STATUS2__INT_ACT 0x1000 | ||
318 | #define INTR_STATUS2__RST_COMP 0x2000 | ||
319 | #define INTR_STATUS2__PIPE_CMD_ERR 0x4000 | ||
320 | #define INTR_STATUS2__PAGE_XFER_INC 0x8000 | ||
321 | |||
322 | #define INTR_EN2 0x4c0 | ||
323 | #define INTR_EN2__ECC_TRANSACTION_DONE 0x0001 | ||
324 | #define INTR_EN2__ECC_ERR 0x0002 | ||
325 | #define INTR_EN2__DMA_CMD_COMP 0x0004 | ||
326 | #define INTR_EN2__TIME_OUT 0x0008 | ||
327 | #define INTR_EN2__PROGRAM_FAIL 0x0010 | ||
328 | #define INTR_EN2__ERASE_FAIL 0x0020 | ||
329 | #define INTR_EN2__LOAD_COMP 0x0040 | ||
330 | #define INTR_EN2__PROGRAM_COMP 0x0080 | ||
331 | #define INTR_EN2__ERASE_COMP 0x0100 | ||
332 | #define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200 | ||
333 | #define INTR_EN2__LOCKED_BLK 0x0400 | ||
334 | #define INTR_EN2__UNSUP_CMD 0x0800 | ||
335 | #define INTR_EN2__INT_ACT 0x1000 | ||
336 | #define INTR_EN2__RST_COMP 0x2000 | ||
337 | #define INTR_EN2__PIPE_CMD_ERR 0x4000 | ||
338 | #define INTR_EN2__PAGE_XFER_INC 0x8000 | ||
339 | |||
340 | #define PAGE_CNT2 0x4d0 | ||
341 | #define PAGE_CNT2__VALUE 0x00ff | ||
342 | |||
343 | #define ERR_PAGE_ADDR2 0x4e0 | ||
344 | #define ERR_PAGE_ADDR2__VALUE 0xffff | ||
345 | |||
346 | #define ERR_BLOCK_ADDR2 0x4f0 | ||
347 | #define ERR_BLOCK_ADDR2__VALUE 0xffff | ||
348 | |||
349 | #define INTR_STATUS3 0x500 | ||
350 | #define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001 | ||
351 | #define INTR_STATUS3__ECC_ERR 0x0002 | ||
352 | #define INTR_STATUS3__DMA_CMD_COMP 0x0004 | ||
353 | #define INTR_STATUS3__TIME_OUT 0x0008 | ||
354 | #define INTR_STATUS3__PROGRAM_FAIL 0x0010 | ||
355 | #define INTR_STATUS3__ERASE_FAIL 0x0020 | ||
356 | #define INTR_STATUS3__LOAD_COMP 0x0040 | ||
357 | #define INTR_STATUS3__PROGRAM_COMP 0x0080 | ||
358 | #define INTR_STATUS3__ERASE_COMP 0x0100 | ||
359 | #define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200 | ||
360 | #define INTR_STATUS3__LOCKED_BLK 0x0400 | ||
361 | #define INTR_STATUS3__UNSUP_CMD 0x0800 | ||
362 | #define INTR_STATUS3__INT_ACT 0x1000 | ||
363 | #define INTR_STATUS3__RST_COMP 0x2000 | ||
364 | #define INTR_STATUS3__PIPE_CMD_ERR 0x4000 | ||
365 | #define INTR_STATUS3__PAGE_XFER_INC 0x8000 | ||
366 | |||
367 | #define INTR_EN3 0x510 | ||
368 | #define INTR_EN3__ECC_TRANSACTION_DONE 0x0001 | ||
369 | #define INTR_EN3__ECC_ERR 0x0002 | ||
370 | #define INTR_EN3__DMA_CMD_COMP 0x0004 | ||
371 | #define INTR_EN3__TIME_OUT 0x0008 | ||
372 | #define INTR_EN3__PROGRAM_FAIL 0x0010 | ||
373 | #define INTR_EN3__ERASE_FAIL 0x0020 | ||
374 | #define INTR_EN3__LOAD_COMP 0x0040 | ||
375 | #define INTR_EN3__PROGRAM_COMP 0x0080 | ||
376 | #define INTR_EN3__ERASE_COMP 0x0100 | ||
377 | #define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200 | ||
378 | #define INTR_EN3__LOCKED_BLK 0x0400 | ||
379 | #define INTR_EN3__UNSUP_CMD 0x0800 | ||
380 | #define INTR_EN3__INT_ACT 0x1000 | ||
381 | #define INTR_EN3__RST_COMP 0x2000 | ||
382 | #define INTR_EN3__PIPE_CMD_ERR 0x4000 | ||
383 | #define INTR_EN3__PAGE_XFER_INC 0x8000 | ||
384 | |||
385 | #define PAGE_CNT3 0x520 | ||
386 | #define PAGE_CNT3__VALUE 0x00ff | ||
387 | |||
388 | #define ERR_PAGE_ADDR3 0x530 | ||
389 | #define ERR_PAGE_ADDR3__VALUE 0xffff | ||
390 | |||
391 | #define ERR_BLOCK_ADDR3 0x540 | ||
392 | #define ERR_BLOCK_ADDR3__VALUE 0xffff | ||
393 | |||
394 | #define DATA_INTR 0x550 | ||
395 | #define DATA_INTR__WRITE_SPACE_AV 0x0001 | ||
396 | #define DATA_INTR__READ_DATA_AV 0x0002 | ||
397 | |||
398 | #define DATA_INTR_EN 0x560 | ||
399 | #define DATA_INTR_EN__WRITE_SPACE_AV 0x0001 | ||
400 | #define DATA_INTR_EN__READ_DATA_AV 0x0002 | ||
401 | |||
402 | #define GPREG_0 0x570 | ||
403 | #define GPREG_0__VALUE 0xffff | ||
404 | |||
405 | #define GPREG_1 0x580 | ||
406 | #define GPREG_1__VALUE 0xffff | ||
407 | |||
408 | #define GPREG_2 0x590 | ||
409 | #define GPREG_2__VALUE 0xffff | ||
410 | |||
411 | #define GPREG_3 0x5a0 | ||
412 | #define GPREG_3__VALUE 0xffff | ||
413 | |||
414 | #define ECC_THRESHOLD 0x600 | ||
415 | #define ECC_THRESHOLD__VALUE 0x03ff | ||
416 | |||
417 | #define ECC_ERROR_BLOCK_ADDRESS 0x610 | ||
418 | #define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff | ||
419 | |||
420 | #define ECC_ERROR_PAGE_ADDRESS 0x620 | ||
421 | #define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff | ||
422 | #define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000 | ||
423 | |||
424 | #define ECC_ERROR_ADDRESS 0x630 | ||
425 | #define ECC_ERROR_ADDRESS__OFFSET 0x0fff | ||
426 | #define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000 | ||
427 | |||
428 | #define ERR_CORRECTION_INFO 0x640 | ||
429 | #define ERR_CORRECTION_INFO__BYTEMASK 0x00ff | ||
430 | #define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00 | ||
431 | #define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000 | ||
432 | #define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000 | ||
433 | |||
434 | #define DMA_ENABLE 0x700 | ||
435 | #define DMA_ENABLE__FLAG 0x0001 | ||
436 | |||
437 | #define IGNORE_ECC_DONE 0x710 | ||
438 | #define IGNORE_ECC_DONE__FLAG 0x0001 | ||
439 | |||
440 | #define DMA_INTR 0x720 | ||
441 | #define DMA_INTR__TARGET_ERROR 0x0001 | ||
442 | #define DMA_INTR__DESC_COMP_CHANNEL0 0x0002 | ||
443 | #define DMA_INTR__DESC_COMP_CHANNEL1 0x0004 | ||
444 | #define DMA_INTR__DESC_COMP_CHANNEL2 0x0008 | ||
445 | #define DMA_INTR__DESC_COMP_CHANNEL3 0x0010 | ||
446 | #define DMA_INTR__MEMCOPY_DESC_COMP 0x0020 | ||
447 | |||
448 | #define DMA_INTR_EN 0x730 | ||
449 | #define DMA_INTR_EN__TARGET_ERROR 0x0001 | ||
450 | #define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002 | ||
451 | #define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004 | ||
452 | #define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008 | ||
453 | #define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010 | ||
454 | #define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020 | ||
455 | |||
456 | #define TARGET_ERR_ADDR_LO 0x740 | ||
457 | #define TARGET_ERR_ADDR_LO__VALUE 0xffff | ||
458 | |||
459 | #define TARGET_ERR_ADDR_HI 0x750 | ||
460 | #define TARGET_ERR_ADDR_HI__VALUE 0xffff | ||
461 | |||
462 | #define CHNL_ACTIVE 0x760 | ||
463 | #define CHNL_ACTIVE__CHANNEL0 0x0001 | ||
464 | #define CHNL_ACTIVE__CHANNEL1 0x0002 | ||
465 | #define CHNL_ACTIVE__CHANNEL2 0x0004 | ||
466 | #define CHNL_ACTIVE__CHANNEL3 0x0008 | ||
467 | |||
468 | #define ACTIVE_SRC_ID 0x800 | ||
469 | #define ACTIVE_SRC_ID__VALUE 0x00ff | ||
470 | |||
471 | #define PTN_INTR 0x810 | ||
472 | #define PTN_INTR__CONFIG_ERROR 0x0001 | ||
473 | #define PTN_INTR__ACCESS_ERROR_BANK0 0x0002 | ||
474 | #define PTN_INTR__ACCESS_ERROR_BANK1 0x0004 | ||
475 | #define PTN_INTR__ACCESS_ERROR_BANK2 0x0008 | ||
476 | #define PTN_INTR__ACCESS_ERROR_BANK3 0x0010 | ||
477 | #define PTN_INTR__REG_ACCESS_ERROR 0x0020 | ||
478 | |||
479 | #define PTN_INTR_EN 0x820 | ||
480 | #define PTN_INTR_EN__CONFIG_ERROR 0x0001 | ||
481 | #define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002 | ||
482 | #define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004 | ||
483 | #define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008 | ||
484 | #define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010 | ||
485 | #define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020 | ||
486 | |||
487 | #define PERM_SRC_ID_0 0x830 | ||
488 | #define PERM_SRC_ID_0__SRCID 0x00ff | ||
489 | #define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800 | ||
490 | #define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000 | ||
491 | #define PERM_SRC_ID_0__READ_ACTIVE 0x4000 | ||
492 | #define PERM_SRC_ID_0__PARTITION_VALID 0x8000 | ||
493 | |||
494 | #define MIN_BLK_ADDR_0 0x840 | ||
495 | #define MIN_BLK_ADDR_0__VALUE 0xffff | ||
496 | |||
497 | #define MAX_BLK_ADDR_0 0x850 | ||
498 | #define MAX_BLK_ADDR_0__VALUE 0xffff | ||
499 | |||
500 | #define MIN_MAX_BANK_0 0x860 | ||
501 | #define MIN_MAX_BANK_0__MIN_VALUE 0x0003 | ||
502 | #define MIN_MAX_BANK_0__MAX_VALUE 0x000c | ||
503 | |||
504 | #define PERM_SRC_ID_1 0x870 | ||
505 | #define PERM_SRC_ID_1__SRCID 0x00ff | ||
506 | #define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800 | ||
507 | #define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000 | ||
508 | #define PERM_SRC_ID_1__READ_ACTIVE 0x4000 | ||
509 | #define PERM_SRC_ID_1__PARTITION_VALID 0x8000 | ||
510 | |||
511 | #define MIN_BLK_ADDR_1 0x880 | ||
512 | #define MIN_BLK_ADDR_1__VALUE 0xffff | ||
513 | |||
514 | #define MAX_BLK_ADDR_1 0x890 | ||
515 | #define MAX_BLK_ADDR_1__VALUE 0xffff | ||
516 | |||
517 | #define MIN_MAX_BANK_1 0x8a0 | ||
518 | #define MIN_MAX_BANK_1__MIN_VALUE 0x0003 | ||
519 | #define MIN_MAX_BANK_1__MAX_VALUE 0x000c | ||
520 | |||
521 | #define PERM_SRC_ID_2 0x8b0 | ||
522 | #define PERM_SRC_ID_2__SRCID 0x00ff | ||
523 | #define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800 | ||
524 | #define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000 | ||
525 | #define PERM_SRC_ID_2__READ_ACTIVE 0x4000 | ||
526 | #define PERM_SRC_ID_2__PARTITION_VALID 0x8000 | ||
527 | |||
528 | #define MIN_BLK_ADDR_2 0x8c0 | ||
529 | #define MIN_BLK_ADDR_2__VALUE 0xffff | ||
530 | |||
531 | #define MAX_BLK_ADDR_2 0x8d0 | ||
532 | #define MAX_BLK_ADDR_2__VALUE 0xffff | ||
533 | |||
534 | #define MIN_MAX_BANK_2 0x8e0 | ||
535 | #define MIN_MAX_BANK_2__MIN_VALUE 0x0003 | ||
536 | #define MIN_MAX_BANK_2__MAX_VALUE 0x000c | ||
537 | |||
538 | #define PERM_SRC_ID_3 0x8f0 | ||
539 | #define PERM_SRC_ID_3__SRCID 0x00ff | ||
540 | #define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800 | ||
541 | #define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000 | ||
542 | #define PERM_SRC_ID_3__READ_ACTIVE 0x4000 | ||
543 | #define PERM_SRC_ID_3__PARTITION_VALID 0x8000 | ||
544 | |||
545 | #define MIN_BLK_ADDR_3 0x900 | ||
546 | #define MIN_BLK_ADDR_3__VALUE 0xffff | ||
547 | |||
548 | #define MAX_BLK_ADDR_3 0x910 | ||
549 | #define MAX_BLK_ADDR_3__VALUE 0xffff | ||
550 | |||
551 | #define MIN_MAX_BANK_3 0x920 | ||
552 | #define MIN_MAX_BANK_3__MIN_VALUE 0x0003 | ||
553 | #define MIN_MAX_BANK_3__MAX_VALUE 0x000c | ||
554 | |||
555 | #define PERM_SRC_ID_4 0x930 | ||
556 | #define PERM_SRC_ID_4__SRCID 0x00ff | ||
557 | #define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800 | ||
558 | #define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000 | ||
559 | #define PERM_SRC_ID_4__READ_ACTIVE 0x4000 | ||
560 | #define PERM_SRC_ID_4__PARTITION_VALID 0x8000 | ||
561 | |||
562 | #define MIN_BLK_ADDR_4 0x940 | ||
563 | #define MIN_BLK_ADDR_4__VALUE 0xffff | ||
564 | |||
565 | #define MAX_BLK_ADDR_4 0x950 | ||
566 | #define MAX_BLK_ADDR_4__VALUE 0xffff | ||
567 | |||
568 | #define MIN_MAX_BANK_4 0x960 | ||
569 | #define MIN_MAX_BANK_4__MIN_VALUE 0x0003 | ||
570 | #define MIN_MAX_BANK_4__MAX_VALUE 0x000c | ||
571 | |||
572 | #define PERM_SRC_ID_5 0x970 | ||
573 | #define PERM_SRC_ID_5__SRCID 0x00ff | ||
574 | #define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800 | ||
575 | #define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000 | ||
576 | #define PERM_SRC_ID_5__READ_ACTIVE 0x4000 | ||
577 | #define PERM_SRC_ID_5__PARTITION_VALID 0x8000 | ||
578 | |||
579 | #define MIN_BLK_ADDR_5 0x980 | ||
580 | #define MIN_BLK_ADDR_5__VALUE 0xffff | ||
581 | |||
582 | #define MAX_BLK_ADDR_5 0x990 | ||
583 | #define MAX_BLK_ADDR_5__VALUE 0xffff | ||
584 | |||
585 | #define MIN_MAX_BANK_5 0x9a0 | ||
586 | #define MIN_MAX_BANK_5__MIN_VALUE 0x0003 | ||
587 | #define MIN_MAX_BANK_5__MAX_VALUE 0x000c | ||
588 | |||
589 | #define PERM_SRC_ID_6 0x9b0 | ||
590 | #define PERM_SRC_ID_6__SRCID 0x00ff | ||
591 | #define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800 | ||
592 | #define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000 | ||
593 | #define PERM_SRC_ID_6__READ_ACTIVE 0x4000 | ||
594 | #define PERM_SRC_ID_6__PARTITION_VALID 0x8000 | ||
595 | |||
596 | #define MIN_BLK_ADDR_6 0x9c0 | ||
597 | #define MIN_BLK_ADDR_6__VALUE 0xffff | ||
598 | |||
599 | #define MAX_BLK_ADDR_6 0x9d0 | ||
600 | #define MAX_BLK_ADDR_6__VALUE 0xffff | ||
601 | |||
602 | #define MIN_MAX_BANK_6 0x9e0 | ||
603 | #define MIN_MAX_BANK_6__MIN_VALUE 0x0003 | ||
604 | #define MIN_MAX_BANK_6__MAX_VALUE 0x000c | ||
605 | |||
606 | #define PERM_SRC_ID_7 0x9f0 | ||
607 | #define PERM_SRC_ID_7__SRCID 0x00ff | ||
608 | #define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800 | ||
609 | #define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000 | ||
610 | #define PERM_SRC_ID_7__READ_ACTIVE 0x4000 | ||
611 | #define PERM_SRC_ID_7__PARTITION_VALID 0x8000 | ||
612 | |||
613 | #define MIN_BLK_ADDR_7 0xa00 | ||
614 | #define MIN_BLK_ADDR_7__VALUE 0xffff | ||
615 | |||
616 | #define MAX_BLK_ADDR_7 0xa10 | ||
617 | #define MAX_BLK_ADDR_7__VALUE 0xffff | ||
618 | |||
619 | #define MIN_MAX_BANK_7 0xa20 | ||
620 | #define MIN_MAX_BANK_7__MIN_VALUE 0x0003 | ||
621 | #define MIN_MAX_BANK_7__MAX_VALUE 0x000c | ||
622 | |||
623 | /* flash.h */ | ||
624 | struct device_info_tag { | ||
625 | uint16_t wDeviceMaker; | ||
626 | uint16_t wDeviceID; | ||
627 | uint8_t bDeviceParam0; | ||
628 | uint8_t bDeviceParam1; | ||
629 | uint8_t bDeviceParam2; | ||
630 | uint32_t wDeviceType; | ||
631 | uint32_t wSpectraStartBlock; | ||
632 | uint32_t wSpectraEndBlock; | ||
633 | uint32_t wTotalBlocks; | ||
634 | uint16_t wPagesPerBlock; | ||
635 | uint16_t wPageSize; | ||
636 | uint16_t wPageDataSize; | ||
637 | uint16_t wPageSpareSize; | ||
638 | uint16_t wNumPageSpareFlag; | ||
639 | uint16_t wECCBytesPerSector; | ||
640 | uint32_t wBlockSize; | ||
641 | uint32_t wBlockDataSize; | ||
642 | uint32_t wDataBlockNum; | ||
643 | uint8_t bPlaneNum; | ||
644 | uint16_t wDeviceMainAreaSize; | ||
645 | uint16_t wDeviceSpareAreaSize; | ||
646 | uint16_t wDevicesConnected; | ||
647 | uint16_t wDeviceWidth; | ||
648 | uint16_t wHWRevision; | ||
649 | uint16_t wHWFeatures; | ||
650 | |||
651 | uint16_t wONFIDevFeatures; | ||
652 | uint16_t wONFIOptCommands; | ||
653 | uint16_t wONFITimingMode; | ||
654 | uint16_t wONFIPgmCacheTimingMode; | ||
655 | |||
656 | uint16_t MLCDevice; | ||
657 | uint16_t wSpareSkipBytes; | ||
658 | |||
659 | uint8_t nBitsInPageNumber; | ||
660 | uint8_t nBitsInPageDataSize; | ||
661 | uint8_t nBitsInBlockDataSize; | ||
662 | }; | ||
663 | |||
664 | /* ffsdefs.h */ | ||
665 | #define CLEAR 0 /*use this to clear a field instead of "fail"*/ | ||
666 | #define SET 1 /*use this to set a field instead of "pass"*/ | ||
667 | #define FAIL 1 /*failed flag*/ | ||
668 | #define PASS 0 /*success flag*/ | ||
669 | #define ERR -1 /*error flag*/ | ||
670 | |||
671 | /* lld.h */ | ||
672 | #define GOOD_BLOCK 0 | ||
673 | #define DEFECTIVE_BLOCK 1 | ||
674 | #define READ_ERROR 2 | ||
675 | |||
676 | #define CLK_X 5 | ||
677 | #define CLK_MULTI 4 | ||
678 | |||
679 | /* ffsport.h */ | ||
680 | #define VERBOSE 1 | ||
681 | |||
682 | #define NAND_DBG_WARN 1 | ||
683 | #define NAND_DBG_DEBUG 2 | ||
684 | #define NAND_DBG_TRACE 3 | ||
685 | |||
686 | #ifdef VERBOSE | ||
687 | #define nand_dbg_print(level, args...) \ | ||
688 | do { \ | ||
689 | if (level <= nand_debug_level) \ | ||
690 | printk(KERN_ALERT args); \ | ||
691 | } while (0) | ||
692 | #else | ||
693 | #define nand_dbg_print(level, args...) | ||
694 | #endif | ||
695 | |||
696 | |||
697 | /* spectraswconfig.h */ | ||
698 | #define CMD_DMA 0 | ||
699 | |||
700 | #define SPECTRA_PARTITION_ID 0 | ||
701 | /**** Block Table and Reserved Block Parameters *****/ | ||
702 | #define SPECTRA_START_BLOCK 3 | ||
703 | #define NUM_FREE_BLOCKS_GATE 30 | ||
704 | |||
705 | /* KBV - Updated to LNW scratch register address */ | ||
706 | #define SCRATCH_REG_ADDR CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR | ||
707 | #define SCRATCH_REG_SIZE 64 | ||
708 | |||
709 | #define GLOB_HWCTL_DEFAULT_BLKS 2048 | ||
710 | |||
711 | #define SUPPORT_15BITECC 1 | ||
712 | #define SUPPORT_8BITECC 1 | ||
713 | |||
714 | #define CUSTOM_CONF_PARAMS 0 | ||
715 | |||
716 | #define ONFI_BLOOM_TIME 1 | ||
717 | #define MODE5_WORKAROUND 0 | ||
718 | |||
719 | /* lld_nand.h */ | ||
720 | /* | ||
721 | * NAND Flash Controller Device Driver | ||
722 | * Copyright (c) 2009, Intel Corporation and its suppliers. | ||
723 | * | ||
724 | * This program is free software; you can redistribute it and/or modify it | ||
725 | * under the terms and conditions of the GNU General Public License, | ||
726 | * version 2, as published by the Free Software Foundation. | ||
727 | * | ||
728 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
729 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
730 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
731 | * more details. | ||
732 | * | ||
733 | * You should have received a copy of the GNU General Public License along with | ||
734 | * this program; if not, write to the Free Software Foundation, Inc., | ||
735 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
736 | * | ||
737 | */ | ||
738 | |||
739 | #ifndef _LLD_NAND_ | ||
740 | #define _LLD_NAND_ | ||
741 | |||
742 | #define MODE_00 0x00000000 | ||
743 | #define MODE_01 0x04000000 | ||
744 | #define MODE_10 0x08000000 | ||
745 | #define MODE_11 0x0C000000 | ||
746 | |||
747 | |||
748 | #define DATA_TRANSFER_MODE 0 | ||
749 | #define PROTECTION_PER_BLOCK 1 | ||
750 | #define LOAD_WAIT_COUNT 2 | ||
751 | #define PROGRAM_WAIT_COUNT 3 | ||
752 | #define ERASE_WAIT_COUNT 4 | ||
753 | #define INT_MONITOR_CYCLE_COUNT 5 | ||
754 | #define READ_BUSY_PIN_ENABLED 6 | ||
755 | #define MULTIPLANE_OPERATION_SUPPORT 7 | ||
756 | #define PRE_FETCH_MODE 8 | ||
757 | #define CE_DONT_CARE_SUPPORT 9 | ||
758 | #define COPYBACK_SUPPORT 10 | ||
759 | #define CACHE_WRITE_SUPPORT 11 | ||
760 | #define CACHE_READ_SUPPORT 12 | ||
761 | #define NUM_PAGES_IN_BLOCK 13 | ||
762 | #define ECC_ENABLE_SELECT 14 | ||
763 | #define WRITE_ENABLE_2_READ_ENABLE 15 | ||
764 | #define ADDRESS_2_DATA 16 | ||
765 | #define READ_ENABLE_2_WRITE_ENABLE 17 | ||
766 | #define TWO_ROW_ADDRESS_CYCLES 18 | ||
767 | #define MULTIPLANE_ADDRESS_RESTRICT 19 | ||
768 | #define ACC_CLOCKS 20 | ||
769 | #define READ_WRITE_ENABLE_LOW_COUNT 21 | ||
770 | #define READ_WRITE_ENABLE_HIGH_COUNT 22 | ||
771 | |||
772 | #define ECC_SECTOR_SIZE 512 | ||
773 | #define LLD_MAX_FLASH_BANKS 4 | ||
774 | |||
775 | #define DENALI_BUF_SIZE NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE | ||
776 | |||
777 | struct nand_buf | ||
778 | { | ||
779 | int head; | ||
780 | int tail; | ||
781 | uint8_t buf[DENALI_BUF_SIZE]; | ||
782 | dma_addr_t dma_buf; | ||
783 | }; | ||
784 | |||
785 | #define INTEL_CE4100 1 | ||
786 | #define INTEL_MRST 2 | ||
787 | |||
788 | struct denali_nand_info { | ||
789 | struct mtd_info mtd; | ||
790 | struct nand_chip nand; | ||
791 | struct device_info_tag dev_info; | ||
792 | int flash_bank; /* currently selected chip */ | ||
793 | int status; | ||
794 | int platform; | ||
795 | struct nand_buf buf; | ||
796 | struct pci_dev *dev; | ||
797 | int total_used_banks; | ||
798 | uint32_t block; /* stored for future use */ | ||
799 | uint16_t page; | ||
800 | void __iomem *flash_reg; /* Mapped io reg base address */ | ||
801 | void __iomem *flash_mem; /* Mapped io reg base address */ | ||
802 | |||
803 | /* elements used by ISR */ | ||
804 | struct completion complete; | ||
805 | spinlock_t irq_lock; | ||
806 | uint32_t irq_status; | ||
807 | int irq_debug_array[32]; | ||
808 | int idx; | ||
809 | }; | ||
810 | |||
811 | static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali); | ||
812 | static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali); | ||
813 | static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali, uint16_t INT_ENABLE); | ||
814 | |||
815 | #endif /*_LLD_NAND_*/ | ||
816 | |||
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c index f45a8d0c1508..5084cc517944 100644 --- a/drivers/mtd/nand/fsl_elbc_nand.c +++ b/drivers/mtd/nand/fsl_elbc_nand.c | |||
@@ -874,7 +874,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl, | |||
874 | priv->ctrl = ctrl; | 874 | priv->ctrl = ctrl; |
875 | priv->dev = ctrl->dev; | 875 | priv->dev = ctrl->dev; |
876 | 876 | ||
877 | priv->vbase = ioremap(res.start, res.end - res.start + 1); | 877 | priv->vbase = ioremap(res.start, resource_size(&res)); |
878 | if (!priv->vbase) { | 878 | if (!priv->vbase) { |
879 | dev_err(ctrl->dev, "failed to map chip region\n"); | 879 | dev_err(ctrl->dev, "failed to map chip region\n"); |
880 | ret = -ENOMEM; | 880 | ret = -ENOMEM; |
@@ -891,7 +891,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl, | |||
891 | if (ret) | 891 | if (ret) |
892 | goto err; | 892 | goto err; |
893 | 893 | ||
894 | ret = nand_scan_ident(&priv->mtd, 1); | 894 | ret = nand_scan_ident(&priv->mtd, 1, NULL); |
895 | if (ret) | 895 | if (ret) |
896 | goto err; | 896 | goto err; |
897 | 897 | ||
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c index b7ab5a0ec35d..00aea6f7d1f1 100644 --- a/drivers/mtd/nand/fsl_upm.c +++ b/drivers/mtd/nand/fsl_upm.c | |||
@@ -49,7 +49,10 @@ struct fsl_upm_nand { | |||
49 | uint32_t wait_flags; | 49 | uint32_t wait_flags; |
50 | }; | 50 | }; |
51 | 51 | ||
52 | #define to_fsl_upm_nand(mtd) container_of(mtd, struct fsl_upm_nand, mtd) | 52 | static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo) |
53 | { | ||
54 | return container_of(mtdinfo, struct fsl_upm_nand, mtd); | ||
55 | } | ||
53 | 56 | ||
54 | static int fun_chip_ready(struct mtd_info *mtd) | 57 | static int fun_chip_ready(struct mtd_info *mtd) |
55 | { | 58 | { |
@@ -303,7 +306,7 @@ static int __devinit fun_probe(struct of_device *ofdev, | |||
303 | FSL_UPM_WAIT_WRITE_BYTE; | 306 | FSL_UPM_WAIT_WRITE_BYTE; |
304 | 307 | ||
305 | fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start, | 308 | fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start, |
306 | io_res.end - io_res.start + 1); | 309 | resource_size(&io_res)); |
307 | if (!fun->io_base) { | 310 | if (!fun->io_base) { |
308 | ret = -ENOMEM; | 311 | ret = -ENOMEM; |
309 | goto err2; | 312 | goto err2; |
@@ -350,7 +353,7 @@ static int __devexit fun_remove(struct of_device *ofdev) | |||
350 | return 0; | 353 | return 0; |
351 | } | 354 | } |
352 | 355 | ||
353 | static struct of_device_id of_fun_match[] = { | 356 | static const struct of_device_id of_fun_match[] = { |
354 | { .compatible = "fsl,upm-nand" }, | 357 | { .compatible = "fsl,upm-nand" }, |
355 | {}, | 358 | {}, |
356 | }; | 359 | }; |
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c index 8f902e75aa85..0cde618bcc1e 100644 --- a/drivers/mtd/nand/gpio.c +++ b/drivers/mtd/nand/gpio.c | |||
@@ -181,11 +181,11 @@ static int __devexit gpio_nand_remove(struct platform_device *dev) | |||
181 | res = platform_get_resource(dev, IORESOURCE_MEM, 1); | 181 | res = platform_get_resource(dev, IORESOURCE_MEM, 1); |
182 | iounmap(gpiomtd->io_sync); | 182 | iounmap(gpiomtd->io_sync); |
183 | if (res) | 183 | if (res) |
184 | release_mem_region(res->start, res->end - res->start + 1); | 184 | release_mem_region(res->start, resource_size(res)); |
185 | 185 | ||
186 | res = platform_get_resource(dev, IORESOURCE_MEM, 0); | 186 | res = platform_get_resource(dev, IORESOURCE_MEM, 0); |
187 | iounmap(gpiomtd->nand_chip.IO_ADDR_R); | 187 | iounmap(gpiomtd->nand_chip.IO_ADDR_R); |
188 | release_mem_region(res->start, res->end - res->start + 1); | 188 | release_mem_region(res->start, resource_size(res)); |
189 | 189 | ||
190 | if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) | 190 | if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) |
191 | gpio_set_value(gpiomtd->plat.gpio_nwp, 0); | 191 | gpio_set_value(gpiomtd->plat.gpio_nwp, 0); |
@@ -208,14 +208,14 @@ static void __iomem *request_and_remap(struct resource *res, size_t size, | |||
208 | { | 208 | { |
209 | void __iomem *ptr; | 209 | void __iomem *ptr; |
210 | 210 | ||
211 | if (!request_mem_region(res->start, res->end - res->start + 1, name)) { | 211 | if (!request_mem_region(res->start, resource_size(res), name)) { |
212 | *err = -EBUSY; | 212 | *err = -EBUSY; |
213 | return NULL; | 213 | return NULL; |
214 | } | 214 | } |
215 | 215 | ||
216 | ptr = ioremap(res->start, size); | 216 | ptr = ioremap(res->start, size); |
217 | if (!ptr) { | 217 | if (!ptr) { |
218 | release_mem_region(res->start, res->end - res->start + 1); | 218 | release_mem_region(res->start, resource_size(res)); |
219 | *err = -ENOMEM; | 219 | *err = -ENOMEM; |
220 | } | 220 | } |
221 | return ptr; | 221 | return ptr; |
@@ -338,10 +338,10 @@ err_nwp: | |||
338 | err_nce: | 338 | err_nce: |
339 | iounmap(gpiomtd->io_sync); | 339 | iounmap(gpiomtd->io_sync); |
340 | if (res1) | 340 | if (res1) |
341 | release_mem_region(res1->start, res1->end - res1->start + 1); | 341 | release_mem_region(res1->start, resource_size(res1)); |
342 | err_sync: | 342 | err_sync: |
343 | iounmap(gpiomtd->nand_chip.IO_ADDR_R); | 343 | iounmap(gpiomtd->nand_chip.IO_ADDR_R); |
344 | release_mem_region(res0->start, res0->end - res0->start + 1); | 344 | release_mem_region(res0->start, resource_size(res0)); |
345 | err_map: | 345 | err_map: |
346 | kfree(gpiomtd); | 346 | kfree(gpiomtd); |
347 | return ret; | 347 | return ret; |
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c new file mode 100644 index 000000000000..3d0867d829cb --- /dev/null +++ b/drivers/mtd/nand/mpc5121_nfc.c | |||
@@ -0,0 +1,917 @@ | |||
1 | /* | ||
2 | * Copyright 2004-2008 Freescale Semiconductor, Inc. | ||
3 | * Copyright 2009 Semihalf. | ||
4 | * | ||
5 | * Approved as OSADL project by a majority of OSADL members and funded | ||
6 | * by OSADL membership fees in 2009; for details see www.osadl.org. | ||
7 | * | ||
8 | * Based on original driver from Freescale Semiconductor | ||
9 | * written by John Rigby <jrigby@freescale.com> on basis | ||
10 | * of drivers/mtd/nand/mxc_nand.c. Reworked and extended | ||
11 | * Piotr Ziecik <kosmo@semihalf.com>. | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or | ||
14 | * modify it under the terms of the GNU General Public License | ||
15 | * as published by the Free Software Foundation; either version 2 | ||
16 | * of the License, or (at your option) any later version. | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with this program; if not, write to the Free Software | ||
24 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, | ||
25 | * MA 02110-1301, USA. | ||
26 | */ | ||
27 | |||
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

#include <asm/mpc5121.h>
42 | |||
43 | /* Addresses for NFC MAIN RAM BUFFER areas */ | ||
44 | #define NFC_MAIN_AREA(n) ((n) * 0x200) | ||
45 | |||
46 | /* Addresses for NFC SPARE BUFFER areas */ | ||
47 | #define NFC_SPARE_BUFFERS 8 | ||
48 | #define NFC_SPARE_LEN 0x40 | ||
49 | #define NFC_SPARE_AREA(n) (0x1000 + ((n) * NFC_SPARE_LEN)) | ||
50 | |||
51 | /* MPC5121 NFC registers */ | ||
52 | #define NFC_BUF_ADDR 0x1E04 | ||
53 | #define NFC_FLASH_ADDR 0x1E06 | ||
54 | #define NFC_FLASH_CMD 0x1E08 | ||
55 | #define NFC_CONFIG 0x1E0A | ||
56 | #define NFC_ECC_STATUS1 0x1E0C | ||
57 | #define NFC_ECC_STATUS2 0x1E0E | ||
58 | #define NFC_SPAS 0x1E10 | ||
59 | #define NFC_WRPROT 0x1E12 | ||
60 | #define NFC_NF_WRPRST 0x1E18 | ||
61 | #define NFC_CONFIG1 0x1E1A | ||
62 | #define NFC_CONFIG2 0x1E1C | ||
63 | #define NFC_UNLOCKSTART_BLK0 0x1E20 | ||
64 | #define NFC_UNLOCKEND_BLK0 0x1E22 | ||
65 | #define NFC_UNLOCKSTART_BLK1 0x1E24 | ||
66 | #define NFC_UNLOCKEND_BLK1 0x1E26 | ||
67 | #define NFC_UNLOCKSTART_BLK2 0x1E28 | ||
68 | #define NFC_UNLOCKEND_BLK2 0x1E2A | ||
69 | #define NFC_UNLOCKSTART_BLK3 0x1E2C | ||
70 | #define NFC_UNLOCKEND_BLK3 0x1E2E | ||
71 | |||
72 | /* Bit Definitions: NFC_BUF_ADDR */ | ||
73 | #define NFC_RBA_MASK (7 << 0) | ||
74 | #define NFC_ACTIVE_CS_SHIFT 5 | ||
75 | #define NFC_ACTIVE_CS_MASK (3 << NFC_ACTIVE_CS_SHIFT) | ||
76 | |||
77 | /* Bit Definitions: NFC_CONFIG */ | ||
78 | #define NFC_BLS_UNLOCKED (1 << 1) | ||
79 | |||
80 | /* Bit Definitions: NFC_CONFIG1 */ | ||
81 | #define NFC_ECC_4BIT (1 << 0) | ||
82 | #define NFC_FULL_PAGE_DMA (1 << 1) | ||
83 | #define NFC_SPARE_ONLY (1 << 2) | ||
84 | #define NFC_ECC_ENABLE (1 << 3) | ||
85 | #define NFC_INT_MASK (1 << 4) | ||
86 | #define NFC_BIG_ENDIAN (1 << 5) | ||
87 | #define NFC_RESET (1 << 6) | ||
88 | #define NFC_CE (1 << 7) | ||
89 | #define NFC_ONE_CYCLE (1 << 8) | ||
90 | #define NFC_PPB_32 (0 << 9) | ||
91 | #define NFC_PPB_64 (1 << 9) | ||
92 | #define NFC_PPB_128 (2 << 9) | ||
93 | #define NFC_PPB_256 (3 << 9) | ||
94 | #define NFC_PPB_MASK (3 << 9) | ||
95 | #define NFC_FULL_PAGE_INT (1 << 11) | ||
96 | |||
97 | /* Bit Definitions: NFC_CONFIG2 */ | ||
98 | #define NFC_COMMAND (1 << 0) | ||
99 | #define NFC_ADDRESS (1 << 1) | ||
100 | #define NFC_INPUT (1 << 2) | ||
101 | #define NFC_OUTPUT (1 << 3) | ||
102 | #define NFC_ID (1 << 4) | ||
103 | #define NFC_STATUS (1 << 5) | ||
104 | #define NFC_CMD_FAIL (1 << 15) | ||
105 | #define NFC_INT (1 << 15) | ||
106 | |||
107 | /* Bit Definitions: NFC_WRPROT */ | ||
108 | #define NFC_WPC_LOCK_TIGHT (1 << 0) | ||
109 | #define NFC_WPC_LOCK (1 << 1) | ||
110 | #define NFC_WPC_UNLOCK (1 << 2) | ||
111 | |||
112 | #define DRV_NAME "mpc5121_nfc" | ||
113 | |||
114 | /* Timeouts */ | ||
115 | #define NFC_RESET_TIMEOUT 1000 /* 1 ms */ | ||
116 | #define NFC_TIMEOUT (HZ / 10) /* 1/10 s */ | ||
117 | |||
/* Per-device driver state, reachable via mtd->priv->priv */
struct mpc5121_nfc_prv {
	struct mtd_info mtd;		/* MTD device, embedded */
	struct nand_chip chip;		/* NAND chip, embedded */
	int irq;			/* NFC interrupt number */
	void __iomem *regs;		/* mapped NFC registers and RAM buffers */
	struct clk *clk;		/* NFC peripheral clock */
	wait_queue_head_t irq_waitq;	/* woken from mpc5121_nfc_irq() */
	uint column;			/* current byte offset within the page */
	int spareonly;			/* nonzero: access spare (OOB) area only */
	void __iomem *csreg;		/* ADS5121 CPLD chip-select register */
	struct device *dev;		/* backpointer for dev_* logging */
};
130 | |||
131 | static void mpc5121_nfc_done(struct mtd_info *mtd); | ||
132 | |||
133 | #ifdef CONFIG_MTD_PARTITIONS | ||
134 | static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL }; | ||
135 | #endif | ||
136 | |||
/* Read NFC register (registers are 16-bit, big-endian) */
static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
{
	struct nand_chip *chip = mtd->priv;
	struct mpc5121_nfc_prv *prv = chip->priv;

	return in_be16(prv->regs + reg);
}
145 | |||
/* Write NFC register (registers are 16-bit, big-endian) */
static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val)
{
	struct nand_chip *chip = mtd->priv;
	struct mpc5121_nfc_prv *prv = chip->priv;

	out_be16(prv->regs + reg, val);
}
154 | |||
155 | /* Set bits in NFC register */ | ||
156 | static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits) | ||
157 | { | ||
158 | nfc_write(mtd, reg, nfc_read(mtd, reg) | bits); | ||
159 | } | ||
160 | |||
161 | /* Clear bits in NFC register */ | ||
162 | static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits) | ||
163 | { | ||
164 | nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits); | ||
165 | } | ||
166 | |||
/* Invoke address cycle: latch one address byte, trigger, wait for done */
static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr)
{
	nfc_write(mtd, NFC_FLASH_ADDR, addr);
	nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS);
	mpc5121_nfc_done(mtd);
}
174 | |||
/* Invoke command cycle: latch the command byte, trigger, wait for done */
static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd)
{
	nfc_write(mtd, NFC_FLASH_CMD, cmd);
	nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND);
	mpc5121_nfc_done(mtd);
}
182 | |||
/* Send data from NFC buffers to NAND flash */
static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd)
{
	/* Always transfer from RAM buffer 0 (RBA field cleared) */
	nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
	nfc_write(mtd, NFC_CONFIG2, NFC_INPUT);
	mpc5121_nfc_done(mtd);
}
190 | |||
/* Receive data from NAND flash */
static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd)
{
	/* Always transfer into RAM buffer 0 (RBA field cleared) */
	nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
	nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT);
	mpc5121_nfc_done(mtd);
}
198 | |||
/* Receive ID from NAND flash into RAM buffer 0 */
static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd)
{
	nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
	nfc_write(mtd, NFC_CONFIG2, NFC_ID);
	mpc5121_nfc_done(mtd);
}
206 | |||
/* Receive status from NAND flash into RAM buffer 0 */
static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd)
{
	nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
	nfc_write(mtd, NFC_CONFIG2, NFC_STATUS);
	mpc5121_nfc_done(mtd);
}
214 | |||
/* NFC interrupt handler */
static irqreturn_t mpc5121_nfc_irq(int irq, void *data)
{
	struct mtd_info *mtd = data;
	struct nand_chip *chip = mtd->priv;
	struct mpc5121_nfc_prv *prv = chip->priv;

	/*
	 * Mask further NFC interrupts and wake the waiter;
	 * mpc5121_nfc_done() unmasks again before the next wait.
	 */
	nfc_set(mtd, NFC_CONFIG1, NFC_INT_MASK);
	wake_up(&prv->irq_waitq);

	return IRQ_HANDLED;
}
227 | |||
/* Wait for operation complete */
static void mpc5121_nfc_done(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;
	struct mpc5121_nfc_prv *prv = chip->priv;
	int rv;

	/*
	 * If the controller has not yet signalled completion (NFC_INT),
	 * unmask the NFC interrupt and sleep until the IRQ handler wakes
	 * us, or the timeout expires.
	 */
	if ((nfc_read(mtd, NFC_CONFIG2) & NFC_INT) == 0) {
		nfc_clear(mtd, NFC_CONFIG1, NFC_INT_MASK);
		rv = wait_event_timeout(prv->irq_waitq,
			(nfc_read(mtd, NFC_CONFIG2) & NFC_INT), NFC_TIMEOUT);

		if (!rv)
			dev_warn(prv->dev,
				"Timeout while waiting for interrupt.\n");
	}

	/* Acknowledge the completion flag for the next operation */
	nfc_clear(mtd, NFC_CONFIG2, NFC_INT);
}
247 | |||
248 | /* Do address cycle(s) */ | ||
249 | static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page) | ||
250 | { | ||
251 | struct nand_chip *chip = mtd->priv; | ||
252 | u32 pagemask = chip->pagemask; | ||
253 | |||
254 | if (column != -1) { | ||
255 | mpc5121_nfc_send_addr(mtd, column); | ||
256 | if (mtd->writesize > 512) | ||
257 | mpc5121_nfc_send_addr(mtd, column >> 8); | ||
258 | } | ||
259 | |||
260 | if (page != -1) { | ||
261 | do { | ||
262 | mpc5121_nfc_send_addr(mtd, page & 0xFF); | ||
263 | page >>= 8; | ||
264 | pagemask >>= 8; | ||
265 | } while (pagemask); | ||
266 | } | ||
267 | } | ||
268 | |||
269 | /* Control chip select signals */ | ||
270 | static void mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip) | ||
271 | { | ||
272 | if (chip < 0) { | ||
273 | nfc_clear(mtd, NFC_CONFIG1, NFC_CE); | ||
274 | return; | ||
275 | } | ||
276 | |||
277 | nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK); | ||
278 | nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) & | ||
279 | NFC_ACTIVE_CS_MASK); | ||
280 | nfc_set(mtd, NFC_CONFIG1, NFC_CE); | ||
281 | } | ||
282 | |||
283 | /* Init external chip select logic on ADS5121 board */ | ||
284 | static int ads5121_chipselect_init(struct mtd_info *mtd) | ||
285 | { | ||
286 | struct nand_chip *chip = mtd->priv; | ||
287 | struct mpc5121_nfc_prv *prv = chip->priv; | ||
288 | struct device_node *dn; | ||
289 | |||
290 | dn = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld"); | ||
291 | if (dn) { | ||
292 | prv->csreg = of_iomap(dn, 0); | ||
293 | of_node_put(dn); | ||
294 | if (!prv->csreg) | ||
295 | return -ENOMEM; | ||
296 | |||
297 | /* CPLD Register 9 controls NAND /CE Lines */ | ||
298 | prv->csreg += 9; | ||
299 | return 0; | ||
300 | } | ||
301 | |||
302 | return -EINVAL; | ||
303 | } | ||
304 | |||
/* Control chips select signal on ADS5121 board */
static void ads5121_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *nand = mtd->priv;
	struct mpc5121_nfc_prv *prv = nand->priv;
	u8 v;

	v = in_8(prv->csreg);
	/* Set all four CPLD CE bits first, i.e. deselect every chip;
	 * clearing a bit below appears to assert that chip's /CE
	 * (active-low) — NOTE(review): confirm against the CPLD spec. */
	v |= 0x0F;

	if (chip >= 0) {
		mpc5121_nfc_select_chip(mtd, 0);
		v &= ~(1 << chip);
	} else
		mpc5121_nfc_select_chip(mtd, -1);

	out_8(prv->csreg, v);
}
323 | |||
/* Read NAND Ready/Busy signal */
static int mpc5121_nfc_dev_ready(struct mtd_info *mtd)
{
	/*
	 * NFC handles ready/busy signal internally. Therefore, this function
	 * always returns status as ready.
	 */
	return 1;
}
333 | |||
/*
 * Write command to NAND flash.
 *
 * First switch normalizes the command (sub-page reads become full-page
 * READ0) and sets up prv->column/prv->spareonly for the buffer helpers;
 * then the command and address cycles are issued; the second switch
 * triggers the data-transfer phase for read-type commands.
 */
static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command,
							int column, int page)
{
	struct nand_chip *chip = mtd->priv;
	struct mpc5121_nfc_prv *prv = chip->priv;

	/* Reset buffer position; read_buf/write_buf advance prv->column */
	prv->column = (column >= 0) ? column : 0;
	prv->spareonly = 0;

	switch (command) {
	case NAND_CMD_PAGEPROG:
		mpc5121_nfc_send_prog_page(mtd);
		break;
	/*
	 * NFC does not support sub-page reads and writes,
	 * so emulate them using full page transfers.
	 */
	case NAND_CMD_READ0:
		column = 0;
		break;

	case NAND_CMD_READ1:
		/* Second-half read on small-page chips: offset by 256 bytes */
		prv->column += 256;
		command = NAND_CMD_READ0;
		column = 0;
		break;

	case NAND_CMD_READOOB:
		prv->spareonly = 1;
		command = NAND_CMD_READ0;
		column = 0;
		break;

	case NAND_CMD_SEQIN:
		/* Emulated read-modify-write: fetch the page before writing */
		mpc5121_nfc_command(mtd, NAND_CMD_READ0, column, page);
		column = 0;
		break;

	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
	case NAND_CMD_READID:
	case NAND_CMD_STATUS:
		break;

	default:
		return;
	}

	mpc5121_nfc_send_cmd(mtd, command);
	mpc5121_nfc_addr_cycle(mtd, column, page);

	/* Data-transfer phase for read-type commands */
	switch (command) {
	case NAND_CMD_READ0:
		if (mtd->writesize > 512)
			mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART);
		mpc5121_nfc_send_read_page(mtd);
		break;

	case NAND_CMD_READID:
		mpc5121_nfc_send_read_id(mtd);
		break;

	case NAND_CMD_STATUS:
		mpc5121_nfc_send_read_status(mtd);
		/* 16-bit chips return status in the second byte of the buffer */
		if (chip->options & NAND_BUSWIDTH_16)
			prv->column = 1;
		else
			prv->column = 0;
		break;
	}
}
406 | |||
407 | /* Copy data from/to NFC spare buffers. */ | ||
408 | static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset, | ||
409 | u8 *buffer, uint size, int wr) | ||
410 | { | ||
411 | struct nand_chip *nand = mtd->priv; | ||
412 | struct mpc5121_nfc_prv *prv = nand->priv; | ||
413 | uint o, s, sbsize, blksize; | ||
414 | |||
415 | /* | ||
416 | * NAND spare area is available through NFC spare buffers. | ||
417 | * The NFC divides spare area into (page_size / 512) chunks. | ||
418 | * Each chunk is placed into separate spare memory area, using | ||
419 | * first (spare_size / num_of_chunks) bytes of the buffer. | ||
420 | * | ||
421 | * For NAND device in which the spare area is not divided fully | ||
422 | * by the number of chunks, number of used bytes in each spare | ||
423 | * buffer is rounded down to the nearest even number of bytes, | ||
424 | * and all remaining bytes are added to the last used spare area. | ||
425 | * | ||
426 | * For more information read section 26.6.10 of MPC5121e | ||
427 | * Microcontroller Reference Manual, Rev. 3. | ||
428 | */ | ||
429 | |||
430 | /* Calculate number of valid bytes in each spare buffer */ | ||
431 | sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1; | ||
432 | |||
433 | while (size) { | ||
434 | /* Calculate spare buffer number */ | ||
435 | s = offset / sbsize; | ||
436 | if (s > NFC_SPARE_BUFFERS - 1) | ||
437 | s = NFC_SPARE_BUFFERS - 1; | ||
438 | |||
439 | /* | ||
440 | * Calculate offset to requested data block in selected spare | ||
441 | * buffer and its size. | ||
442 | */ | ||
443 | o = offset - (s * sbsize); | ||
444 | blksize = min(sbsize - o, size); | ||
445 | |||
446 | if (wr) | ||
447 | memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o, | ||
448 | buffer, blksize); | ||
449 | else | ||
450 | memcpy_fromio(buffer, | ||
451 | prv->regs + NFC_SPARE_AREA(s) + o, blksize); | ||
452 | |||
453 | buffer += blksize; | ||
454 | offset += blksize; | ||
455 | size -= blksize; | ||
456 | }; | ||
457 | } | ||
458 | |||
/*
 * Copy data from/to NFC main and spare buffers.
 *
 * The current position comes from prv->column (set by the cmdfunc) and
 * is advanced as a side effect. A transfer that starts in the main area
 * and runs past mtd->writesize recurses once to finish in the spare area.
 */
static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len,
									int wr)
{
	struct nand_chip *chip = mtd->priv;
	struct mpc5121_nfc_prv *prv = chip->priv;
	uint c = prv->column;
	uint l;

	/* Handle spare area access */
	if (prv->spareonly || c >= mtd->writesize) {
		/* Calculate offset from beginning of spare area */
		if (c >= mtd->writesize)
			c -= mtd->writesize;

		prv->column += len;
		mpc5121_nfc_copy_spare(mtd, c, buf, len, wr);
		return;
	}

	/*
	 * Handle main area access - limit copy length to prevent
	 * crossing main/spare boundary.
	 */
	l = min((uint)len, mtd->writesize - c);
	prv->column += l;

	if (wr)
		memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l);
	else
		memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l);

	/* Handle crossing main/spare boundary */
	if (l != len) {
		buf += l;
		len -= l;
		mpc5121_nfc_buf_copy(mtd, buf, len, wr);
	}
}
498 | |||
/* Read data from NFC buffers (thin wrapper over mpc5121_nfc_buf_copy) */
static void mpc5121_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
	mpc5121_nfc_buf_copy(mtd, buf, len, 0);
}
504 | |||
/* Write data to NFC buffers (thin wrapper over mpc5121_nfc_buf_copy) */
static void mpc5121_nfc_write_buf(struct mtd_info *mtd,
						const u_char *buf, int len)
{
	/* cast is safe: wr=1 path only reads from buf */
	mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1);
}
511 | |||
512 | /* Compare buffer with NAND flash */ | ||
513 | static int mpc5121_nfc_verify_buf(struct mtd_info *mtd, | ||
514 | const u_char *buf, int len) | ||
515 | { | ||
516 | u_char tmp[256]; | ||
517 | uint bsize; | ||
518 | |||
519 | while (len) { | ||
520 | bsize = min(len, 256); | ||
521 | mpc5121_nfc_read_buf(mtd, tmp, bsize); | ||
522 | |||
523 | if (memcmp(buf, tmp, bsize)) | ||
524 | return 1; | ||
525 | |||
526 | buf += bsize; | ||
527 | len -= bsize; | ||
528 | } | ||
529 | |||
530 | return 0; | ||
531 | } | ||
532 | |||
/* Read one byte from the NFC buffers at the current column */
static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd)
{
	u8 tmp;

	mpc5121_nfc_read_buf(mtd, &tmp, sizeof(tmp));

	return tmp;
}
542 | |||
/* Read one 16-bit word from the NFC buffers at the current column */
static u16 mpc5121_nfc_read_word(struct mtd_info *mtd)
{
	u16 tmp;

	/*
	 * NOTE(review): the two bytes land in host memory order as stored
	 * in the NFC buffer; byte order relies on the big-endian NFC setup
	 * done in probe — confirm for 16-bit bus-width chips.
	 */
	mpc5121_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp));

	return tmp;
}
552 | |||
553 | /* | ||
554 | * Read NFC configuration from Reset Config Word | ||
555 | * | ||
556 | * NFC is configured during reset in basis of information stored | ||
557 | * in Reset Config Word. There is no other way to set NAND block | ||
558 | * size, spare size and bus width. | ||
559 | */ | ||
560 | static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd) | ||
561 | { | ||
562 | struct nand_chip *chip = mtd->priv; | ||
563 | struct mpc5121_nfc_prv *prv = chip->priv; | ||
564 | struct mpc512x_reset_module *rm; | ||
565 | struct device_node *rmnode; | ||
566 | uint rcw_pagesize = 0; | ||
567 | uint rcw_sparesize = 0; | ||
568 | uint rcw_width; | ||
569 | uint rcwh; | ||
570 | uint romloc, ps; | ||
571 | |||
572 | rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset"); | ||
573 | if (!rmnode) { | ||
574 | dev_err(prv->dev, "Missing 'fsl,mpc5121-reset' " | ||
575 | "node in device tree!\n"); | ||
576 | return -ENODEV; | ||
577 | } | ||
578 | |||
579 | rm = of_iomap(rmnode, 0); | ||
580 | if (!rm) { | ||
581 | dev_err(prv->dev, "Error mapping reset module node!\n"); | ||
582 | return -EBUSY; | ||
583 | } | ||
584 | |||
585 | rcwh = in_be32(&rm->rcwhr); | ||
586 | |||
587 | /* Bit 6: NFC bus width */ | ||
588 | rcw_width = ((rcwh >> 6) & 0x1) ? 2 : 1; | ||
589 | |||
590 | /* Bit 7: NFC Page/Spare size */ | ||
591 | ps = (rcwh >> 7) & 0x1; | ||
592 | |||
593 | /* Bits [22:21]: ROM Location */ | ||
594 | romloc = (rcwh >> 21) & 0x3; | ||
595 | |||
596 | /* Decode RCW bits */ | ||
597 | switch ((ps << 2) | romloc) { | ||
598 | case 0x00: | ||
599 | case 0x01: | ||
600 | rcw_pagesize = 512; | ||
601 | rcw_sparesize = 16; | ||
602 | break; | ||
603 | case 0x02: | ||
604 | case 0x03: | ||
605 | rcw_pagesize = 4096; | ||
606 | rcw_sparesize = 128; | ||
607 | break; | ||
608 | case 0x04: | ||
609 | case 0x05: | ||
610 | rcw_pagesize = 2048; | ||
611 | rcw_sparesize = 64; | ||
612 | break; | ||
613 | case 0x06: | ||
614 | case 0x07: | ||
615 | rcw_pagesize = 4096; | ||
616 | rcw_sparesize = 218; | ||
617 | break; | ||
618 | } | ||
619 | |||
620 | mtd->writesize = rcw_pagesize; | ||
621 | mtd->oobsize = rcw_sparesize; | ||
622 | if (rcw_width == 2) | ||
623 | chip->options |= NAND_BUSWIDTH_16; | ||
624 | |||
625 | dev_notice(prv->dev, "Configured for " | ||
626 | "%u-bit NAND, page size %u " | ||
627 | "with %u spare.\n", | ||
628 | rcw_width * 8, rcw_pagesize, | ||
629 | rcw_sparesize); | ||
630 | iounmap(rm); | ||
631 | of_node_put(rmnode); | ||
632 | return 0; | ||
633 | } | ||
634 | |||
635 | /* Free driver resources */ | ||
636 | static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd) | ||
637 | { | ||
638 | struct nand_chip *chip = mtd->priv; | ||
639 | struct mpc5121_nfc_prv *prv = chip->priv; | ||
640 | |||
641 | if (prv->clk) { | ||
642 | clk_disable(prv->clk); | ||
643 | clk_put(prv->clk); | ||
644 | } | ||
645 | |||
646 | if (prv->csreg) | ||
647 | iounmap(prv->csreg); | ||
648 | } | ||
649 | |||
650 | static int __devinit mpc5121_nfc_probe(struct of_device *op, | ||
651 | const struct of_device_id *match) | ||
652 | { | ||
653 | struct device_node *rootnode, *dn = op->node; | ||
654 | struct device *dev = &op->dev; | ||
655 | struct mpc5121_nfc_prv *prv; | ||
656 | struct resource res; | ||
657 | struct mtd_info *mtd; | ||
658 | #ifdef CONFIG_MTD_PARTITIONS | ||
659 | struct mtd_partition *parts; | ||
660 | #endif | ||
661 | struct nand_chip *chip; | ||
662 | unsigned long regs_paddr, regs_size; | ||
663 | const uint *chips_no; | ||
664 | int resettime = 0; | ||
665 | int retval = 0; | ||
666 | int rev, len; | ||
667 | |||
668 | /* | ||
669 | * Check SoC revision. This driver supports only NFC | ||
670 | * in MPC5121 revision 2 and MPC5123 revision 3. | ||
671 | */ | ||
672 | rev = (mfspr(SPRN_SVR) >> 4) & 0xF; | ||
673 | if ((rev != 2) && (rev != 3)) { | ||
674 | dev_err(dev, "SoC revision %u is not supported!\n", rev); | ||
675 | return -ENXIO; | ||
676 | } | ||
677 | |||
678 | prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL); | ||
679 | if (!prv) { | ||
680 | dev_err(dev, "Memory exhausted!\n"); | ||
681 | return -ENOMEM; | ||
682 | } | ||
683 | |||
684 | mtd = &prv->mtd; | ||
685 | chip = &prv->chip; | ||
686 | |||
687 | mtd->priv = chip; | ||
688 | chip->priv = prv; | ||
689 | prv->dev = dev; | ||
690 | |||
691 | /* Read NFC configuration from Reset Config Word */ | ||
692 | retval = mpc5121_nfc_read_hw_config(mtd); | ||
693 | if (retval) { | ||
694 | dev_err(dev, "Unable to read NFC config!\n"); | ||
695 | return retval; | ||
696 | } | ||
697 | |||
698 | prv->irq = irq_of_parse_and_map(dn, 0); | ||
699 | if (prv->irq == NO_IRQ) { | ||
700 | dev_err(dev, "Error mapping IRQ!\n"); | ||
701 | return -EINVAL; | ||
702 | } | ||
703 | |||
704 | retval = of_address_to_resource(dn, 0, &res); | ||
705 | if (retval) { | ||
706 | dev_err(dev, "Error parsing memory region!\n"); | ||
707 | return retval; | ||
708 | } | ||
709 | |||
710 | chips_no = of_get_property(dn, "chips", &len); | ||
711 | if (!chips_no || len != sizeof(*chips_no)) { | ||
712 | dev_err(dev, "Invalid/missing 'chips' property!\n"); | ||
713 | return -EINVAL; | ||
714 | } | ||
715 | |||
716 | regs_paddr = res.start; | ||
717 | regs_size = res.end - res.start + 1; | ||
718 | |||
719 | if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) { | ||
720 | dev_err(dev, "Error requesting memory region!\n"); | ||
721 | return -EBUSY; | ||
722 | } | ||
723 | |||
724 | prv->regs = devm_ioremap(dev, regs_paddr, regs_size); | ||
725 | if (!prv->regs) { | ||
726 | dev_err(dev, "Error mapping memory region!\n"); | ||
727 | return -ENOMEM; | ||
728 | } | ||
729 | |||
730 | mtd->name = "MPC5121 NAND"; | ||
731 | chip->dev_ready = mpc5121_nfc_dev_ready; | ||
732 | chip->cmdfunc = mpc5121_nfc_command; | ||
733 | chip->read_byte = mpc5121_nfc_read_byte; | ||
734 | chip->read_word = mpc5121_nfc_read_word; | ||
735 | chip->read_buf = mpc5121_nfc_read_buf; | ||
736 | chip->write_buf = mpc5121_nfc_write_buf; | ||
737 | chip->verify_buf = mpc5121_nfc_verify_buf; | ||
738 | chip->select_chip = mpc5121_nfc_select_chip; | ||
739 | chip->options = NAND_NO_AUTOINCR | NAND_USE_FLASH_BBT; | ||
740 | chip->ecc.mode = NAND_ECC_SOFT; | ||
741 | |||
742 | /* Support external chip-select logic on ADS5121 board */ | ||
743 | rootnode = of_find_node_by_path("/"); | ||
744 | if (of_device_is_compatible(rootnode, "fsl,mpc5121ads")) { | ||
745 | retval = ads5121_chipselect_init(mtd); | ||
746 | if (retval) { | ||
747 | dev_err(dev, "Chipselect init error!\n"); | ||
748 | of_node_put(rootnode); | ||
749 | return retval; | ||
750 | } | ||
751 | |||
752 | chip->select_chip = ads5121_select_chip; | ||
753 | } | ||
754 | of_node_put(rootnode); | ||
755 | |||
756 | /* Enable NFC clock */ | ||
757 | prv->clk = clk_get(dev, "nfc_clk"); | ||
758 | if (!prv->clk) { | ||
759 | dev_err(dev, "Unable to acquire NFC clock!\n"); | ||
760 | retval = -ENODEV; | ||
761 | goto error; | ||
762 | } | ||
763 | |||
764 | clk_enable(prv->clk); | ||
765 | |||
766 | /* Reset NAND Flash controller */ | ||
767 | nfc_set(mtd, NFC_CONFIG1, NFC_RESET); | ||
768 | while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) { | ||
769 | if (resettime++ >= NFC_RESET_TIMEOUT) { | ||
770 | dev_err(dev, "Timeout while resetting NFC!\n"); | ||
771 | retval = -EINVAL; | ||
772 | goto error; | ||
773 | } | ||
774 | |||
775 | udelay(1); | ||
776 | } | ||
777 | |||
778 | /* Enable write to NFC memory */ | ||
779 | nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED); | ||
780 | |||
781 | /* Enable write to all NAND pages */ | ||
782 | nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000); | ||
783 | nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF); | ||
784 | nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK); | ||
785 | |||
786 | /* | ||
787 | * Setup NFC: | ||
788 | * - Big Endian transfers, | ||
789 | * - Interrupt after full page read/write. | ||
790 | */ | ||
791 | nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK | | ||
792 | NFC_FULL_PAGE_INT); | ||
793 | |||
794 | /* Set spare area size */ | ||
795 | nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1); | ||
796 | |||
797 | init_waitqueue_head(&prv->irq_waitq); | ||
798 | retval = devm_request_irq(dev, prv->irq, &mpc5121_nfc_irq, 0, DRV_NAME, | ||
799 | mtd); | ||
800 | if (retval) { | ||
801 | dev_err(dev, "Error requesting IRQ!\n"); | ||
802 | goto error; | ||
803 | } | ||
804 | |||
805 | /* Detect NAND chips */ | ||
806 | if (nand_scan(mtd, *chips_no)) { | ||
807 | dev_err(dev, "NAND Flash not found !\n"); | ||
808 | devm_free_irq(dev, prv->irq, mtd); | ||
809 | retval = -ENXIO; | ||
810 | goto error; | ||
811 | } | ||
812 | |||
813 | /* Set erase block size */ | ||
814 | switch (mtd->erasesize / mtd->writesize) { | ||
815 | case 32: | ||
816 | nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32); | ||
817 | break; | ||
818 | |||
819 | case 64: | ||
820 | nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64); | ||
821 | break; | ||
822 | |||
823 | case 128: | ||
824 | nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128); | ||
825 | break; | ||
826 | |||
827 | case 256: | ||
828 | nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256); | ||
829 | break; | ||
830 | |||
831 | default: | ||
832 | dev_err(dev, "Unsupported NAND flash!\n"); | ||
833 | devm_free_irq(dev, prv->irq, mtd); | ||
834 | retval = -ENXIO; | ||
835 | goto error; | ||
836 | } | ||
837 | |||
838 | dev_set_drvdata(dev, mtd); | ||
839 | |||
840 | /* Register device in MTD */ | ||
841 | #ifdef CONFIG_MTD_PARTITIONS | ||
842 | retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0); | ||
843 | #ifdef CONFIG_MTD_OF_PARTS | ||
844 | if (retval == 0) | ||
845 | retval = of_mtd_parse_partitions(dev, dn, &parts); | ||
846 | #endif | ||
847 | if (retval < 0) { | ||
848 | dev_err(dev, "Error parsing MTD partitions!\n"); | ||
849 | devm_free_irq(dev, prv->irq, mtd); | ||
850 | retval = -EINVAL; | ||
851 | goto error; | ||
852 | } | ||
853 | |||
854 | if (retval > 0) | ||
855 | retval = add_mtd_partitions(mtd, parts, retval); | ||
856 | else | ||
857 | #endif | ||
858 | retval = add_mtd_device(mtd); | ||
859 | |||
860 | if (retval) { | ||
861 | dev_err(dev, "Error adding MTD device!\n"); | ||
862 | devm_free_irq(dev, prv->irq, mtd); | ||
863 | goto error; | ||
864 | } | ||
865 | |||
866 | return 0; | ||
867 | error: | ||
868 | mpc5121_nfc_free(dev, mtd); | ||
869 | return retval; | ||
870 | } | ||
871 | |||
/* Tear down the MTD device and release all driver resources */
static int __devexit mpc5121_nfc_remove(struct of_device *op)
{
	struct device *dev = &op->dev;
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct nand_chip *chip = mtd->priv;
	struct mpc5121_nfc_prv *prv = chip->priv;

	nand_release(mtd);
	/* Free the IRQ explicitly so it is gone before the clock is stopped */
	devm_free_irq(dev, prv->irq, mtd);
	mpc5121_nfc_free(dev, mtd);

	return 0;
}
885 | |||
/*
 * Device-tree match table.
 * NOTE(review): marked __devinitdata, yet referenced via
 * .match_table after init — verify this cannot be used post-init
 * (e.g. hotplug matching) before keeping the section annotation.
 */
static struct of_device_id mpc5121_nfc_match[] __devinitdata = {
	{ .compatible = "fsl,mpc5121-nfc", },
	{},
};

/* OF platform driver glue */
static struct of_platform_driver mpc5121_nfc_driver = {
	.match_table = mpc5121_nfc_match,
	.probe = mpc5121_nfc_probe,
	.remove = __devexit_p(mpc5121_nfc_remove),
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
};
900 | |||
/* Module entry point: register the OF platform driver */
static int __init mpc5121_nfc_init(void)
{
	return of_register_platform_driver(&mpc5121_nfc_driver);
}

module_init(mpc5121_nfc_init);

/* Module exit point: unregister the OF platform driver */
static void __exit mpc5121_nfc_cleanup(void)
{
	of_unregister_platform_driver(&mpc5121_nfc_driver);
}

module_exit(mpc5121_nfc_cleanup);
914 | |||
915 | MODULE_AUTHOR("Freescale Semiconductor, Inc."); | ||
916 | MODULE_DESCRIPTION("MPC5121 NAND MTD driver"); | ||
917 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index b2900d8406d3..82e94389824e 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c | |||
@@ -38,7 +38,7 @@ | |||
38 | #define DRIVER_NAME "mxc_nand" | 38 | #define DRIVER_NAME "mxc_nand" |
39 | 39 | ||
40 | #define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35()) | 40 | #define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35()) |
41 | #define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27()) | 41 | #define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21()) |
42 | 42 | ||
43 | /* Addresses for NFC registers */ | 43 | /* Addresses for NFC registers */ |
44 | #define NFC_BUF_SIZE 0xE00 | 44 | #define NFC_BUF_SIZE 0xE00 |
@@ -168,11 +168,7 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id) | |||
168 | { | 168 | { |
169 | struct mxc_nand_host *host = dev_id; | 169 | struct mxc_nand_host *host = dev_id; |
170 | 170 | ||
171 | uint16_t tmp; | 171 | disable_irq_nosync(irq); |
172 | |||
173 | tmp = readw(host->regs + NFC_CONFIG1); | ||
174 | tmp |= NFC_INT_MSK; /* Disable interrupt */ | ||
175 | writew(tmp, host->regs + NFC_CONFIG1); | ||
176 | 172 | ||
177 | wake_up(&host->irq_waitq); | 173 | wake_up(&host->irq_waitq); |
178 | 174 | ||
@@ -184,15 +180,13 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id) | |||
184 | */ | 180 | */ |
185 | static void wait_op_done(struct mxc_nand_host *host, int useirq) | 181 | static void wait_op_done(struct mxc_nand_host *host, int useirq) |
186 | { | 182 | { |
187 | uint32_t tmp; | 183 | uint16_t tmp; |
188 | int max_retries = 2000; | 184 | int max_retries = 8000; |
189 | 185 | ||
190 | if (useirq) { | 186 | if (useirq) { |
191 | if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) { | 187 | if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) { |
192 | 188 | ||
193 | tmp = readw(host->regs + NFC_CONFIG1); | 189 | enable_irq(host->irq); |
194 | tmp &= ~NFC_INT_MSK; /* Enable interrupt */ | ||
195 | writew(tmp, host->regs + NFC_CONFIG1); | ||
196 | 190 | ||
197 | wait_event(host->irq_waitq, | 191 | wait_event(host->irq_waitq, |
198 | readw(host->regs + NFC_CONFIG2) & NFC_INT); | 192 | readw(host->regs + NFC_CONFIG2) & NFC_INT); |
@@ -226,8 +220,23 @@ static void send_cmd(struct mxc_nand_host *host, uint16_t cmd, int useirq) | |||
226 | writew(cmd, host->regs + NFC_FLASH_CMD); | 220 | writew(cmd, host->regs + NFC_FLASH_CMD); |
227 | writew(NFC_CMD, host->regs + NFC_CONFIG2); | 221 | writew(NFC_CMD, host->regs + NFC_CONFIG2); |
228 | 222 | ||
229 | /* Wait for operation to complete */ | 223 | if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) { |
230 | wait_op_done(host, useirq); | 224 | int max_retries = 100; |
225 | /* Reset completion is indicated by NFC_CONFIG2 */ | ||
226 | /* being set to 0 */ | ||
227 | while (max_retries-- > 0) { | ||
228 | if (readw(host->regs + NFC_CONFIG2) == 0) { | ||
229 | break; | ||
230 | } | ||
231 | udelay(1); | ||
232 | } | ||
233 | if (max_retries < 0) | ||
234 | DEBUG(MTD_DEBUG_LEVEL0, "%s: RESET failed\n", | ||
235 | __func__); | ||
236 | } else { | ||
237 | /* Wait for operation to complete */ | ||
238 | wait_op_done(host, useirq); | ||
239 | } | ||
231 | } | 240 | } |
232 | 241 | ||
233 | /* This function sends an address (or partial address) to the | 242 | /* This function sends an address (or partial address) to the |
@@ -542,6 +551,41 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr) | |||
542 | } | 551 | } |
543 | } | 552 | } |
544 | 553 | ||
554 | static void preset(struct mtd_info *mtd) | ||
555 | { | ||
556 | struct nand_chip *nand_chip = mtd->priv; | ||
557 | struct mxc_nand_host *host = nand_chip->priv; | ||
558 | uint16_t tmp; | ||
559 | |||
560 | /* enable interrupt, disable spare enable */ | ||
561 | tmp = readw(host->regs + NFC_CONFIG1); | ||
562 | tmp &= ~NFC_INT_MSK; | ||
563 | tmp &= ~NFC_SP_EN; | ||
564 | if (nand_chip->ecc.mode == NAND_ECC_HW) { | ||
565 | tmp |= NFC_ECC_EN; | ||
566 | } else { | ||
567 | tmp &= ~NFC_ECC_EN; | ||
568 | } | ||
569 | writew(tmp, host->regs + NFC_CONFIG1); | ||
570 | /* preset operation */ | ||
571 | |||
572 | /* Unlock the internal RAM Buffer */ | ||
573 | writew(0x2, host->regs + NFC_CONFIG); | ||
574 | |||
575 | /* Blocks to be unlocked */ | ||
576 | if (nfc_is_v21()) { | ||
577 | writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR); | ||
578 | writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR); | ||
579 | } else if (nfc_is_v1()) { | ||
580 | writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR); | ||
581 | writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR); | ||
582 | } else | ||
583 | BUG(); | ||
584 | |||
585 | /* Unlock Block Command for given address range */ | ||
586 | writew(0x4, host->regs + NFC_WRPROT); | ||
587 | } | ||
588 | |||
545 | /* Used by the upper layer to write command to NAND Flash for | 589 | /* Used by the upper layer to write command to NAND Flash for |
546 | * different operations to be carried out on NAND Flash */ | 590 | * different operations to be carried out on NAND Flash */ |
547 | static void mxc_nand_command(struct mtd_info *mtd, unsigned command, | 591 | static void mxc_nand_command(struct mtd_info *mtd, unsigned command, |
@@ -559,6 +603,10 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command, | |||
559 | 603 | ||
560 | /* Command pre-processing step */ | 604 | /* Command pre-processing step */ |
561 | switch (command) { | 605 | switch (command) { |
606 | case NAND_CMD_RESET: | ||
607 | send_cmd(host, command, false); | ||
608 | preset(mtd); | ||
609 | break; | ||
562 | 610 | ||
563 | case NAND_CMD_STATUS: | 611 | case NAND_CMD_STATUS: |
564 | host->buf_start = 0; | 612 | host->buf_start = 0; |
@@ -679,7 +727,6 @@ static int __init mxcnd_probe(struct platform_device *pdev) | |||
679 | struct mxc_nand_platform_data *pdata = pdev->dev.platform_data; | 727 | struct mxc_nand_platform_data *pdata = pdev->dev.platform_data; |
680 | struct mxc_nand_host *host; | 728 | struct mxc_nand_host *host; |
681 | struct resource *res; | 729 | struct resource *res; |
682 | uint16_t tmp; | ||
683 | int err = 0, nr_parts = 0; | 730 | int err = 0, nr_parts = 0; |
684 | struct nand_ecclayout *oob_smallpage, *oob_largepage; | 731 | struct nand_ecclayout *oob_smallpage, *oob_largepage; |
685 | 732 | ||
@@ -743,51 +790,17 @@ static int __init mxcnd_probe(struct platform_device *pdev) | |||
743 | host->spare_len = 64; | 790 | host->spare_len = 64; |
744 | oob_smallpage = &nandv2_hw_eccoob_smallpage; | 791 | oob_smallpage = &nandv2_hw_eccoob_smallpage; |
745 | oob_largepage = &nandv2_hw_eccoob_largepage; | 792 | oob_largepage = &nandv2_hw_eccoob_largepage; |
793 | this->ecc.bytes = 9; | ||
746 | } else if (nfc_is_v1()) { | 794 | } else if (nfc_is_v1()) { |
747 | host->regs = host->base; | 795 | host->regs = host->base; |
748 | host->spare0 = host->base + 0x800; | 796 | host->spare0 = host->base + 0x800; |
749 | host->spare_len = 16; | 797 | host->spare_len = 16; |
750 | oob_smallpage = &nandv1_hw_eccoob_smallpage; | 798 | oob_smallpage = &nandv1_hw_eccoob_smallpage; |
751 | oob_largepage = &nandv1_hw_eccoob_largepage; | 799 | oob_largepage = &nandv1_hw_eccoob_largepage; |
752 | } else | ||
753 | BUG(); | ||
754 | |||
755 | /* disable interrupt and spare enable */ | ||
756 | tmp = readw(host->regs + NFC_CONFIG1); | ||
757 | tmp |= NFC_INT_MSK; | ||
758 | tmp &= ~NFC_SP_EN; | ||
759 | writew(tmp, host->regs + NFC_CONFIG1); | ||
760 | |||
761 | init_waitqueue_head(&host->irq_waitq); | ||
762 | |||
763 | host->irq = platform_get_irq(pdev, 0); | ||
764 | |||
765 | err = request_irq(host->irq, mxc_nfc_irq, 0, DRIVER_NAME, host); | ||
766 | if (err) | ||
767 | goto eirq; | ||
768 | |||
769 | /* Reset NAND */ | ||
770 | this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); | ||
771 | |||
772 | /* preset operation */ | ||
773 | /* Unlock the internal RAM Buffer */ | ||
774 | writew(0x2, host->regs + NFC_CONFIG); | ||
775 | |||
776 | /* Blocks to be unlocked */ | ||
777 | if (nfc_is_v21()) { | ||
778 | writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR); | ||
779 | writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR); | ||
780 | this->ecc.bytes = 9; | ||
781 | } else if (nfc_is_v1()) { | ||
782 | writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR); | ||
783 | writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR); | ||
784 | this->ecc.bytes = 3; | 800 | this->ecc.bytes = 3; |
785 | } else | 801 | } else |
786 | BUG(); | 802 | BUG(); |
787 | 803 | ||
788 | /* Unlock Block Command for given address range */ | ||
789 | writew(0x4, host->regs + NFC_WRPROT); | ||
790 | |||
791 | this->ecc.size = 512; | 804 | this->ecc.size = 512; |
792 | this->ecc.layout = oob_smallpage; | 805 | this->ecc.layout = oob_smallpage; |
793 | 806 | ||
@@ -796,14 +809,8 @@ static int __init mxcnd_probe(struct platform_device *pdev) | |||
796 | this->ecc.hwctl = mxc_nand_enable_hwecc; | 809 | this->ecc.hwctl = mxc_nand_enable_hwecc; |
797 | this->ecc.correct = mxc_nand_correct_data; | 810 | this->ecc.correct = mxc_nand_correct_data; |
798 | this->ecc.mode = NAND_ECC_HW; | 811 | this->ecc.mode = NAND_ECC_HW; |
799 | tmp = readw(host->regs + NFC_CONFIG1); | ||
800 | tmp |= NFC_ECC_EN; | ||
801 | writew(tmp, host->regs + NFC_CONFIG1); | ||
802 | } else { | 812 | } else { |
803 | this->ecc.mode = NAND_ECC_SOFT; | 813 | this->ecc.mode = NAND_ECC_SOFT; |
804 | tmp = readw(host->regs + NFC_CONFIG1); | ||
805 | tmp &= ~NFC_ECC_EN; | ||
806 | writew(tmp, host->regs + NFC_CONFIG1); | ||
807 | } | 814 | } |
808 | 815 | ||
809 | /* NAND bus width determines access funtions used by upper layer */ | 816 | /* NAND bus width determines access funtions used by upper layer */ |
@@ -817,8 +824,16 @@ static int __init mxcnd_probe(struct platform_device *pdev) | |||
817 | this->options |= NAND_USE_FLASH_BBT; | 824 | this->options |= NAND_USE_FLASH_BBT; |
818 | } | 825 | } |
819 | 826 | ||
827 | init_waitqueue_head(&host->irq_waitq); | ||
828 | |||
829 | host->irq = platform_get_irq(pdev, 0); | ||
830 | |||
831 | err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host); | ||
832 | if (err) | ||
833 | goto eirq; | ||
834 | |||
820 | /* first scan to find the device and get the page size */ | 835 | /* first scan to find the device and get the page size */ |
821 | if (nand_scan_ident(mtd, 1)) { | 836 | if (nand_scan_ident(mtd, 1, NULL)) { |
822 | err = -ENXIO; | 837 | err = -ENXIO; |
823 | goto escan; | 838 | goto escan; |
824 | } | 839 | } |
@@ -886,11 +901,14 @@ static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state) | |||
886 | int ret = 0; | 901 | int ret = 0; |
887 | 902 | ||
888 | DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n"); | 903 | DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n"); |
889 | if (mtd) { | 904 | |
890 | ret = mtd->suspend(mtd); | 905 | ret = mtd->suspend(mtd); |
891 | /* Disable the NFC clock */ | 906 | |
892 | clk_disable(host->clk); | 907 | /* |
893 | } | 908 | * nand_suspend locks the device for exclusive access, so |
909 | * the clock must already be off. | ||
910 | */ | ||
911 | BUG_ON(!ret && host->clk_act); | ||
894 | 912 | ||
895 | return ret; | 913 | return ret; |
896 | } | 914 | } |
@@ -904,11 +922,7 @@ static int mxcnd_resume(struct platform_device *pdev) | |||
904 | 922 | ||
905 | DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n"); | 923 | DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n"); |
906 | 924 | ||
907 | if (mtd) { | 925 | mtd->resume(mtd); |
908 | /* Enable the NFC clock */ | ||
909 | clk_enable(host->clk); | ||
910 | mtd->resume(mtd); | ||
911 | } | ||
912 | 926 | ||
913 | return ret; | 927 | return ret; |
914 | } | 928 | } |
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 8f2958fe2148..4a7b86423ee9 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
@@ -108,6 +108,35 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, | |||
108 | */ | 108 | */ |
109 | DEFINE_LED_TRIGGER(nand_led_trigger); | 109 | DEFINE_LED_TRIGGER(nand_led_trigger); |
110 | 110 | ||
111 | static int check_offs_len(struct mtd_info *mtd, | ||
112 | loff_t ofs, uint64_t len) | ||
113 | { | ||
114 | struct nand_chip *chip = mtd->priv; | ||
115 | int ret = 0; | ||
116 | |||
117 | /* Start address must align on block boundary */ | ||
118 | if (ofs & ((1 << chip->phys_erase_shift) - 1)) { | ||
119 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__); | ||
120 | ret = -EINVAL; | ||
121 | } | ||
122 | |||
123 | /* Length must align on block boundary */ | ||
124 | if (len & ((1 << chip->phys_erase_shift) - 1)) { | ||
125 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n", | ||
126 | __func__); | ||
127 | ret = -EINVAL; | ||
128 | } | ||
129 | |||
130 | /* Do not allow past end of device */ | ||
131 | if (ofs + len > mtd->size) { | ||
132 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Past end of device\n", | ||
133 | __func__); | ||
134 | ret = -EINVAL; | ||
135 | } | ||
136 | |||
137 | return ret; | ||
138 | } | ||
139 | |||
111 | /** | 140 | /** |
112 | * nand_release_device - [GENERIC] release chip | 141 | * nand_release_device - [GENERIC] release chip |
113 | * @mtd: MTD device structure | 142 | * @mtd: MTD device structure |
@@ -318,6 +347,9 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) | |||
318 | struct nand_chip *chip = mtd->priv; | 347 | struct nand_chip *chip = mtd->priv; |
319 | u16 bad; | 348 | u16 bad; |
320 | 349 | ||
350 | if (chip->options & NAND_BB_LAST_PAGE) | ||
351 | ofs += mtd->erasesize - mtd->writesize; | ||
352 | |||
321 | page = (int)(ofs >> chip->page_shift) & chip->pagemask; | 353 | page = (int)(ofs >> chip->page_shift) & chip->pagemask; |
322 | 354 | ||
323 | if (getchip) { | 355 | if (getchip) { |
@@ -335,14 +367,18 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) | |||
335 | bad = cpu_to_le16(chip->read_word(mtd)); | 367 | bad = cpu_to_le16(chip->read_word(mtd)); |
336 | if (chip->badblockpos & 0x1) | 368 | if (chip->badblockpos & 0x1) |
337 | bad >>= 8; | 369 | bad >>= 8; |
338 | if ((bad & 0xFF) != 0xff) | 370 | else |
339 | res = 1; | 371 | bad &= 0xFF; |
340 | } else { | 372 | } else { |
341 | chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page); | 373 | chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page); |
342 | if (chip->read_byte(mtd) != 0xff) | 374 | bad = chip->read_byte(mtd); |
343 | res = 1; | ||
344 | } | 375 | } |
345 | 376 | ||
377 | if (likely(chip->badblockbits == 8)) | ||
378 | res = bad != 0xFF; | ||
379 | else | ||
380 | res = hweight8(bad) < chip->badblockbits; | ||
381 | |||
346 | if (getchip) | 382 | if (getchip) |
347 | nand_release_device(mtd); | 383 | nand_release_device(mtd); |
348 | 384 | ||
@@ -363,6 +399,9 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) | |||
363 | uint8_t buf[2] = { 0, 0 }; | 399 | uint8_t buf[2] = { 0, 0 }; |
364 | int block, ret; | 400 | int block, ret; |
365 | 401 | ||
402 | if (chip->options & NAND_BB_LAST_PAGE) | ||
403 | ofs += mtd->erasesize - mtd->writesize; | ||
404 | |||
366 | /* Get block number */ | 405 | /* Get block number */ |
367 | block = (int)(ofs >> chip->bbt_erase_shift); | 406 | block = (int)(ofs >> chip->bbt_erase_shift); |
368 | if (chip->bbt) | 407 | if (chip->bbt) |
@@ -401,6 +440,11 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) | |||
401 | static int nand_check_wp(struct mtd_info *mtd) | 440 | static int nand_check_wp(struct mtd_info *mtd) |
402 | { | 441 | { |
403 | struct nand_chip *chip = mtd->priv; | 442 | struct nand_chip *chip = mtd->priv; |
443 | |||
444 | /* broken xD cards report WP despite being writable */ | ||
445 | if (chip->options & NAND_BROKEN_XD) | ||
446 | return 0; | ||
447 | |||
404 | /* Check the WP bit */ | 448 | /* Check the WP bit */ |
405 | chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); | 449 | chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); |
406 | return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1; | 450 | return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1; |
@@ -744,9 +788,6 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state) | |||
744 | chip->state = FL_PM_SUSPENDED; | 788 | chip->state = FL_PM_SUSPENDED; |
745 | spin_unlock(lock); | 789 | spin_unlock(lock); |
746 | return 0; | 790 | return 0; |
747 | } else { | ||
748 | spin_unlock(lock); | ||
749 | return -EAGAIN; | ||
750 | } | 791 | } |
751 | } | 792 | } |
752 | set_current_state(TASK_UNINTERRUPTIBLE); | 793 | set_current_state(TASK_UNINTERRUPTIBLE); |
@@ -835,6 +876,168 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) | |||
835 | } | 876 | } |
836 | 877 | ||
837 | /** | 878 | /** |
879 | * __nand_unlock - [REPLACABLE] unlocks specified locked blockes | ||
880 | * | ||
881 | * @param mtd - mtd info | ||
882 | * @param ofs - offset to start unlock from | ||
883 | * @param len - length to unlock | ||
884 | * @invert - when = 0, unlock the range of blocks within the lower and | ||
885 | * upper boundary address | ||
886 | * whne = 1, unlock the range of blocks outside the boundaries | ||
887 | * of the lower and upper boundary address | ||
888 | * | ||
889 | * @return - unlock status | ||
890 | */ | ||
891 | static int __nand_unlock(struct mtd_info *mtd, loff_t ofs, | ||
892 | uint64_t len, int invert) | ||
893 | { | ||
894 | int ret = 0; | ||
895 | int status, page; | ||
896 | struct nand_chip *chip = mtd->priv; | ||
897 | |||
898 | /* Submit address of first page to unlock */ | ||
899 | page = ofs >> chip->page_shift; | ||
900 | chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask); | ||
901 | |||
902 | /* Submit address of last page to unlock */ | ||
903 | page = (ofs + len) >> chip->page_shift; | ||
904 | chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1, | ||
905 | (page | invert) & chip->pagemask); | ||
906 | |||
907 | /* Call wait ready function */ | ||
908 | status = chip->waitfunc(mtd, chip); | ||
909 | udelay(1000); | ||
910 | /* See if device thinks it succeeded */ | ||
911 | if (status & 0x01) { | ||
912 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n", | ||
913 | __func__, status); | ||
914 | ret = -EIO; | ||
915 | } | ||
916 | |||
917 | return ret; | ||
918 | } | ||
919 | |||
920 | /** | ||
921 | * nand_unlock - [REPLACABLE] unlocks specified locked blockes | ||
922 | * | ||
923 | * @param mtd - mtd info | ||
924 | * @param ofs - offset to start unlock from | ||
925 | * @param len - length to unlock | ||
926 | * | ||
927 | * @return - unlock status | ||
928 | */ | ||
929 | int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | ||
930 | { | ||
931 | int ret = 0; | ||
932 | int chipnr; | ||
933 | struct nand_chip *chip = mtd->priv; | ||
934 | |||
935 | DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n", | ||
936 | __func__, (unsigned long long)ofs, len); | ||
937 | |||
938 | if (check_offs_len(mtd, ofs, len)) | ||
939 | ret = -EINVAL; | ||
940 | |||
941 | /* Align to last block address if size addresses end of the device */ | ||
942 | if (ofs + len == mtd->size) | ||
943 | len -= mtd->erasesize; | ||
944 | |||
945 | nand_get_device(chip, mtd, FL_UNLOCKING); | ||
946 | |||
947 | /* Shift to get chip number */ | ||
948 | chipnr = ofs >> chip->chip_shift; | ||
949 | |||
950 | chip->select_chip(mtd, chipnr); | ||
951 | |||
952 | /* Check, if it is write protected */ | ||
953 | if (nand_check_wp(mtd)) { | ||
954 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n", | ||
955 | __func__); | ||
956 | ret = -EIO; | ||
957 | goto out; | ||
958 | } | ||
959 | |||
960 | ret = __nand_unlock(mtd, ofs, len, 0); | ||
961 | |||
962 | out: | ||
963 | /* de-select the NAND device */ | ||
964 | chip->select_chip(mtd, -1); | ||
965 | |||
966 | nand_release_device(mtd); | ||
967 | |||
968 | return ret; | ||
969 | } | ||
970 | |||
971 | /** | ||
972 | * nand_lock - [REPLACABLE] locks all blockes present in the device | ||
973 | * | ||
974 | * @param mtd - mtd info | ||
975 | * @param ofs - offset to start unlock from | ||
976 | * @param len - length to unlock | ||
977 | * | ||
978 | * @return - lock status | ||
979 | * | ||
980 | * This feature is not support in many NAND parts. 'Micron' NAND parts | ||
981 | * do have this feature, but it allows only to lock all blocks not for | ||
982 | * specified range for block. | ||
983 | * | ||
984 | * Implementing 'lock' feature by making use of 'unlock', for now. | ||
985 | */ | ||
986 | int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | ||
987 | { | ||
988 | int ret = 0; | ||
989 | int chipnr, status, page; | ||
990 | struct nand_chip *chip = mtd->priv; | ||
991 | |||
992 | DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n", | ||
993 | __func__, (unsigned long long)ofs, len); | ||
994 | |||
995 | if (check_offs_len(mtd, ofs, len)) | ||
996 | ret = -EINVAL; | ||
997 | |||
998 | nand_get_device(chip, mtd, FL_LOCKING); | ||
999 | |||
1000 | /* Shift to get chip number */ | ||
1001 | chipnr = ofs >> chip->chip_shift; | ||
1002 | |||
1003 | chip->select_chip(mtd, chipnr); | ||
1004 | |||
1005 | /* Check, if it is write protected */ | ||
1006 | if (nand_check_wp(mtd)) { | ||
1007 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n", | ||
1008 | __func__); | ||
1009 | status = MTD_ERASE_FAILED; | ||
1010 | ret = -EIO; | ||
1011 | goto out; | ||
1012 | } | ||
1013 | |||
1014 | /* Submit address of first page to lock */ | ||
1015 | page = ofs >> chip->page_shift; | ||
1016 | chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask); | ||
1017 | |||
1018 | /* Call wait ready function */ | ||
1019 | status = chip->waitfunc(mtd, chip); | ||
1020 | udelay(1000); | ||
1021 | /* See if device thinks it succeeded */ | ||
1022 | if (status & 0x01) { | ||
1023 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n", | ||
1024 | __func__, status); | ||
1025 | ret = -EIO; | ||
1026 | goto out; | ||
1027 | } | ||
1028 | |||
1029 | ret = __nand_unlock(mtd, ofs, len, 0x1); | ||
1030 | |||
1031 | out: | ||
1032 | /* de-select the NAND device */ | ||
1033 | chip->select_chip(mtd, -1); | ||
1034 | |||
1035 | nand_release_device(mtd); | ||
1036 | |||
1037 | return ret; | ||
1038 | } | ||
1039 | |||
1040 | /** | ||
838 | * nand_read_page_raw - [Intern] read raw page data without ecc | 1041 | * nand_read_page_raw - [Intern] read raw page data without ecc |
839 | * @mtd: mtd info structure | 1042 | * @mtd: mtd info structure |
840 | * @chip: nand chip info structure | 1043 | * @chip: nand chip info structure |
@@ -1232,6 +1435,9 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, | |||
1232 | int ret = 0; | 1435 | int ret = 0; |
1233 | uint32_t readlen = ops->len; | 1436 | uint32_t readlen = ops->len; |
1234 | uint32_t oobreadlen = ops->ooblen; | 1437 | uint32_t oobreadlen = ops->ooblen; |
1438 | uint32_t max_oobsize = ops->mode == MTD_OOB_AUTO ? | ||
1439 | mtd->oobavail : mtd->oobsize; | ||
1440 | |||
1235 | uint8_t *bufpoi, *oob, *buf; | 1441 | uint8_t *bufpoi, *oob, *buf; |
1236 | 1442 | ||
1237 | stats = mtd->ecc_stats; | 1443 | stats = mtd->ecc_stats; |
@@ -1282,18 +1488,14 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, | |||
1282 | buf += bytes; | 1488 | buf += bytes; |
1283 | 1489 | ||
1284 | if (unlikely(oob)) { | 1490 | if (unlikely(oob)) { |
1285 | /* Raw mode does data:oob:data:oob */ | 1491 | |
1286 | if (ops->mode != MTD_OOB_RAW) { | 1492 | int toread = min(oobreadlen, max_oobsize); |
1287 | int toread = min(oobreadlen, | 1493 | |
1288 | chip->ecc.layout->oobavail); | 1494 | if (toread) { |
1289 | if (toread) { | 1495 | oob = nand_transfer_oob(chip, |
1290 | oob = nand_transfer_oob(chip, | 1496 | oob, ops, toread); |
1291 | oob, ops, toread); | 1497 | oobreadlen -= toread; |
1292 | oobreadlen -= toread; | 1498 | } |
1293 | } | ||
1294 | } else | ||
1295 | buf = nand_transfer_oob(chip, | ||
1296 | buf, ops, mtd->oobsize); | ||
1297 | } | 1499 | } |
1298 | 1500 | ||
1299 | if (!(chip->options & NAND_NO_READRDY)) { | 1501 | if (!(chip->options & NAND_NO_READRDY)) { |
@@ -1880,11 +2082,9 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, | |||
1880 | * @oob: oob data buffer | 2082 | * @oob: oob data buffer |
1881 | * @ops: oob ops structure | 2083 | * @ops: oob ops structure |
1882 | */ | 2084 | */ |
1883 | static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, | 2085 | static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len, |
1884 | struct mtd_oob_ops *ops) | 2086 | struct mtd_oob_ops *ops) |
1885 | { | 2087 | { |
1886 | size_t len = ops->ooblen; | ||
1887 | |||
1888 | switch(ops->mode) { | 2088 | switch(ops->mode) { |
1889 | 2089 | ||
1890 | case MTD_OOB_PLACE: | 2090 | case MTD_OOB_PLACE: |
@@ -1939,6 +2139,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, | |||
1939 | int chipnr, realpage, page, blockmask, column; | 2139 | int chipnr, realpage, page, blockmask, column; |
1940 | struct nand_chip *chip = mtd->priv; | 2140 | struct nand_chip *chip = mtd->priv; |
1941 | uint32_t writelen = ops->len; | 2141 | uint32_t writelen = ops->len; |
2142 | |||
2143 | uint32_t oobwritelen = ops->ooblen; | ||
2144 | uint32_t oobmaxlen = ops->mode == MTD_OOB_AUTO ? | ||
2145 | mtd->oobavail : mtd->oobsize; | ||
2146 | |||
1942 | uint8_t *oob = ops->oobbuf; | 2147 | uint8_t *oob = ops->oobbuf; |
1943 | uint8_t *buf = ops->datbuf; | 2148 | uint8_t *buf = ops->datbuf; |
1944 | int ret, subpage; | 2149 | int ret, subpage; |
@@ -1980,6 +2185,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, | |||
1980 | if (likely(!oob)) | 2185 | if (likely(!oob)) |
1981 | memset(chip->oob_poi, 0xff, mtd->oobsize); | 2186 | memset(chip->oob_poi, 0xff, mtd->oobsize); |
1982 | 2187 | ||
2188 | /* Don't allow multipage oob writes with offset */ | ||
2189 | if (ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) | ||
2190 | return -EINVAL; | ||
2191 | |||
1983 | while(1) { | 2192 | while(1) { |
1984 | int bytes = mtd->writesize; | 2193 | int bytes = mtd->writesize; |
1985 | int cached = writelen > bytes && page != blockmask; | 2194 | int cached = writelen > bytes && page != blockmask; |
@@ -1995,8 +2204,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, | |||
1995 | wbuf = chip->buffers->databuf; | 2204 | wbuf = chip->buffers->databuf; |
1996 | } | 2205 | } |
1997 | 2206 | ||
1998 | if (unlikely(oob)) | 2207 | if (unlikely(oob)) { |
1999 | oob = nand_fill_oob(chip, oob, ops); | 2208 | size_t len = min(oobwritelen, oobmaxlen); |
2209 | oob = nand_fill_oob(chip, oob, len, ops); | ||
2210 | oobwritelen -= len; | ||
2211 | } | ||
2000 | 2212 | ||
2001 | ret = chip->write_page(mtd, chip, wbuf, page, cached, | 2213 | ret = chip->write_page(mtd, chip, wbuf, page, cached, |
2002 | (ops->mode == MTD_OOB_RAW)); | 2214 | (ops->mode == MTD_OOB_RAW)); |
@@ -2170,7 +2382,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, | |||
2170 | chip->pagebuf = -1; | 2382 | chip->pagebuf = -1; |
2171 | 2383 | ||
2172 | memset(chip->oob_poi, 0xff, mtd->oobsize); | 2384 | memset(chip->oob_poi, 0xff, mtd->oobsize); |
2173 | nand_fill_oob(chip, ops->oobbuf, ops); | 2385 | nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops); |
2174 | status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask); | 2386 | status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask); |
2175 | memset(chip->oob_poi, 0xff, mtd->oobsize); | 2387 | memset(chip->oob_poi, 0xff, mtd->oobsize); |
2176 | 2388 | ||
@@ -2293,25 +2505,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, | |||
2293 | __func__, (unsigned long long)instr->addr, | 2505 | __func__, (unsigned long long)instr->addr, |
2294 | (unsigned long long)instr->len); | 2506 | (unsigned long long)instr->len); |
2295 | 2507 | ||
2296 | /* Start address must align on block boundary */ | 2508 | if (check_offs_len(mtd, instr->addr, instr->len)) |
2297 | if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) { | ||
2298 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__); | ||
2299 | return -EINVAL; | 2509 | return -EINVAL; |
2300 | } | ||
2301 | |||
2302 | /* Length must align on block boundary */ | ||
2303 | if (instr->len & ((1 << chip->phys_erase_shift) - 1)) { | ||
2304 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n", | ||
2305 | __func__); | ||
2306 | return -EINVAL; | ||
2307 | } | ||
2308 | |||
2309 | /* Do not allow erase past end of device */ | ||
2310 | if ((instr->len + instr->addr) > mtd->size) { | ||
2311 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Erase past end of device\n", | ||
2312 | __func__); | ||
2313 | return -EINVAL; | ||
2314 | } | ||
2315 | 2510 | ||
2316 | instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; | 2511 | instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; |
2317 | 2512 | ||
@@ -2582,11 +2777,11 @@ static void nand_set_defaults(struct nand_chip *chip, int busw) | |||
2582 | */ | 2777 | */ |
2583 | static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | 2778 | static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, |
2584 | struct nand_chip *chip, | 2779 | struct nand_chip *chip, |
2585 | int busw, int *maf_id) | 2780 | int busw, int *maf_id, |
2781 | struct nand_flash_dev *type) | ||
2586 | { | 2782 | { |
2587 | struct nand_flash_dev *type = NULL; | ||
2588 | int i, dev_id, maf_idx; | 2783 | int i, dev_id, maf_idx; |
2589 | int tmp_id, tmp_manf; | 2784 | u8 id_data[8]; |
2590 | 2785 | ||
2591 | /* Select the device */ | 2786 | /* Select the device */ |
2592 | chip->select_chip(mtd, 0); | 2787 | chip->select_chip(mtd, 0); |
@@ -2612,27 +2807,26 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
2612 | 2807 | ||
2613 | chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); | 2808 | chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); |
2614 | 2809 | ||
2615 | /* Read manufacturer and device IDs */ | 2810 | /* Read entire ID string */ |
2616 | 2811 | ||
2617 | tmp_manf = chip->read_byte(mtd); | 2812 | for (i = 0; i < 8; i++) |
2618 | tmp_id = chip->read_byte(mtd); | 2813 | id_data[i] = chip->read_byte(mtd); |
2619 | 2814 | ||
2620 | if (tmp_manf != *maf_id || tmp_id != dev_id) { | 2815 | if (id_data[0] != *maf_id || id_data[1] != dev_id) { |
2621 | printk(KERN_INFO "%s: second ID read did not match " | 2816 | printk(KERN_INFO "%s: second ID read did not match " |
2622 | "%02x,%02x against %02x,%02x\n", __func__, | 2817 | "%02x,%02x against %02x,%02x\n", __func__, |
2623 | *maf_id, dev_id, tmp_manf, tmp_id); | 2818 | *maf_id, dev_id, id_data[0], id_data[1]); |
2624 | return ERR_PTR(-ENODEV); | 2819 | return ERR_PTR(-ENODEV); |
2625 | } | 2820 | } |
2626 | 2821 | ||
2627 | /* Lookup the flash id */ | ||
2628 | for (i = 0; nand_flash_ids[i].name != NULL; i++) { | ||
2629 | if (dev_id == nand_flash_ids[i].id) { | ||
2630 | type = &nand_flash_ids[i]; | ||
2631 | break; | ||
2632 | } | ||
2633 | } | ||
2634 | |||
2635 | if (!type) | 2822 | if (!type) |
2823 | type = nand_flash_ids; | ||
2824 | |||
2825 | for (; type->name != NULL; type++) | ||
2826 | if (dev_id == type->id) | ||
2827 | break; | ||
2828 | |||
2829 | if (!type->name) | ||
2636 | return ERR_PTR(-ENODEV); | 2830 | return ERR_PTR(-ENODEV); |
2637 | 2831 | ||
2638 | if (!mtd->name) | 2832 | if (!mtd->name) |
@@ -2644,21 +2838,45 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
2644 | if (!type->pagesize) { | 2838 | if (!type->pagesize) { |
2645 | int extid; | 2839 | int extid; |
2646 | /* The 3rd id byte holds MLC / multichip data */ | 2840 | /* The 3rd id byte holds MLC / multichip data */ |
2647 | chip->cellinfo = chip->read_byte(mtd); | 2841 | chip->cellinfo = id_data[2]; |
2648 | /* The 4th id byte is the important one */ | 2842 | /* The 4th id byte is the important one */ |
2649 | extid = chip->read_byte(mtd); | 2843 | extid = id_data[3]; |
2650 | /* Calc pagesize */ | ||
2651 | mtd->writesize = 1024 << (extid & 0x3); | ||
2652 | extid >>= 2; | ||
2653 | /* Calc oobsize */ | ||
2654 | mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9); | ||
2655 | extid >>= 2; | ||
2656 | /* Calc blocksize. Blocksize is multiples of 64KiB */ | ||
2657 | mtd->erasesize = (64 * 1024) << (extid & 0x03); | ||
2658 | extid >>= 2; | ||
2659 | /* Get buswidth information */ | ||
2660 | busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0; | ||
2661 | 2844 | ||
2845 | /* | ||
2846 | * Field definitions are in the following datasheets: | ||
2847 | * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32) | ||
2848 | * New style (6 byte ID): Samsung K9GAG08U0D (p.40) | ||
2849 | * | ||
2850 | * Check for wraparound + Samsung ID + nonzero 6th byte | ||
2851 | * to decide what to do. | ||
2852 | */ | ||
2853 | if (id_data[0] == id_data[6] && id_data[1] == id_data[7] && | ||
2854 | id_data[0] == NAND_MFR_SAMSUNG && | ||
2855 | id_data[5] != 0x00) { | ||
2856 | /* Calc pagesize */ | ||
2857 | mtd->writesize = 2048 << (extid & 0x03); | ||
2858 | extid >>= 2; | ||
2859 | /* Calc oobsize */ | ||
2860 | mtd->oobsize = (extid & 0x03) == 0x01 ? 128 : 218; | ||
2861 | extid >>= 2; | ||
2862 | /* Calc blocksize */ | ||
2863 | mtd->erasesize = (128 * 1024) << | ||
2864 | (((extid >> 1) & 0x04) | (extid & 0x03)); | ||
2865 | busw = 0; | ||
2866 | } else { | ||
2867 | /* Calc pagesize */ | ||
2868 | mtd->writesize = 1024 << (extid & 0x03); | ||
2869 | extid >>= 2; | ||
2870 | /* Calc oobsize */ | ||
2871 | mtd->oobsize = (8 << (extid & 0x01)) * | ||
2872 | (mtd->writesize >> 9); | ||
2873 | extid >>= 2; | ||
2874 | /* Calc blocksize. Blocksize is multiples of 64KiB */ | ||
2875 | mtd->erasesize = (64 * 1024) << (extid & 0x03); | ||
2876 | extid >>= 2; | ||
2877 | /* Get buswidth information */ | ||
2878 | busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0; | ||
2879 | } | ||
2662 | } else { | 2880 | } else { |
2663 | /* | 2881 | /* |
2664 | * Old devices have chip data hardcoded in the device id table | 2882 | * Old devices have chip data hardcoded in the device id table |
@@ -2704,6 +2922,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
2704 | /* Set the bad block position */ | 2922 | /* Set the bad block position */ |
2705 | chip->badblockpos = mtd->writesize > 512 ? | 2923 | chip->badblockpos = mtd->writesize > 512 ? |
2706 | NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS; | 2924 | NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS; |
2925 | chip->badblockbits = 8; | ||
2707 | 2926 | ||
2708 | /* Get chip options, preserve non chip based options */ | 2927 | /* Get chip options, preserve non chip based options */ |
2709 | chip->options &= ~NAND_CHIPOPTIONS_MSK; | 2928 | chip->options &= ~NAND_CHIPOPTIONS_MSK; |
@@ -2720,6 +2939,15 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
2720 | if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize) | 2939 | if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize) |
2721 | chip->options &= ~NAND_SAMSUNG_LP_OPTIONS; | 2940 | chip->options &= ~NAND_SAMSUNG_LP_OPTIONS; |
2722 | 2941 | ||
2942 | /* | ||
2943 | * Bad block marker is stored in the last page of each block | ||
2944 | * on Samsung and Hynix MLC devices | ||
2945 | */ | ||
2946 | if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) && | ||
2947 | (*maf_id == NAND_MFR_SAMSUNG || | ||
2948 | *maf_id == NAND_MFR_HYNIX)) | ||
2949 | chip->options |= NAND_BB_LAST_PAGE; | ||
2950 | |||
2723 | /* Check for AND chips with 4 page planes */ | 2951 | /* Check for AND chips with 4 page planes */ |
2724 | if (chip->options & NAND_4PAGE_ARRAY) | 2952 | if (chip->options & NAND_4PAGE_ARRAY) |
2725 | chip->erase_cmd = multi_erase_cmd; | 2953 | chip->erase_cmd = multi_erase_cmd; |
@@ -2741,13 +2969,15 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
2741 | * nand_scan_ident - [NAND Interface] Scan for the NAND device | 2969 | * nand_scan_ident - [NAND Interface] Scan for the NAND device |
2742 | * @mtd: MTD device structure | 2970 | * @mtd: MTD device structure |
2743 | * @maxchips: Number of chips to scan for | 2971 | * @maxchips: Number of chips to scan for |
2972 | * @table: Alternative NAND ID table | ||
2744 | * | 2973 | * |
2745 | * This is the first phase of the normal nand_scan() function. It | 2974 | * This is the first phase of the normal nand_scan() function. It |
2746 | * reads the flash ID and sets up MTD fields accordingly. | 2975 | * reads the flash ID and sets up MTD fields accordingly. |
2747 | * | 2976 | * |
2748 | * The mtd->owner field must be set to the module of the caller. | 2977 | * The mtd->owner field must be set to the module of the caller. |
2749 | */ | 2978 | */ |
2750 | int nand_scan_ident(struct mtd_info *mtd, int maxchips) | 2979 | int nand_scan_ident(struct mtd_info *mtd, int maxchips, |
2980 | struct nand_flash_dev *table) | ||
2751 | { | 2981 | { |
2752 | int i, busw, nand_maf_id; | 2982 | int i, busw, nand_maf_id; |
2753 | struct nand_chip *chip = mtd->priv; | 2983 | struct nand_chip *chip = mtd->priv; |
@@ -2759,7 +2989,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips) | |||
2759 | nand_set_defaults(chip, busw); | 2989 | nand_set_defaults(chip, busw); |
2760 | 2990 | ||
2761 | /* Read the flash type */ | 2991 | /* Read the flash type */ |
2762 | type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id); | 2992 | type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id, table); |
2763 | 2993 | ||
2764 | if (IS_ERR(type)) { | 2994 | if (IS_ERR(type)) { |
2765 | if (!(chip->options & NAND_SCAN_SILENT_NODEV)) | 2995 | if (!(chip->options & NAND_SCAN_SILENT_NODEV)) |
@@ -2989,7 +3219,8 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
2989 | 3219 | ||
2990 | /* Fill in remaining MTD driver data */ | 3220 | /* Fill in remaining MTD driver data */ |
2991 | mtd->type = MTD_NANDFLASH; | 3221 | mtd->type = MTD_NANDFLASH; |
2992 | mtd->flags = MTD_CAP_NANDFLASH; | 3222 | mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM : |
3223 | MTD_CAP_NANDFLASH; | ||
2993 | mtd->erase = nand_erase; | 3224 | mtd->erase = nand_erase; |
2994 | mtd->point = NULL; | 3225 | mtd->point = NULL; |
2995 | mtd->unpoint = NULL; | 3226 | mtd->unpoint = NULL; |
@@ -3050,7 +3281,7 @@ int nand_scan(struct mtd_info *mtd, int maxchips) | |||
3050 | BUG(); | 3281 | BUG(); |
3051 | } | 3282 | } |
3052 | 3283 | ||
3053 | ret = nand_scan_ident(mtd, maxchips); | 3284 | ret = nand_scan_ident(mtd, maxchips, NULL); |
3054 | if (!ret) | 3285 | if (!ret) |
3055 | ret = nand_scan_tail(mtd); | 3286 | ret = nand_scan_tail(mtd); |
3056 | return ret; | 3287 | return ret; |
@@ -3077,6 +3308,8 @@ void nand_release(struct mtd_info *mtd) | |||
3077 | kfree(chip->buffers); | 3308 | kfree(chip->buffers); |
3078 | } | 3309 | } |
3079 | 3310 | ||
3311 | EXPORT_SYMBOL_GPL(nand_lock); | ||
3312 | EXPORT_SYMBOL_GPL(nand_unlock); | ||
3080 | EXPORT_SYMBOL_GPL(nand_scan); | 3313 | EXPORT_SYMBOL_GPL(nand_scan); |
3081 | EXPORT_SYMBOL_GPL(nand_scan_ident); | 3314 | EXPORT_SYMBOL_GPL(nand_scan_ident); |
3082 | EXPORT_SYMBOL_GPL(nand_scan_tail); | 3315 | EXPORT_SYMBOL_GPL(nand_scan_tail); |
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c index 55c23e5cd210..ad97c0ce73b2 100644 --- a/drivers/mtd/nand/nand_bbt.c +++ b/drivers/mtd/nand/nand_bbt.c | |||
@@ -237,15 +237,33 @@ static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs, | |||
237 | size_t len) | 237 | size_t len) |
238 | { | 238 | { |
239 | struct mtd_oob_ops ops; | 239 | struct mtd_oob_ops ops; |
240 | int res; | ||
240 | 241 | ||
241 | ops.mode = MTD_OOB_RAW; | 242 | ops.mode = MTD_OOB_RAW; |
242 | ops.ooboffs = 0; | 243 | ops.ooboffs = 0; |
243 | ops.ooblen = mtd->oobsize; | 244 | ops.ooblen = mtd->oobsize; |
244 | ops.oobbuf = buf; | ||
245 | ops.datbuf = buf; | ||
246 | ops.len = len; | ||
247 | 245 | ||
248 | return mtd->read_oob(mtd, offs, &ops); | 246 | |
247 | while (len > 0) { | ||
248 | if (len <= mtd->writesize) { | ||
249 | ops.oobbuf = buf + len; | ||
250 | ops.datbuf = buf; | ||
251 | ops.len = len; | ||
252 | return mtd->read_oob(mtd, offs, &ops); | ||
253 | } else { | ||
254 | ops.oobbuf = buf + mtd->writesize; | ||
255 | ops.datbuf = buf; | ||
256 | ops.len = mtd->writesize; | ||
257 | res = mtd->read_oob(mtd, offs, &ops); | ||
258 | |||
259 | if (res) | ||
260 | return res; | ||
261 | } | ||
262 | |||
263 | buf += mtd->oobsize + mtd->writesize; | ||
264 | len -= mtd->writesize; | ||
265 | } | ||
266 | return 0; | ||
249 | } | 267 | } |
250 | 268 | ||
251 | /* | 269 | /* |
@@ -414,6 +432,9 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
414 | from = (loff_t)startblock << (this->bbt_erase_shift - 1); | 432 | from = (loff_t)startblock << (this->bbt_erase_shift - 1); |
415 | } | 433 | } |
416 | 434 | ||
435 | if (this->options & NAND_BB_LAST_PAGE) | ||
436 | from += mtd->erasesize - (mtd->writesize * len); | ||
437 | |||
417 | for (i = startblock; i < numblocks;) { | 438 | for (i = startblock; i < numblocks;) { |
418 | int ret; | 439 | int ret; |
419 | 440 | ||
diff --git a/drivers/mtd/nand/nand_bcm_umi.h b/drivers/mtd/nand/nand_bcm_umi.h index 7cec2cd97854..198b304d6f72 100644 --- a/drivers/mtd/nand/nand_bcm_umi.h +++ b/drivers/mtd/nand/nand_bcm_umi.h | |||
@@ -167,18 +167,27 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize, | |||
167 | int numToRead = 16; /* There are 16 bytes per sector in the OOB */ | 167 | int numToRead = 16; /* There are 16 bytes per sector in the OOB */ |
168 | 168 | ||
169 | /* ECC is already paused when this function is called */ | 169 | /* ECC is already paused when this function is called */ |
170 | if (pageSize != NAND_DATA_ACCESS_SIZE) { | ||
171 | /* skip BI */ | ||
172 | #if defined(__KERNEL__) && !defined(STANDALONE) | ||
173 | *oobp++ = REG_NAND_DATA8; | ||
174 | #else | ||
175 | REG_NAND_DATA8; | ||
176 | #endif | ||
177 | numToRead--; | ||
178 | } | ||
170 | 179 | ||
171 | if (pageSize == NAND_DATA_ACCESS_SIZE) { | 180 | while (numToRead > numEccBytes) { |
172 | while (numToRead > numEccBytes) { | 181 | /* skip free oob region */ |
173 | /* skip free oob region */ | ||
174 | #if defined(__KERNEL__) && !defined(STANDALONE) | 182 | #if defined(__KERNEL__) && !defined(STANDALONE) |
175 | *oobp++ = REG_NAND_DATA8; | 183 | *oobp++ = REG_NAND_DATA8; |
176 | #else | 184 | #else |
177 | REG_NAND_DATA8; | 185 | REG_NAND_DATA8; |
178 | #endif | 186 | #endif |
179 | numToRead--; | 187 | numToRead--; |
180 | } | 188 | } |
181 | 189 | ||
190 | if (pageSize == NAND_DATA_ACCESS_SIZE) { | ||
182 | /* read ECC bytes before BI */ | 191 | /* read ECC bytes before BI */ |
183 | nand_bcm_umi_bch_resume_read_ecc_calc(); | 192 | nand_bcm_umi_bch_resume_read_ecc_calc(); |
184 | 193 | ||
@@ -190,6 +199,7 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize, | |||
190 | #else | 199 | #else |
191 | eccCalc[eccPos++] = REG_NAND_DATA8; | 200 | eccCalc[eccPos++] = REG_NAND_DATA8; |
192 | #endif | 201 | #endif |
202 | numToRead--; | ||
193 | } | 203 | } |
194 | 204 | ||
195 | nand_bcm_umi_bch_pause_read_ecc_calc(); | 205 | nand_bcm_umi_bch_pause_read_ecc_calc(); |
@@ -204,49 +214,18 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize, | |||
204 | numToRead--; | 214 | numToRead--; |
205 | } | 215 | } |
206 | 216 | ||
207 | /* read ECC bytes */ | 217 | } |
208 | nand_bcm_umi_bch_resume_read_ecc_calc(); | 218 | /* read ECC bytes */ |
209 | while (numToRead) { | 219 | nand_bcm_umi_bch_resume_read_ecc_calc(); |
210 | #if defined(__KERNEL__) && !defined(STANDALONE) | 220 | while (numToRead) { |
211 | *oobp = REG_NAND_DATA8; | ||
212 | eccCalc[eccPos++] = *oobp; | ||
213 | oobp++; | ||
214 | #else | ||
215 | eccCalc[eccPos++] = REG_NAND_DATA8; | ||
216 | #endif | ||
217 | numToRead--; | ||
218 | } | ||
219 | } else { | ||
220 | /* skip BI */ | ||
221 | #if defined(__KERNEL__) && !defined(STANDALONE) | 221 | #if defined(__KERNEL__) && !defined(STANDALONE) |
222 | *oobp++ = REG_NAND_DATA8; | 222 | *oobp = REG_NAND_DATA8; |
223 | eccCalc[eccPos++] = *oobp; | ||
224 | oobp++; | ||
223 | #else | 225 | #else |
224 | REG_NAND_DATA8; | 226 | eccCalc[eccPos++] = REG_NAND_DATA8; |
225 | #endif | 227 | #endif |
226 | numToRead--; | 228 | numToRead--; |
227 | |||
228 | while (numToRead > numEccBytes) { | ||
229 | /* skip free oob region */ | ||
230 | #if defined(__KERNEL__) && !defined(STANDALONE) | ||
231 | *oobp++ = REG_NAND_DATA8; | ||
232 | #else | ||
233 | REG_NAND_DATA8; | ||
234 | #endif | ||
235 | numToRead--; | ||
236 | } | ||
237 | |||
238 | /* read ECC bytes */ | ||
239 | nand_bcm_umi_bch_resume_read_ecc_calc(); | ||
240 | while (numToRead) { | ||
241 | #if defined(__KERNEL__) && !defined(STANDALONE) | ||
242 | *oobp = REG_NAND_DATA8; | ||
243 | eccCalc[eccPos++] = *oobp; | ||
244 | oobp++; | ||
245 | #else | ||
246 | eccCalc[eccPos++] = REG_NAND_DATA8; | ||
247 | #endif | ||
248 | numToRead--; | ||
249 | } | ||
250 | } | 229 | } |
251 | } | 230 | } |
252 | 231 | ||
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index 69ee2c90eb0b..89907ed99009 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c | |||
@@ -82,6 +82,7 @@ struct nand_flash_dev nand_flash_ids[] = { | |||
82 | /* 1 Gigabit */ | 82 | /* 1 Gigabit */ |
83 | {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS}, | 83 | {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS}, |
84 | {"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, LP_OPTIONS}, | 84 | {"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, LP_OPTIONS}, |
85 | {"NAND 128MiB 3,3V 8-bit", 0xD1, 0, 128, 0, LP_OPTIONS}, | ||
85 | {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16}, | 86 | {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16}, |
86 | {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16}, | 87 | {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16}, |
87 | 88 | ||
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index 7281000fef2d..261337efe0ee 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c | |||
@@ -80,6 +80,9 @@ | |||
80 | #ifndef CONFIG_NANDSIM_DBG | 80 | #ifndef CONFIG_NANDSIM_DBG |
81 | #define CONFIG_NANDSIM_DBG 0 | 81 | #define CONFIG_NANDSIM_DBG 0 |
82 | #endif | 82 | #endif |
83 | #ifndef CONFIG_NANDSIM_MAX_PARTS | ||
84 | #define CONFIG_NANDSIM_MAX_PARTS 32 | ||
85 | #endif | ||
83 | 86 | ||
84 | static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE; | 87 | static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE; |
85 | static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE; | 88 | static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE; |
@@ -94,7 +97,7 @@ static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH; | |||
94 | static uint do_delays = CONFIG_NANDSIM_DO_DELAYS; | 97 | static uint do_delays = CONFIG_NANDSIM_DO_DELAYS; |
95 | static uint log = CONFIG_NANDSIM_LOG; | 98 | static uint log = CONFIG_NANDSIM_LOG; |
96 | static uint dbg = CONFIG_NANDSIM_DBG; | 99 | static uint dbg = CONFIG_NANDSIM_DBG; |
97 | static unsigned long parts[MAX_MTD_DEVICES]; | 100 | static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS]; |
98 | static unsigned int parts_num; | 101 | static unsigned int parts_num; |
99 | static char *badblocks = NULL; | 102 | static char *badblocks = NULL; |
100 | static char *weakblocks = NULL; | 103 | static char *weakblocks = NULL; |
@@ -135,8 +138,8 @@ MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read I | |||
135 | MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)"); | 138 | MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)"); |
136 | MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds"); | 139 | MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds"); |
137 | MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)"); | 140 | MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)"); |
138 | MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanodeconds)"); | 141 | MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)"); |
139 | MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanodeconds)"); | 142 | MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)"); |
140 | MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)"); | 143 | MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)"); |
141 | MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero"); | 144 | MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero"); |
142 | MODULE_PARM_DESC(log, "Perform logging if not zero"); | 145 | MODULE_PARM_DESC(log, "Perform logging if not zero"); |
@@ -288,7 +291,7 @@ union ns_mem { | |||
288 | * The structure which describes all the internal simulator data. | 291 | * The structure which describes all the internal simulator data. |
289 | */ | 292 | */ |
290 | struct nandsim { | 293 | struct nandsim { |
291 | struct mtd_partition partitions[MAX_MTD_DEVICES]; | 294 | struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS]; |
292 | unsigned int nbparts; | 295 | unsigned int nbparts; |
293 | 296 | ||
294 | uint busw; /* flash chip bus width (8 or 16) */ | 297 | uint busw; /* flash chip bus width (8 or 16) */ |
@@ -312,7 +315,7 @@ struct nandsim { | |||
312 | union ns_mem buf; | 315 | union ns_mem buf; |
313 | 316 | ||
314 | /* NAND flash "geometry" */ | 317 | /* NAND flash "geometry" */ |
315 | struct nandsin_geometry { | 318 | struct { |
316 | uint64_t totsz; /* total flash size, bytes */ | 319 | uint64_t totsz; /* total flash size, bytes */ |
317 | uint32_t secsz; /* flash sector (erase block) size, bytes */ | 320 | uint32_t secsz; /* flash sector (erase block) size, bytes */ |
318 | uint pgsz; /* NAND flash page size, bytes */ | 321 | uint pgsz; /* NAND flash page size, bytes */ |
@@ -331,7 +334,7 @@ struct nandsim { | |||
331 | } geom; | 334 | } geom; |
332 | 335 | ||
333 | /* NAND flash internal registers */ | 336 | /* NAND flash internal registers */ |
334 | struct nandsim_regs { | 337 | struct { |
335 | unsigned command; /* the command register */ | 338 | unsigned command; /* the command register */ |
336 | u_char status; /* the status register */ | 339 | u_char status; /* the status register */ |
337 | uint row; /* the page number */ | 340 | uint row; /* the page number */ |
@@ -342,7 +345,7 @@ struct nandsim { | |||
342 | } regs; | 345 | } regs; |
343 | 346 | ||
344 | /* NAND flash lines state */ | 347 | /* NAND flash lines state */ |
345 | struct ns_lines_status { | 348 | struct { |
346 | int ce; /* chip Enable */ | 349 | int ce; /* chip Enable */ |
347 | int cle; /* command Latch Enable */ | 350 | int cle; /* command Latch Enable */ |
348 | int ale; /* address Latch Enable */ | 351 | int ale; /* address Latch Enable */ |
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c index 1f6f741af5da..8c0b69375224 100644 --- a/drivers/mtd/nand/nomadik_nand.c +++ b/drivers/mtd/nand/nomadik_nand.c | |||
@@ -105,21 +105,21 @@ static int nomadik_nand_probe(struct platform_device *pdev) | |||
105 | ret = -EIO; | 105 | ret = -EIO; |
106 | goto err_unmap; | 106 | goto err_unmap; |
107 | } | 107 | } |
108 | host->addr_va = ioremap(res->start, res->end - res->start + 1); | 108 | host->addr_va = ioremap(res->start, resource_size(res)); |
109 | 109 | ||
110 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data"); | 110 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data"); |
111 | if (!res) { | 111 | if (!res) { |
112 | ret = -EIO; | 112 | ret = -EIO; |
113 | goto err_unmap; | 113 | goto err_unmap; |
114 | } | 114 | } |
115 | host->data_va = ioremap(res->start, res->end - res->start + 1); | 115 | host->data_va = ioremap(res->start, resource_size(res)); |
116 | 116 | ||
117 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd"); | 117 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd"); |
118 | if (!res) { | 118 | if (!res) { |
119 | ret = -EIO; | 119 | ret = -EIO; |
120 | goto err_unmap; | 120 | goto err_unmap; |
121 | } | 121 | } |
122 | host->cmd_va = ioremap(res->start, res->end - res->start + 1); | 122 | host->cmd_va = ioremap(res->start, resource_size(res)); |
123 | 123 | ||
124 | if (!host->addr_va || !host->data_va || !host->cmd_va) { | 124 | if (!host->addr_va || !host->data_va || !host->cmd_va) { |
125 | ret = -ENOMEM; | 125 | ret = -ENOMEM; |
diff --git a/drivers/mtd/nand/w90p910_nand.c b/drivers/mtd/nand/nuc900_nand.c index 7680e731348a..6eddf7361ed7 100644 --- a/drivers/mtd/nand/w90p910_nand.c +++ b/drivers/mtd/nand/nuc900_nand.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2009 Nuvoton technology corporation. | 2 | * Copyright © 2009 Nuvoton technology corporation. |
3 | * | 3 | * |
4 | * Wan ZongShun <mcuos.com@gmail.com> | 4 | * Wan ZongShun <mcuos.com@gmail.com> |
5 | * | 5 | * |
@@ -55,7 +55,7 @@ | |||
55 | #define write_addr_reg(dev, val) \ | 55 | #define write_addr_reg(dev, val) \ |
56 | __raw_writel((val), (dev)->reg + REG_SMADDR) | 56 | __raw_writel((val), (dev)->reg + REG_SMADDR) |
57 | 57 | ||
58 | struct w90p910_nand { | 58 | struct nuc900_nand { |
59 | struct mtd_info mtd; | 59 | struct mtd_info mtd; |
60 | struct nand_chip chip; | 60 | struct nand_chip chip; |
61 | void __iomem *reg; | 61 | void __iomem *reg; |
@@ -76,49 +76,49 @@ static const struct mtd_partition partitions[] = { | |||
76 | } | 76 | } |
77 | }; | 77 | }; |
78 | 78 | ||
79 | static unsigned char w90p910_nand_read_byte(struct mtd_info *mtd) | 79 | static unsigned char nuc900_nand_read_byte(struct mtd_info *mtd) |
80 | { | 80 | { |
81 | unsigned char ret; | 81 | unsigned char ret; |
82 | struct w90p910_nand *nand; | 82 | struct nuc900_nand *nand; |
83 | 83 | ||
84 | nand = container_of(mtd, struct w90p910_nand, mtd); | 84 | nand = container_of(mtd, struct nuc900_nand, mtd); |
85 | 85 | ||
86 | ret = (unsigned char)read_data_reg(nand); | 86 | ret = (unsigned char)read_data_reg(nand); |
87 | 87 | ||
88 | return ret; | 88 | return ret; |
89 | } | 89 | } |
90 | 90 | ||
91 | static void w90p910_nand_read_buf(struct mtd_info *mtd, | 91 | static void nuc900_nand_read_buf(struct mtd_info *mtd, |
92 | unsigned char *buf, int len) | 92 | unsigned char *buf, int len) |
93 | { | 93 | { |
94 | int i; | 94 | int i; |
95 | struct w90p910_nand *nand; | 95 | struct nuc900_nand *nand; |
96 | 96 | ||
97 | nand = container_of(mtd, struct w90p910_nand, mtd); | 97 | nand = container_of(mtd, struct nuc900_nand, mtd); |
98 | 98 | ||
99 | for (i = 0; i < len; i++) | 99 | for (i = 0; i < len; i++) |
100 | buf[i] = (unsigned char)read_data_reg(nand); | 100 | buf[i] = (unsigned char)read_data_reg(nand); |
101 | } | 101 | } |
102 | 102 | ||
103 | static void w90p910_nand_write_buf(struct mtd_info *mtd, | 103 | static void nuc900_nand_write_buf(struct mtd_info *mtd, |
104 | const unsigned char *buf, int len) | 104 | const unsigned char *buf, int len) |
105 | { | 105 | { |
106 | int i; | 106 | int i; |
107 | struct w90p910_nand *nand; | 107 | struct nuc900_nand *nand; |
108 | 108 | ||
109 | nand = container_of(mtd, struct w90p910_nand, mtd); | 109 | nand = container_of(mtd, struct nuc900_nand, mtd); |
110 | 110 | ||
111 | for (i = 0; i < len; i++) | 111 | for (i = 0; i < len; i++) |
112 | write_data_reg(nand, buf[i]); | 112 | write_data_reg(nand, buf[i]); |
113 | } | 113 | } |
114 | 114 | ||
115 | static int w90p910_verify_buf(struct mtd_info *mtd, | 115 | static int nuc900_verify_buf(struct mtd_info *mtd, |
116 | const unsigned char *buf, int len) | 116 | const unsigned char *buf, int len) |
117 | { | 117 | { |
118 | int i; | 118 | int i; |
119 | struct w90p910_nand *nand; | 119 | struct nuc900_nand *nand; |
120 | 120 | ||
121 | nand = container_of(mtd, struct w90p910_nand, mtd); | 121 | nand = container_of(mtd, struct nuc900_nand, mtd); |
122 | 122 | ||
123 | for (i = 0; i < len; i++) { | 123 | for (i = 0; i < len; i++) { |
124 | if (buf[i] != (unsigned char)read_data_reg(nand)) | 124 | if (buf[i] != (unsigned char)read_data_reg(nand)) |
@@ -128,7 +128,7 @@ static int w90p910_verify_buf(struct mtd_info *mtd, | |||
128 | return 0; | 128 | return 0; |
129 | } | 129 | } |
130 | 130 | ||
131 | static int w90p910_check_rb(struct w90p910_nand *nand) | 131 | static int nuc900_check_rb(struct nuc900_nand *nand) |
132 | { | 132 | { |
133 | unsigned int val; | 133 | unsigned int val; |
134 | spin_lock(&nand->lock); | 134 | spin_lock(&nand->lock); |
@@ -139,24 +139,24 @@ static int w90p910_check_rb(struct w90p910_nand *nand) | |||
139 | return val; | 139 | return val; |
140 | } | 140 | } |
141 | 141 | ||
142 | static int w90p910_nand_devready(struct mtd_info *mtd) | 142 | static int nuc900_nand_devready(struct mtd_info *mtd) |
143 | { | 143 | { |
144 | struct w90p910_nand *nand; | 144 | struct nuc900_nand *nand; |
145 | int ready; | 145 | int ready; |
146 | 146 | ||
147 | nand = container_of(mtd, struct w90p910_nand, mtd); | 147 | nand = container_of(mtd, struct nuc900_nand, mtd); |
148 | 148 | ||
149 | ready = (w90p910_check_rb(nand)) ? 1 : 0; | 149 | ready = (nuc900_check_rb(nand)) ? 1 : 0; |
150 | return ready; | 150 | return ready; |
151 | } | 151 | } |
152 | 152 | ||
153 | static void w90p910_nand_command_lp(struct mtd_info *mtd, | 153 | static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command, |
154 | unsigned int command, int column, int page_addr) | 154 | int column, int page_addr) |
155 | { | 155 | { |
156 | register struct nand_chip *chip = mtd->priv; | 156 | register struct nand_chip *chip = mtd->priv; |
157 | struct w90p910_nand *nand; | 157 | struct nuc900_nand *nand; |
158 | 158 | ||
159 | nand = container_of(mtd, struct w90p910_nand, mtd); | 159 | nand = container_of(mtd, struct nuc900_nand, mtd); |
160 | 160 | ||
161 | if (command == NAND_CMD_READOOB) { | 161 | if (command == NAND_CMD_READOOB) { |
162 | column += mtd->writesize; | 162 | column += mtd->writesize; |
@@ -212,7 +212,7 @@ static void w90p910_nand_command_lp(struct mtd_info *mtd, | |||
212 | write_cmd_reg(nand, NAND_CMD_STATUS); | 212 | write_cmd_reg(nand, NAND_CMD_STATUS); |
213 | write_cmd_reg(nand, command); | 213 | write_cmd_reg(nand, command); |
214 | 214 | ||
215 | while (!w90p910_check_rb(nand)) | 215 | while (!nuc900_check_rb(nand)) |
216 | ; | 216 | ; |
217 | 217 | ||
218 | return; | 218 | return; |
@@ -241,7 +241,7 @@ static void w90p910_nand_command_lp(struct mtd_info *mtd, | |||
241 | } | 241 | } |
242 | 242 | ||
243 | 243 | ||
244 | static void w90p910_nand_enable(struct w90p910_nand *nand) | 244 | static void nuc900_nand_enable(struct nuc900_nand *nand) |
245 | { | 245 | { |
246 | unsigned int val; | 246 | unsigned int val; |
247 | spin_lock(&nand->lock); | 247 | spin_lock(&nand->lock); |
@@ -262,37 +262,37 @@ static void w90p910_nand_enable(struct w90p910_nand *nand) | |||
262 | spin_unlock(&nand->lock); | 262 | spin_unlock(&nand->lock); |
263 | } | 263 | } |
264 | 264 | ||
265 | static int __devinit w90p910_nand_probe(struct platform_device *pdev) | 265 | static int __devinit nuc900_nand_probe(struct platform_device *pdev) |
266 | { | 266 | { |
267 | struct w90p910_nand *w90p910_nand; | 267 | struct nuc900_nand *nuc900_nand; |
268 | struct nand_chip *chip; | 268 | struct nand_chip *chip; |
269 | int retval; | 269 | int retval; |
270 | struct resource *res; | 270 | struct resource *res; |
271 | 271 | ||
272 | retval = 0; | 272 | retval = 0; |
273 | 273 | ||
274 | w90p910_nand = kzalloc(sizeof(struct w90p910_nand), GFP_KERNEL); | 274 | nuc900_nand = kzalloc(sizeof(struct nuc900_nand), GFP_KERNEL); |
275 | if (!w90p910_nand) | 275 | if (!nuc900_nand) |
276 | return -ENOMEM; | 276 | return -ENOMEM; |
277 | chip = &(w90p910_nand->chip); | 277 | chip = &(nuc900_nand->chip); |
278 | 278 | ||
279 | w90p910_nand->mtd.priv = chip; | 279 | nuc900_nand->mtd.priv = chip; |
280 | w90p910_nand->mtd.owner = THIS_MODULE; | 280 | nuc900_nand->mtd.owner = THIS_MODULE; |
281 | spin_lock_init(&w90p910_nand->lock); | 281 | spin_lock_init(&nuc900_nand->lock); |
282 | 282 | ||
283 | w90p910_nand->clk = clk_get(&pdev->dev, NULL); | 283 | nuc900_nand->clk = clk_get(&pdev->dev, NULL); |
284 | if (IS_ERR(w90p910_nand->clk)) { | 284 | if (IS_ERR(nuc900_nand->clk)) { |
285 | retval = -ENOENT; | 285 | retval = -ENOENT; |
286 | goto fail1; | 286 | goto fail1; |
287 | } | 287 | } |
288 | clk_enable(w90p910_nand->clk); | 288 | clk_enable(nuc900_nand->clk); |
289 | 289 | ||
290 | chip->cmdfunc = w90p910_nand_command_lp; | 290 | chip->cmdfunc = nuc900_nand_command_lp; |
291 | chip->dev_ready = w90p910_nand_devready; | 291 | chip->dev_ready = nuc900_nand_devready; |
292 | chip->read_byte = w90p910_nand_read_byte; | 292 | chip->read_byte = nuc900_nand_read_byte; |
293 | chip->write_buf = w90p910_nand_write_buf; | 293 | chip->write_buf = nuc900_nand_write_buf; |
294 | chip->read_buf = w90p910_nand_read_buf; | 294 | chip->read_buf = nuc900_nand_read_buf; |
295 | chip->verify_buf = w90p910_verify_buf; | 295 | chip->verify_buf = nuc900_verify_buf; |
296 | chip->chip_delay = 50; | 296 | chip->chip_delay = 50; |
297 | chip->options = 0; | 297 | chip->options = 0; |
298 | chip->ecc.mode = NAND_ECC_SOFT; | 298 | chip->ecc.mode = NAND_ECC_SOFT; |
@@ -308,75 +308,75 @@ static int __devinit w90p910_nand_probe(struct platform_device *pdev) | |||
308 | goto fail1; | 308 | goto fail1; |
309 | } | 309 | } |
310 | 310 | ||
311 | w90p910_nand->reg = ioremap(res->start, resource_size(res)); | 311 | nuc900_nand->reg = ioremap(res->start, resource_size(res)); |
312 | if (!w90p910_nand->reg) { | 312 | if (!nuc900_nand->reg) { |
313 | retval = -ENOMEM; | 313 | retval = -ENOMEM; |
314 | goto fail2; | 314 | goto fail2; |
315 | } | 315 | } |
316 | 316 | ||
317 | w90p910_nand_enable(w90p910_nand); | 317 | nuc900_nand_enable(nuc900_nand); |
318 | 318 | ||
319 | if (nand_scan(&(w90p910_nand->mtd), 1)) { | 319 | if (nand_scan(&(nuc900_nand->mtd), 1)) { |
320 | retval = -ENXIO; | 320 | retval = -ENXIO; |
321 | goto fail3; | 321 | goto fail3; |
322 | } | 322 | } |
323 | 323 | ||
324 | add_mtd_partitions(&(w90p910_nand->mtd), partitions, | 324 | add_mtd_partitions(&(nuc900_nand->mtd), partitions, |
325 | ARRAY_SIZE(partitions)); | 325 | ARRAY_SIZE(partitions)); |
326 | 326 | ||
327 | platform_set_drvdata(pdev, w90p910_nand); | 327 | platform_set_drvdata(pdev, nuc900_nand); |
328 | 328 | ||
329 | return retval; | 329 | return retval; |
330 | 330 | ||
331 | fail3: iounmap(w90p910_nand->reg); | 331 | fail3: iounmap(nuc900_nand->reg); |
332 | fail2: release_mem_region(res->start, resource_size(res)); | 332 | fail2: release_mem_region(res->start, resource_size(res)); |
333 | fail1: kfree(w90p910_nand); | 333 | fail1: kfree(nuc900_nand); |
334 | return retval; | 334 | return retval; |
335 | } | 335 | } |
336 | 336 | ||
337 | static int __devexit w90p910_nand_remove(struct platform_device *pdev) | 337 | static int __devexit nuc900_nand_remove(struct platform_device *pdev) |
338 | { | 338 | { |
339 | struct w90p910_nand *w90p910_nand = platform_get_drvdata(pdev); | 339 | struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev); |
340 | struct resource *res; | 340 | struct resource *res; |
341 | 341 | ||
342 | iounmap(w90p910_nand->reg); | 342 | iounmap(nuc900_nand->reg); |
343 | 343 | ||
344 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 344 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
345 | release_mem_region(res->start, resource_size(res)); | 345 | release_mem_region(res->start, resource_size(res)); |
346 | 346 | ||
347 | clk_disable(w90p910_nand->clk); | 347 | clk_disable(nuc900_nand->clk); |
348 | clk_put(w90p910_nand->clk); | 348 | clk_put(nuc900_nand->clk); |
349 | 349 | ||
350 | kfree(w90p910_nand); | 350 | kfree(nuc900_nand); |
351 | 351 | ||
352 | platform_set_drvdata(pdev, NULL); | 352 | platform_set_drvdata(pdev, NULL); |
353 | 353 | ||
354 | return 0; | 354 | return 0; |
355 | } | 355 | } |
356 | 356 | ||
357 | static struct platform_driver w90p910_nand_driver = { | 357 | static struct platform_driver nuc900_nand_driver = { |
358 | .probe = w90p910_nand_probe, | 358 | .probe = nuc900_nand_probe, |
359 | .remove = __devexit_p(w90p910_nand_remove), | 359 | .remove = __devexit_p(nuc900_nand_remove), |
360 | .driver = { | 360 | .driver = { |
361 | .name = "w90p910-fmi", | 361 | .name = "nuc900-fmi", |
362 | .owner = THIS_MODULE, | 362 | .owner = THIS_MODULE, |
363 | }, | 363 | }, |
364 | }; | 364 | }; |
365 | 365 | ||
366 | static int __init w90p910_nand_init(void) | 366 | static int __init nuc900_nand_init(void) |
367 | { | 367 | { |
368 | return platform_driver_register(&w90p910_nand_driver); | 368 | return platform_driver_register(&nuc900_nand_driver); |
369 | } | 369 | } |
370 | 370 | ||
371 | static void __exit w90p910_nand_exit(void) | 371 | static void __exit nuc900_nand_exit(void) |
372 | { | 372 | { |
373 | platform_driver_unregister(&w90p910_nand_driver); | 373 | platform_driver_unregister(&nuc900_nand_driver); |
374 | } | 374 | } |
375 | 375 | ||
376 | module_init(w90p910_nand_init); | 376 | module_init(nuc900_nand_init); |
377 | module_exit(w90p910_nand_exit); | 377 | module_exit(nuc900_nand_exit); |
378 | 378 | ||
379 | MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); | 379 | MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); |
380 | MODULE_DESCRIPTION("w90p910 nand driver!"); | 380 | MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!"); |
381 | MODULE_LICENSE("GPL"); | 381 | MODULE_LICENSE("GPL"); |
382 | MODULE_ALIAS("platform:w90p910-fmi"); | 382 | MODULE_ALIAS("platform:nuc900-fmi"); |
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 7545568fce47..ee87325c7712 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c | |||
@@ -292,11 +292,14 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len) | |||
292 | u32 *p = (u32 *)buf; | 292 | u32 *p = (u32 *)buf; |
293 | 293 | ||
294 | /* take care of subpage reads */ | 294 | /* take care of subpage reads */ |
295 | for (; len % 4 != 0; ) { | 295 | if (len % 4) { |
296 | *buf++ = __raw_readb(info->nand.IO_ADDR_R); | 296 | if (info->nand.options & NAND_BUSWIDTH_16) |
297 | len--; | 297 | omap_read_buf16(mtd, buf, len % 4); |
298 | else | ||
299 | omap_read_buf8(mtd, buf, len % 4); | ||
300 | p = (u32 *) (buf + len % 4); | ||
301 | len -= len % 4; | ||
298 | } | 302 | } |
299 | p = (u32 *) buf; | ||
300 | 303 | ||
301 | /* configure and start prefetch transfer */ | 304 | /* configure and start prefetch transfer */ |
302 | ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0); | 305 | ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0); |
@@ -502,7 +505,7 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd, | |||
502 | omap_write_buf_pref(mtd, buf, len); | 505 | omap_write_buf_pref(mtd, buf, len); |
503 | else | 506 | else |
504 | /* start transfer in DMA mode */ | 507 | /* start transfer in DMA mode */ |
505 | omap_nand_dma_transfer(mtd, buf, len, 0x1); | 508 | omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1); |
506 | } | 509 | } |
507 | 510 | ||
508 | /** | 511 | /** |
@@ -1028,7 +1031,8 @@ out_free_info: | |||
1028 | static int omap_nand_remove(struct platform_device *pdev) | 1031 | static int omap_nand_remove(struct platform_device *pdev) |
1029 | { | 1032 | { |
1030 | struct mtd_info *mtd = platform_get_drvdata(pdev); | 1033 | struct mtd_info *mtd = platform_get_drvdata(pdev); |
1031 | struct omap_nand_info *info = mtd->priv; | 1034 | struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, |
1035 | mtd); | ||
1032 | 1036 | ||
1033 | platform_set_drvdata(pdev, NULL); | 1037 | platform_set_drvdata(pdev, NULL); |
1034 | if (use_dma) | 1038 | if (use_dma) |
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c index f59c07427af3..da6e75343052 100644 --- a/drivers/mtd/nand/orion_nand.c +++ b/drivers/mtd/nand/orion_nand.c | |||
@@ -60,7 +60,13 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | |||
60 | } | 60 | } |
61 | buf64 = (uint64_t *)buf; | 61 | buf64 = (uint64_t *)buf; |
62 | while (i < len/8) { | 62 | while (i < len/8) { |
63 | uint64_t x; | 63 | /* |
64 | * Since GCC has no proper constraint (PR 43518) | ||
65 | * force x variable to r2/r3 registers as ldrd instruction | ||
66 | * requires first register to be even. | ||
67 | */ | ||
68 | register uint64_t x asm ("r2"); | ||
69 | |||
64 | asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base)); | 70 | asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base)); |
65 | buf64[i++] = x; | 71 | buf64[i++] = x; |
66 | } | 72 | } |
@@ -74,6 +80,7 @@ static int __init orion_nand_probe(struct platform_device *pdev) | |||
74 | struct mtd_info *mtd; | 80 | struct mtd_info *mtd; |
75 | struct nand_chip *nc; | 81 | struct nand_chip *nc; |
76 | struct orion_nand_data *board; | 82 | struct orion_nand_data *board; |
83 | struct resource *res; | ||
77 | void __iomem *io_base; | 84 | void __iomem *io_base; |
78 | int ret = 0; | 85 | int ret = 0; |
79 | #ifdef CONFIG_MTD_PARTITIONS | 86 | #ifdef CONFIG_MTD_PARTITIONS |
@@ -89,8 +96,13 @@ static int __init orion_nand_probe(struct platform_device *pdev) | |||
89 | } | 96 | } |
90 | mtd = (struct mtd_info *)(nc + 1); | 97 | mtd = (struct mtd_info *)(nc + 1); |
91 | 98 | ||
92 | io_base = ioremap(pdev->resource[0].start, | 99 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
93 | pdev->resource[0].end - pdev->resource[0].start + 1); | 100 | if (!res) { |
101 | ret = -ENODEV; | ||
102 | goto no_res; | ||
103 | } | ||
104 | |||
105 | io_base = ioremap(res->start, resource_size(res)); | ||
94 | if (!io_base) { | 106 | if (!io_base) { |
95 | printk(KERN_ERR "orion_nand: ioremap failed\n"); | 107 | printk(KERN_ERR "orion_nand: ioremap failed\n"); |
96 | ret = -EIO; | 108 | ret = -EIO; |
@@ -114,6 +126,9 @@ static int __init orion_nand_probe(struct platform_device *pdev) | |||
114 | if (board->width == 16) | 126 | if (board->width == 16) |
115 | nc->options |= NAND_BUSWIDTH_16; | 127 | nc->options |= NAND_BUSWIDTH_16; |
116 | 128 | ||
129 | if (board->dev_ready) | ||
130 | nc->dev_ready = board->dev_ready; | ||
131 | |||
117 | platform_set_drvdata(pdev, mtd); | 132 | platform_set_drvdata(pdev, mtd); |
118 | 133 | ||
119 | if (nand_scan(mtd, 1)) { | 134 | if (nand_scan(mtd, 1)) { |
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c index a97e9c95ab6a..f02af24d033a 100644 --- a/drivers/mtd/nand/pasemi_nand.c +++ b/drivers/mtd/nand/pasemi_nand.c | |||
@@ -209,7 +209,7 @@ static int __devexit pasemi_nand_remove(struct of_device *ofdev) | |||
209 | return 0; | 209 | return 0; |
210 | } | 210 | } |
211 | 211 | ||
212 | static struct of_device_id pasemi_nand_match[] = | 212 | static const struct of_device_id pasemi_nand_match[] = |
213 | { | 213 | { |
214 | { | 214 | { |
215 | .compatible = "pasemi,localbus-nand", | 215 | .compatible = "pasemi,localbus-nand", |
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 5d55152162cf..e02fa4f0e3c9 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c | |||
@@ -1320,6 +1320,17 @@ static int pxa3xx_nand_probe(struct platform_device *pdev) | |||
1320 | goto fail_free_irq; | 1320 | goto fail_free_irq; |
1321 | } | 1321 | } |
1322 | 1322 | ||
1323 | if (mtd_has_cmdlinepart()) { | ||
1324 | static const char *probes[] = { "cmdlinepart", NULL }; | ||
1325 | struct mtd_partition *parts; | ||
1326 | int nr_parts; | ||
1327 | |||
1328 | nr_parts = parse_mtd_partitions(mtd, probes, &parts, 0); | ||
1329 | |||
1330 | if (nr_parts) | ||
1331 | return add_mtd_partitions(mtd, parts, nr_parts); | ||
1332 | } | ||
1333 | |||
1323 | return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); | 1334 | return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); |
1324 | 1335 | ||
1325 | fail_free_irq: | 1336 | fail_free_irq: |
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c new file mode 100644 index 000000000000..78a423295474 --- /dev/null +++ b/drivers/mtd/nand/r852.c | |||
@@ -0,0 +1,1140 @@ | |||
1 | /* | ||
2 | * Copyright © 2009 - Maxim Levitsky | ||
3 | * driver for Ricoh xD readers | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/jiffies.h> | ||
13 | #include <linux/workqueue.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/pci_ids.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <asm/byteorder.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include "sm_common.h" | ||
22 | #include "r852.h" | ||
23 | |||
24 | |||
25 | static int r852_enable_dma = 1; | ||
26 | module_param(r852_enable_dma, bool, S_IRUGO); | ||
27 | MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)"); | ||
28 | |||
29 | static int debug; | ||
30 | module_param(debug, int, S_IRUGO | S_IWUSR); | ||
31 | MODULE_PARM_DESC(debug, "Debug level (0-2)"); | ||
32 | |||
33 | /* read register */ | ||
34 | static inline uint8_t r852_read_reg(struct r852_device *dev, int address) | ||
35 | { | ||
36 | uint8_t reg = readb(dev->mmio + address); | ||
37 | return reg; | ||
38 | } | ||
39 | |||
40 | /* write register */ | ||
41 | static inline void r852_write_reg(struct r852_device *dev, | ||
42 | int address, uint8_t value) | ||
43 | { | ||
44 | writeb(value, dev->mmio + address); | ||
45 | mmiowb(); | ||
46 | } | ||
47 | |||
48 | |||
49 | /* read dword sized register */ | ||
50 | static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address) | ||
51 | { | ||
52 | uint32_t reg = le32_to_cpu(readl(dev->mmio + address)); | ||
53 | return reg; | ||
54 | } | ||
55 | |||
56 | /* write dword sized register */ | ||
57 | static inline void r852_write_reg_dword(struct r852_device *dev, | ||
58 | int address, uint32_t value) | ||
59 | { | ||
60 | writel(cpu_to_le32(value), dev->mmio + address); | ||
61 | mmiowb(); | ||
62 | } | ||
63 | |||
64 | /* returns pointer to our private structure */ | ||
65 | static inline struct r852_device *r852_get_dev(struct mtd_info *mtd) | ||
66 | { | ||
67 | struct nand_chip *chip = (struct nand_chip *)mtd->priv; | ||
68 | return (struct r852_device *)chip->priv; | ||
69 | } | ||
70 | |||
71 | |||
72 | /* check if controller supports dma */ | ||
73 | static void r852_dma_test(struct r852_device *dev) | ||
74 | { | ||
75 | dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) & | ||
76 | (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2); | ||
77 | |||
78 | if (!dev->dma_usable) | ||
79 | message("Non dma capable device detected, dma disabled"); | ||
80 | |||
81 | if (!r852_enable_dma) { | ||
82 | message("disabling dma on user request"); | ||
83 | dev->dma_usable = 0; | ||
84 | } | ||
85 | } | ||
86 | |||
87 | /* | ||
88 | * Enable dma. Enables ether first or second stage of the DMA, | ||
89 | * Expects dev->dma_dir and dev->dma_state be set | ||
90 | */ | ||
91 | static void r852_dma_enable(struct r852_device *dev) | ||
92 | { | ||
93 | uint8_t dma_reg, dma_irq_reg; | ||
94 | |||
95 | /* Set up dma settings */ | ||
96 | dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS); | ||
97 | dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY); | ||
98 | |||
99 | if (dev->dma_dir) | ||
100 | dma_reg |= R852_DMA_READ; | ||
101 | |||
102 | if (dev->dma_state == DMA_INTERNAL) { | ||
103 | dma_reg |= R852_DMA_INTERNAL; | ||
104 | /* Precaution to make sure HW doesn't write */ | ||
105 | /* to random kernel memory */ | ||
106 | r852_write_reg_dword(dev, R852_DMA_ADDR, | ||
107 | cpu_to_le32(dev->phys_bounce_buffer)); | ||
108 | } else { | ||
109 | dma_reg |= R852_DMA_MEMORY; | ||
110 | r852_write_reg_dword(dev, R852_DMA_ADDR, | ||
111 | cpu_to_le32(dev->phys_dma_addr)); | ||
112 | } | ||
113 | |||
114 | /* Precaution: make sure write reached the device */ | ||
115 | r852_read_reg_dword(dev, R852_DMA_ADDR); | ||
116 | |||
117 | r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg); | ||
118 | |||
119 | /* Set dma irq */ | ||
120 | dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE); | ||
121 | r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, | ||
122 | dma_irq_reg | | ||
123 | R852_DMA_IRQ_INTERNAL | | ||
124 | R852_DMA_IRQ_ERROR | | ||
125 | R852_DMA_IRQ_MEMORY); | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * Disable dma, called from the interrupt handler, which specifies | ||
130 | * success of the operation via 'error' argument | ||
131 | */ | ||
132 | static void r852_dma_done(struct r852_device *dev, int error) | ||
133 | { | ||
134 | WARN_ON(dev->dma_stage == 0); | ||
135 | |||
136 | r852_write_reg_dword(dev, R852_DMA_IRQ_STA, | ||
137 | r852_read_reg_dword(dev, R852_DMA_IRQ_STA)); | ||
138 | |||
139 | r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0); | ||
140 | r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0); | ||
141 | |||
142 | /* Precaution to make sure HW doesn't write to random kernel memory */ | ||
143 | r852_write_reg_dword(dev, R852_DMA_ADDR, | ||
144 | cpu_to_le32(dev->phys_bounce_buffer)); | ||
145 | r852_read_reg_dword(dev, R852_DMA_ADDR); | ||
146 | |||
147 | dev->dma_error = error; | ||
148 | dev->dma_stage = 0; | ||
149 | |||
150 | if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer) | ||
151 | pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN, | ||
152 | dev->dma_dir ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); | ||
153 | complete(&dev->dma_done); | ||
154 | } | ||
155 | |||
156 | /* | ||
157 | * Wait, till dma is done, which includes both phases of it | ||
158 | */ | ||
159 | static int r852_dma_wait(struct r852_device *dev) | ||
160 | { | ||
161 | long timeout = wait_for_completion_timeout(&dev->dma_done, | ||
162 | msecs_to_jiffies(1000)); | ||
163 | if (!timeout) { | ||
164 | dbg("timeout waiting for DMA interrupt"); | ||
165 | return -ETIMEDOUT; | ||
166 | } | ||
167 | |||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * Read/Write one page using dma. Only pages can be read (512 bytes) | ||
173 | */ | ||
174 | static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read) | ||
175 | { | ||
176 | int bounce = 0; | ||
177 | unsigned long flags; | ||
178 | int error; | ||
179 | |||
180 | dev->dma_error = 0; | ||
181 | |||
182 | /* Set dma direction */ | ||
183 | dev->dma_dir = do_read; | ||
184 | dev->dma_stage = 1; | ||
185 | |||
186 | dbg_verbose("doing dma %s ", do_read ? "read" : "write"); | ||
187 | |||
188 | /* Set intial dma state: for reading first fill on board buffer, | ||
189 | from device, for writes first fill the buffer from memory*/ | ||
190 | dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY; | ||
191 | |||
192 | /* if incoming buffer is not page aligned, we should do bounce */ | ||
193 | if ((unsigned long)buf & (R852_DMA_LEN-1)) | ||
194 | bounce = 1; | ||
195 | |||
196 | if (!bounce) { | ||
197 | dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf, | ||
198 | R852_DMA_LEN, | ||
199 | (do_read ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE)); | ||
200 | |||
201 | if (pci_dma_mapping_error(dev->pci_dev, dev->phys_dma_addr)) | ||
202 | bounce = 1; | ||
203 | } | ||
204 | |||
205 | if (bounce) { | ||
206 | dbg_verbose("dma: using bounce buffer"); | ||
207 | dev->phys_dma_addr = dev->phys_bounce_buffer; | ||
208 | if (!do_read) | ||
209 | memcpy(dev->bounce_buffer, buf, R852_DMA_LEN); | ||
210 | } | ||
211 | |||
212 | /* Enable DMA */ | ||
213 | spin_lock_irqsave(&dev->irqlock, flags); | ||
214 | r852_dma_enable(dev); | ||
215 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
216 | |||
217 | /* Wait till complete */ | ||
218 | error = r852_dma_wait(dev); | ||
219 | |||
220 | if (error) { | ||
221 | r852_dma_done(dev, error); | ||
222 | return; | ||
223 | } | ||
224 | |||
225 | if (do_read && bounce) | ||
226 | memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN); | ||
227 | } | ||
228 | |||
229 | /* | ||
230 | * Program data lines of the nand chip to send data to it | ||
231 | */ | ||
232 | void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | ||
233 | { | ||
234 | struct r852_device *dev = r852_get_dev(mtd); | ||
235 | uint32_t reg; | ||
236 | |||
237 | /* Don't allow any access to hardware if we suspect card removal */ | ||
238 | if (dev->card_unstable) | ||
239 | return; | ||
240 | |||
241 | /* Special case for whole sector read */ | ||
242 | if (len == R852_DMA_LEN && dev->dma_usable) { | ||
243 | r852_do_dma(dev, (uint8_t *)buf, 0); | ||
244 | return; | ||
245 | } | ||
246 | |||
247 | /* write DWORD chinks - faster */ | ||
248 | while (len) { | ||
249 | reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24; | ||
250 | r852_write_reg_dword(dev, R852_DATALINE, reg); | ||
251 | buf += 4; | ||
252 | len -= 4; | ||
253 | |||
254 | } | ||
255 | |||
256 | /* write rest */ | ||
257 | while (len) | ||
258 | r852_write_reg(dev, R852_DATALINE, *buf++); | ||
259 | } | ||
260 | |||
261 | /* | ||
262 | * Read data lines of the nand chip to retrieve data | ||
263 | */ | ||
264 | void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | ||
265 | { | ||
266 | struct r852_device *dev = r852_get_dev(mtd); | ||
267 | uint32_t reg; | ||
268 | |||
269 | if (dev->card_unstable) { | ||
270 | /* since we can't signal error here, at least, return | ||
271 | predictable buffer */ | ||
272 | memset(buf, 0, len); | ||
273 | return; | ||
274 | } | ||
275 | |||
276 | /* special case for whole sector read */ | ||
277 | if (len == R852_DMA_LEN && dev->dma_usable) { | ||
278 | r852_do_dma(dev, buf, 1); | ||
279 | return; | ||
280 | } | ||
281 | |||
282 | /* read in dword sized chunks */ | ||
283 | while (len >= 4) { | ||
284 | |||
285 | reg = r852_read_reg_dword(dev, R852_DATALINE); | ||
286 | *buf++ = reg & 0xFF; | ||
287 | *buf++ = (reg >> 8) & 0xFF; | ||
288 | *buf++ = (reg >> 16) & 0xFF; | ||
289 | *buf++ = (reg >> 24) & 0xFF; | ||
290 | len -= 4; | ||
291 | } | ||
292 | |||
293 | /* read the reset by bytes */ | ||
294 | while (len--) | ||
295 | *buf++ = r852_read_reg(dev, R852_DATALINE); | ||
296 | } | ||
297 | |||
298 | /* | ||
299 | * Read one byte from nand chip | ||
300 | */ | ||
301 | static uint8_t r852_read_byte(struct mtd_info *mtd) | ||
302 | { | ||
303 | struct r852_device *dev = r852_get_dev(mtd); | ||
304 | |||
305 | /* Same problem as in r852_read_buf.... */ | ||
306 | if (dev->card_unstable) | ||
307 | return 0; | ||
308 | |||
309 | return r852_read_reg(dev, R852_DATALINE); | ||
310 | } | ||
311 | |||
312 | |||
313 | /* | ||
314 | * Readback the buffer to verify it | ||
315 | */ | ||
316 | int r852_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | ||
317 | { | ||
318 | struct r852_device *dev = r852_get_dev(mtd); | ||
319 | |||
320 | /* We can't be sure about anything here... */ | ||
321 | if (dev->card_unstable) | ||
322 | return -1; | ||
323 | |||
324 | /* This will never happen, unless you wired up a nand chip | ||
325 | with > 512 bytes page size to the reader */ | ||
326 | if (len > SM_SECTOR_SIZE) | ||
327 | return 0; | ||
328 | |||
329 | r852_read_buf(mtd, dev->tmp_buffer, len); | ||
330 | return memcmp(buf, dev->tmp_buffer, len); | ||
331 | } | ||
332 | |||
333 | /* | ||
334 | * Control several chip lines & send commands | ||
335 | */ | ||
336 | void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl) | ||
337 | { | ||
338 | struct r852_device *dev = r852_get_dev(mtd); | ||
339 | |||
340 | if (dev->card_unstable) | ||
341 | return; | ||
342 | |||
343 | if (ctrl & NAND_CTRL_CHANGE) { | ||
344 | |||
345 | dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND | | ||
346 | R852_CTL_ON | R852_CTL_CARDENABLE); | ||
347 | |||
348 | if (ctrl & NAND_ALE) | ||
349 | dev->ctlreg |= R852_CTL_DATA; | ||
350 | |||
351 | if (ctrl & NAND_CLE) | ||
352 | dev->ctlreg |= R852_CTL_COMMAND; | ||
353 | |||
354 | if (ctrl & NAND_NCE) | ||
355 | dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON); | ||
356 | else | ||
357 | dev->ctlreg &= ~R852_CTL_WRITE; | ||
358 | |||
359 | /* when write is stareted, enable write access */ | ||
360 | if (dat == NAND_CMD_ERASE1) | ||
361 | dev->ctlreg |= R852_CTL_WRITE; | ||
362 | |||
363 | r852_write_reg(dev, R852_CTL, dev->ctlreg); | ||
364 | } | ||
365 | |||
366 | /* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need | ||
367 | to set write mode */ | ||
368 | if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) { | ||
369 | dev->ctlreg |= R852_CTL_WRITE; | ||
370 | r852_write_reg(dev, R852_CTL, dev->ctlreg); | ||
371 | } | ||
372 | |||
373 | if (dat != NAND_CMD_NONE) | ||
374 | r852_write_reg(dev, R852_DATALINE, dat); | ||
375 | } | ||
376 | |||
377 | /* | ||
378 | * Wait till card is ready. | ||
379 | * based on nand_wait, but returns errors on DMA error | ||
380 | */ | ||
381 | int r852_wait(struct mtd_info *mtd, struct nand_chip *chip) | ||
382 | { | ||
383 | struct r852_device *dev = (struct r852_device *)chip->priv; | ||
384 | |||
385 | unsigned long timeout; | ||
386 | int status; | ||
387 | |||
388 | timeout = jiffies + (chip->state == FL_ERASING ? | ||
389 | msecs_to_jiffies(400) : msecs_to_jiffies(20)); | ||
390 | |||
391 | while (time_before(jiffies, timeout)) | ||
392 | if (chip->dev_ready(mtd)) | ||
393 | break; | ||
394 | |||
395 | chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); | ||
396 | status = (int)chip->read_byte(mtd); | ||
397 | |||
398 | /* Unfortunelly, no way to send detailed error status... */ | ||
399 | if (dev->dma_error) { | ||
400 | status |= NAND_STATUS_FAIL; | ||
401 | dev->dma_error = 0; | ||
402 | } | ||
403 | return status; | ||
404 | } | ||
405 | |||
406 | /* | ||
407 | * Check if card is ready | ||
408 | */ | ||
409 | |||
410 | int r852_ready(struct mtd_info *mtd) | ||
411 | { | ||
412 | struct r852_device *dev = r852_get_dev(mtd); | ||
413 | return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY); | ||
414 | } | ||
415 | |||
416 | |||
417 | /* | ||
418 | * Set ECC engine mode | ||
419 | */ | ||
420 | |||
421 | void r852_ecc_hwctl(struct mtd_info *mtd, int mode) | ||
422 | { | ||
423 | struct r852_device *dev = r852_get_dev(mtd); | ||
424 | |||
425 | if (dev->card_unstable) | ||
426 | return; | ||
427 | |||
428 | switch (mode) { | ||
429 | case NAND_ECC_READ: | ||
430 | case NAND_ECC_WRITE: | ||
431 | /* enable ecc generation/check*/ | ||
432 | dev->ctlreg |= R852_CTL_ECC_ENABLE; | ||
433 | |||
434 | /* flush ecc buffer */ | ||
435 | r852_write_reg(dev, R852_CTL, | ||
436 | dev->ctlreg | R852_CTL_ECC_ACCESS); | ||
437 | |||
438 | r852_read_reg_dword(dev, R852_DATALINE); | ||
439 | r852_write_reg(dev, R852_CTL, dev->ctlreg); | ||
440 | return; | ||
441 | |||
442 | case NAND_ECC_READSYN: | ||
443 | /* disable ecc generation */ | ||
444 | dev->ctlreg &= ~R852_CTL_ECC_ENABLE; | ||
445 | r852_write_reg(dev, R852_CTL, dev->ctlreg); | ||
446 | } | ||
447 | } | ||
448 | |||
449 | /* | ||
450 | * Calculate ECC, only used for writes | ||
451 | */ | ||
452 | |||
453 | int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat, | ||
454 | uint8_t *ecc_code) | ||
455 | { | ||
456 | struct r852_device *dev = r852_get_dev(mtd); | ||
457 | struct sm_oob *oob = (struct sm_oob *)ecc_code; | ||
458 | uint32_t ecc1, ecc2; | ||
459 | |||
460 | if (dev->card_unstable) | ||
461 | return 0; | ||
462 | |||
463 | dev->ctlreg &= ~R852_CTL_ECC_ENABLE; | ||
464 | r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS); | ||
465 | |||
466 | ecc1 = r852_read_reg_dword(dev, R852_DATALINE); | ||
467 | ecc2 = r852_read_reg_dword(dev, R852_DATALINE); | ||
468 | |||
469 | oob->ecc1[0] = (ecc1) & 0xFF; | ||
470 | oob->ecc1[1] = (ecc1 >> 8) & 0xFF; | ||
471 | oob->ecc1[2] = (ecc1 >> 16) & 0xFF; | ||
472 | |||
473 | oob->ecc2[0] = (ecc2) & 0xFF; | ||
474 | oob->ecc2[1] = (ecc2 >> 8) & 0xFF; | ||
475 | oob->ecc2[2] = (ecc2 >> 16) & 0xFF; | ||
476 | |||
477 | r852_write_reg(dev, R852_CTL, dev->ctlreg); | ||
478 | return 0; | ||
479 | } | ||
480 | |||
481 | /* | ||
482 | * Correct the data using ECC, hw did almost everything for us | ||
483 | */ | ||
484 | |||
485 | int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat, | ||
486 | uint8_t *read_ecc, uint8_t *calc_ecc) | ||
487 | { | ||
488 | uint16_t ecc_reg; | ||
489 | uint8_t ecc_status, err_byte; | ||
490 | int i, error = 0; | ||
491 | |||
492 | struct r852_device *dev = r852_get_dev(mtd); | ||
493 | |||
494 | if (dev->card_unstable) | ||
495 | return 0; | ||
496 | |||
497 | r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS); | ||
498 | ecc_reg = r852_read_reg_dword(dev, R852_DATALINE); | ||
499 | r852_write_reg(dev, R852_CTL, dev->ctlreg); | ||
500 | |||
501 | for (i = 0 ; i <= 1 ; i++) { | ||
502 | |||
503 | ecc_status = (ecc_reg >> 8) & 0xFF; | ||
504 | |||
505 | /* ecc uncorrectable error */ | ||
506 | if (ecc_status & R852_ECC_FAIL) { | ||
507 | dbg("ecc: unrecoverable error, in half %d", i); | ||
508 | error = -1; | ||
509 | goto exit; | ||
510 | } | ||
511 | |||
512 | /* correctable error */ | ||
513 | if (ecc_status & R852_ECC_CORRECTABLE) { | ||
514 | |||
515 | err_byte = ecc_reg & 0xFF; | ||
516 | dbg("ecc: recoverable error, " | ||
517 | "in half %d, byte %d, bit %d", i, | ||
518 | err_byte, ecc_status & R852_ECC_ERR_BIT_MSK); | ||
519 | |||
520 | dat[err_byte] ^= | ||
521 | 1 << (ecc_status & R852_ECC_ERR_BIT_MSK); | ||
522 | error++; | ||
523 | } | ||
524 | |||
525 | dat += 256; | ||
526 | ecc_reg >>= 16; | ||
527 | } | ||
528 | exit: | ||
529 | return error; | ||
530 | } | ||
531 | |||
532 | /* | ||
533 | * This is copy of nand_read_oob_std | ||
534 | * nand_read_oob_syndrome assumes we can send column address - we can't | ||
535 | */ | ||
536 | static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip, | ||
537 | int page, int sndcmd) | ||
538 | { | ||
539 | if (sndcmd) { | ||
540 | chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); | ||
541 | sndcmd = 0; | ||
542 | } | ||
543 | chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); | ||
544 | return sndcmd; | ||
545 | } | ||
546 | |||
547 | /* | ||
548 | * Start the nand engine | ||
549 | */ | ||
550 | |||
551 | void r852_engine_enable(struct r852_device *dev) | ||
552 | { | ||
553 | if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) { | ||
554 | r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON); | ||
555 | r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED); | ||
556 | } else { | ||
557 | r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED); | ||
558 | r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON); | ||
559 | } | ||
560 | msleep(300); | ||
561 | r852_write_reg(dev, R852_CTL, 0); | ||
562 | } | ||
563 | |||
564 | |||
565 | /* | ||
566 | * Stop the nand engine | ||
567 | */ | ||
568 | |||
569 | void r852_engine_disable(struct r852_device *dev) | ||
570 | { | ||
571 | r852_write_reg_dword(dev, R852_HW, 0); | ||
572 | r852_write_reg(dev, R852_CTL, R852_CTL_RESET); | ||
573 | } | ||
574 | |||
575 | /* | ||
576 | * Test if card is present | ||
577 | */ | ||
578 | |||
579 | void r852_card_update_present(struct r852_device *dev) | ||
580 | { | ||
581 | unsigned long flags; | ||
582 | uint8_t reg; | ||
583 | |||
584 | spin_lock_irqsave(&dev->irqlock, flags); | ||
585 | reg = r852_read_reg(dev, R852_CARD_STA); | ||
586 | dev->card_detected = !!(reg & R852_CARD_STA_PRESENT); | ||
587 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
588 | } | ||
589 | |||
590 | /* | ||
591 | * Update card detection IRQ state according to current card state | ||
592 | * which is read in r852_card_update_present | ||
593 | */ | ||
594 | void r852_update_card_detect(struct r852_device *dev) | ||
595 | { | ||
596 | int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE); | ||
597 | dev->card_unstable = 0; | ||
598 | |||
599 | card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT); | ||
600 | card_detect_reg |= R852_CARD_IRQ_GENABLE; | ||
601 | |||
602 | card_detect_reg |= dev->card_detected ? | ||
603 | R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT; | ||
604 | |||
605 | r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg); | ||
606 | } | ||
607 | |||
608 | ssize_t r852_media_type_show(struct device *sys_dev, | ||
609 | struct device_attribute *attr, char *buf) | ||
610 | { | ||
611 | struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev); | ||
612 | struct r852_device *dev = r852_get_dev(mtd); | ||
613 | char *data = dev->sm ? "smartmedia" : "xd"; | ||
614 | |||
615 | strcpy(buf, data); | ||
616 | return strlen(data); | ||
617 | } | ||
618 | |||
619 | DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL); | ||
620 | |||
621 | |||
622 | /* Detect properties of card in slot */ | ||
623 | void r852_update_media_status(struct r852_device *dev) | ||
624 | { | ||
625 | uint8_t reg; | ||
626 | unsigned long flags; | ||
627 | int readonly; | ||
628 | |||
629 | spin_lock_irqsave(&dev->irqlock, flags); | ||
630 | if (!dev->card_detected) { | ||
631 | message("card removed"); | ||
632 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
633 | return ; | ||
634 | } | ||
635 | |||
636 | readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO; | ||
637 | reg = r852_read_reg(dev, R852_DMA_CAP); | ||
638 | dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT); | ||
639 | |||
640 | message("detected %s %s card in slot", | ||
641 | dev->sm ? "SmartMedia" : "xD", | ||
642 | readonly ? "readonly" : "writeable"); | ||
643 | |||
644 | dev->readonly = readonly; | ||
645 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
646 | } | ||
647 | |||
648 | /* | ||
649 | * Register the nand device | ||
650 | * Called when the card is detected | ||
651 | */ | ||
652 | int r852_register_nand_device(struct r852_device *dev) | ||
653 | { | ||
654 | dev->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL); | ||
655 | |||
656 | if (!dev->mtd) | ||
657 | goto error1; | ||
658 | |||
659 | WARN_ON(dev->card_registred); | ||
660 | |||
661 | dev->mtd->owner = THIS_MODULE; | ||
662 | dev->mtd->priv = dev->chip; | ||
663 | dev->mtd->dev.parent = &dev->pci_dev->dev; | ||
664 | |||
665 | if (dev->readonly) | ||
666 | dev->chip->options |= NAND_ROM; | ||
667 | |||
668 | r852_engine_enable(dev); | ||
669 | |||
670 | if (sm_register_device(dev->mtd, dev->sm)) | ||
671 | goto error2; | ||
672 | |||
673 | if (device_create_file(&dev->mtd->dev, &dev_attr_media_type)) | ||
674 | message("can't create media type sysfs attribute"); | ||
675 | |||
676 | dev->card_registred = 1; | ||
677 | return 0; | ||
678 | error2: | ||
679 | kfree(dev->mtd); | ||
680 | error1: | ||
681 | /* Force card redetect */ | ||
682 | dev->card_detected = 0; | ||
683 | return -1; | ||
684 | } | ||
685 | |||
686 | /* | ||
687 | * Unregister the card | ||
688 | */ | ||
689 | |||
690 | void r852_unregister_nand_device(struct r852_device *dev) | ||
691 | { | ||
692 | if (!dev->card_registred) | ||
693 | return; | ||
694 | |||
695 | device_remove_file(&dev->mtd->dev, &dev_attr_media_type); | ||
696 | nand_release(dev->mtd); | ||
697 | r852_engine_disable(dev); | ||
698 | dev->card_registred = 0; | ||
699 | kfree(dev->mtd); | ||
700 | dev->mtd = NULL; | ||
701 | } | ||
702 | |||
703 | /* Card state updater */ | ||
704 | void r852_card_detect_work(struct work_struct *work) | ||
705 | { | ||
706 | struct r852_device *dev = | ||
707 | container_of(work, struct r852_device, card_detect_work.work); | ||
708 | |||
709 | r852_card_update_present(dev); | ||
710 | dev->card_unstable = 0; | ||
711 | |||
712 | /* False alarm */ | ||
713 | if (dev->card_detected == dev->card_registred) | ||
714 | goto exit; | ||
715 | |||
716 | /* Read media properties */ | ||
717 | r852_update_media_status(dev); | ||
718 | |||
719 | /* Register the card */ | ||
720 | if (dev->card_detected) | ||
721 | r852_register_nand_device(dev); | ||
722 | else | ||
723 | r852_unregister_nand_device(dev); | ||
724 | exit: | ||
725 | /* Update detection logic */ | ||
726 | r852_update_card_detect(dev); | ||
727 | } | ||
728 | |||
729 | /* Ack + disable IRQ generation */ | ||
730 | static void r852_disable_irqs(struct r852_device *dev) | ||
731 | { | ||
732 | uint8_t reg; | ||
733 | reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE); | ||
734 | r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK); | ||
735 | |||
736 | reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE); | ||
737 | r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, | ||
738 | reg & ~R852_DMA_IRQ_MASK); | ||
739 | |||
740 | r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK); | ||
741 | r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK); | ||
742 | } | ||
743 | |||
/* Interrupt handler (IRQF_SHARED).
   Handles two sources: card insert/remove events and the two-stage DMA
   state machine.  All state is protected by dev->irqlock. */
static irqreturn_t r852_irq(int irq, void *data)
{
	struct r852_device *dev = (struct r852_device *)data;

	uint8_t card_status, dma_status;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&dev->irqlock, flags);

	/* We can receive a shared interrupt while pci is suspended;
	   in that case reads will return 0xFFFFFFFF.... so bail out */
	if (dev->insuspend)
		goto out;

	/* handle card detection interrupts first (read + ack in one go) */
	card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
	r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);

	if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {

		ret = IRQ_HANDLED;
		/* INSERT bit set => card present, else it was removed */
		dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);

		/* we shouldn't receive any interrupts while we wait for card
			to settle */
		WARN_ON(dev->card_unstable);

		/* disable irqs while card is unstable */
		/* this will timeout DMA if active, but better that garbage */
		r852_disable_irqs(dev);

		if (dev->card_unstable)
			goto out;

		/* let the card state settle a bit, and then do the work
		   in r852_card_detect_work() */
		dev->card_unstable = 1;
		queue_delayed_work(dev->card_workqueue,
			&dev->card_detect_work, msecs_to_jiffies(100));
		goto out;
	}


	/* Handle dma interrupts (read + ack) */
	dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
	r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);

	if (dma_status & R852_DMA_IRQ_MASK) {

		ret = IRQ_HANDLED;

		if (dma_status & R852_DMA_IRQ_ERROR) {
			dbg("recieved dma error IRQ");
			r852_dma_done(dev, -EIO);
			goto out;
		}

		/* received DMA interrupt out of nowhere? */
		WARN_ON_ONCE(dev->dma_stage == 0);

		if (dev->dma_stage == 0)
			goto out;

		/* done device access: card -> internal buffer finished,
		   memory half is next */
		if (dev->dma_state == DMA_INTERNAL &&
				(dma_status & R852_DMA_IRQ_INTERNAL)) {

			dev->dma_state = DMA_MEMORY;
			dev->dma_stage++;
		}

		/* done memory DMA: internal buffer <-> memory finished */
		if (dev->dma_state == DMA_MEMORY &&
				(dma_status & R852_DMA_IRQ_MEMORY)) {
			dev->dma_state = DMA_INTERNAL;
			dev->dma_stage++;
		}

		/* Enable 2nd half of dma dance */
		if (dev->dma_stage == 2)
			r852_dma_enable(dev);

		/* Operation done: both halves completed */
		if (dev->dma_stage == 3)
			r852_dma_done(dev, 0);
		goto out;
	}

	/* Handle unknown interrupts (log only; already acked above) */
	if (dma_status)
		dbg("bad dma IRQ status = %x", dma_status);

	if (card_status & ~R852_CARD_STA_CD)
		dbg("strange card status = %x", card_status);

out:
	spin_unlock_irqrestore(&dev->irqlock, flags);
	return ret;
}
844 | |||
845 | int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) | ||
846 | { | ||
847 | int error; | ||
848 | struct nand_chip *chip; | ||
849 | struct r852_device *dev; | ||
850 | |||
851 | /* pci initialization */ | ||
852 | error = pci_enable_device(pci_dev); | ||
853 | |||
854 | if (error) | ||
855 | goto error1; | ||
856 | |||
857 | pci_set_master(pci_dev); | ||
858 | |||
859 | error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); | ||
860 | if (error) | ||
861 | goto error2; | ||
862 | |||
863 | error = pci_request_regions(pci_dev, DRV_NAME); | ||
864 | |||
865 | if (error) | ||
866 | goto error3; | ||
867 | |||
868 | error = -ENOMEM; | ||
869 | |||
870 | /* init nand chip, but register it only on card insert */ | ||
871 | chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL); | ||
872 | |||
873 | if (!chip) | ||
874 | goto error4; | ||
875 | |||
876 | /* commands */ | ||
877 | chip->cmd_ctrl = r852_cmdctl; | ||
878 | chip->waitfunc = r852_wait; | ||
879 | chip->dev_ready = r852_ready; | ||
880 | |||
881 | /* I/O */ | ||
882 | chip->read_byte = r852_read_byte; | ||
883 | chip->read_buf = r852_read_buf; | ||
884 | chip->write_buf = r852_write_buf; | ||
885 | chip->verify_buf = r852_verify_buf; | ||
886 | |||
887 | /* ecc */ | ||
888 | chip->ecc.mode = NAND_ECC_HW_SYNDROME; | ||
889 | chip->ecc.size = R852_DMA_LEN; | ||
890 | chip->ecc.bytes = SM_OOB_SIZE; | ||
891 | chip->ecc.hwctl = r852_ecc_hwctl; | ||
892 | chip->ecc.calculate = r852_ecc_calculate; | ||
893 | chip->ecc.correct = r852_ecc_correct; | ||
894 | |||
895 | /* TODO: hack */ | ||
896 | chip->ecc.read_oob = r852_read_oob; | ||
897 | |||
898 | /* init our device structure */ | ||
899 | dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL); | ||
900 | |||
901 | if (!dev) | ||
902 | goto error5; | ||
903 | |||
904 | chip->priv = dev; | ||
905 | dev->chip = chip; | ||
906 | dev->pci_dev = pci_dev; | ||
907 | pci_set_drvdata(pci_dev, dev); | ||
908 | |||
909 | dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN, | ||
910 | &dev->phys_bounce_buffer); | ||
911 | |||
912 | if (!dev->bounce_buffer) | ||
913 | goto error6; | ||
914 | |||
915 | |||
916 | error = -ENODEV; | ||
917 | dev->mmio = pci_ioremap_bar(pci_dev, 0); | ||
918 | |||
919 | if (!dev->mmio) | ||
920 | goto error7; | ||
921 | |||
922 | error = -ENOMEM; | ||
923 | dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL); | ||
924 | |||
925 | if (!dev->tmp_buffer) | ||
926 | goto error8; | ||
927 | |||
928 | init_completion(&dev->dma_done); | ||
929 | |||
930 | dev->card_workqueue = create_freezeable_workqueue(DRV_NAME); | ||
931 | |||
932 | if (!dev->card_workqueue) | ||
933 | goto error9; | ||
934 | |||
935 | INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work); | ||
936 | |||
937 | /* shutdown everything - precation */ | ||
938 | r852_engine_disable(dev); | ||
939 | r852_disable_irqs(dev); | ||
940 | |||
941 | r852_dma_test(dev); | ||
942 | |||
943 | /*register irq handler*/ | ||
944 | error = -ENODEV; | ||
945 | if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED, | ||
946 | DRV_NAME, dev)) | ||
947 | goto error10; | ||
948 | |||
949 | dev->irq = pci_dev->irq; | ||
950 | spin_lock_init(&dev->irqlock); | ||
951 | |||
952 | /* kick initial present test */ | ||
953 | dev->card_detected = 0; | ||
954 | r852_card_update_present(dev); | ||
955 | queue_delayed_work(dev->card_workqueue, | ||
956 | &dev->card_detect_work, 0); | ||
957 | |||
958 | |||
959 | printk(KERN_NOTICE DRV_NAME ": driver loaded succesfully\n"); | ||
960 | return 0; | ||
961 | |||
962 | error10: | ||
963 | destroy_workqueue(dev->card_workqueue); | ||
964 | error9: | ||
965 | kfree(dev->tmp_buffer); | ||
966 | error8: | ||
967 | pci_iounmap(pci_dev, dev->mmio); | ||
968 | error7: | ||
969 | pci_free_consistent(pci_dev, R852_DMA_LEN, | ||
970 | dev->bounce_buffer, dev->phys_bounce_buffer); | ||
971 | error6: | ||
972 | kfree(dev); | ||
973 | error5: | ||
974 | kfree(chip); | ||
975 | error4: | ||
976 | pci_release_regions(pci_dev); | ||
977 | error3: | ||
978 | error2: | ||
979 | pci_disable_device(pci_dev); | ||
980 | error1: | ||
981 | return error; | ||
982 | } | ||
983 | |||
/* PCI remove: tear everything down in roughly the reverse order of
   r852_probe().  Work is stopped before the NAND device is unregistered,
   and IRQs are drained before resources are freed. */
void r852_remove(struct pci_dev *pci_dev)
{
	struct r852_device *dev = pci_get_drvdata(pci_dev);

	/* Stop detect workqueue -
		we are going to unregister the device anyway*/
	cancel_delayed_work_sync(&dev->card_detect_work);
	destroy_workqueue(dev->card_workqueue);

	/* Unregister the device, this might make more IO */
	r852_unregister_nand_device(dev);

	/* Stop interrupts: mask + ack, wait out in-flight handlers, then
	   release the line */
	r852_disable_irqs(dev);
	synchronize_irq(dev->irq);
	free_irq(dev->irq, dev);

	/* Cleanup of buffers and mappings */
	kfree(dev->tmp_buffer);
	pci_iounmap(pci_dev, dev->mmio);
	pci_free_consistent(pci_dev, R852_DMA_LEN,
			dev->bounce_buffer, dev->phys_bounce_buffer);

	kfree(dev->chip);
	kfree(dev);

	/* Shutdown the PCI device */
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
}
1014 | |||
1015 | void r852_shutdown(struct pci_dev *pci_dev) | ||
1016 | { | ||
1017 | struct r852_device *dev = pci_get_drvdata(pci_dev); | ||
1018 | |||
1019 | cancel_delayed_work_sync(&dev->card_detect_work); | ||
1020 | r852_disable_irqs(dev); | ||
1021 | synchronize_irq(dev->irq); | ||
1022 | pci_disable_device(pci_dev); | ||
1023 | } | ||
1024 | |||
1025 | #ifdef CONFIG_PM | ||
1026 | int r852_suspend(struct device *device) | ||
1027 | { | ||
1028 | struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); | ||
1029 | unsigned long flags; | ||
1030 | |||
1031 | if (dev->ctlreg & R852_CTL_CARDENABLE) | ||
1032 | return -EBUSY; | ||
1033 | |||
1034 | /* First make sure the detect work is gone */ | ||
1035 | cancel_delayed_work_sync(&dev->card_detect_work); | ||
1036 | |||
1037 | /* Turn off the interrupts and stop the device */ | ||
1038 | r852_disable_irqs(dev); | ||
1039 | r852_engine_disable(dev); | ||
1040 | |||
1041 | spin_lock_irqsave(&dev->irqlock, flags); | ||
1042 | dev->insuspend = 1; | ||
1043 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1044 | |||
1045 | /* At that point, even if interrupt handler is running, it will quit */ | ||
1046 | /* So wait for this to happen explictly */ | ||
1047 | synchronize_irq(dev->irq); | ||
1048 | |||
1049 | /* If card was pulled off just during the suspend, which is very | ||
1050 | unlikely, we will remove it on resume, it too late now | ||
1051 | anyway... */ | ||
1052 | dev->card_unstable = 0; | ||
1053 | |||
1054 | pci_save_state(to_pci_dev(device)); | ||
1055 | return pci_prepare_to_sleep(to_pci_dev(device)); | ||
1056 | } | ||
1057 | |||
1058 | int r852_resume(struct device *device) | ||
1059 | { | ||
1060 | struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); | ||
1061 | unsigned long flags; | ||
1062 | |||
1063 | /* Turn on the hardware */ | ||
1064 | pci_back_from_sleep(to_pci_dev(device)); | ||
1065 | pci_restore_state(to_pci_dev(device)); | ||
1066 | |||
1067 | r852_disable_irqs(dev); | ||
1068 | r852_card_update_present(dev); | ||
1069 | r852_engine_disable(dev); | ||
1070 | |||
1071 | |||
1072 | /* Now its safe for IRQ to run */ | ||
1073 | spin_lock_irqsave(&dev->irqlock, flags); | ||
1074 | dev->insuspend = 0; | ||
1075 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1076 | |||
1077 | |||
1078 | /* If card status changed, just do the work */ | ||
1079 | if (dev->card_detected != dev->card_registred) { | ||
1080 | dbg("card was %s during low power state", | ||
1081 | dev->card_detected ? "added" : "removed"); | ||
1082 | |||
1083 | queue_delayed_work(dev->card_workqueue, | ||
1084 | &dev->card_detect_work, 1000); | ||
1085 | return 0; | ||
1086 | } | ||
1087 | |||
1088 | /* Otherwise, initialize the card */ | ||
1089 | if (dev->card_registred) { | ||
1090 | r852_engine_enable(dev); | ||
1091 | dev->chip->select_chip(dev->mtd, 0); | ||
1092 | dev->chip->cmdfunc(dev->mtd, NAND_CMD_RESET, -1, -1); | ||
1093 | dev->chip->select_chip(dev->mtd, -1); | ||
1094 | } | ||
1095 | |||
1096 | /* Program card detection IRQ */ | ||
1097 | r852_update_card_detect(dev); | ||
1098 | return 0; | ||
1099 | } | ||
1100 | #else | ||
1101 | #define r852_suspend NULL | ||
1102 | #define r852_resume NULL | ||
1103 | #endif | ||
1104 | |||
/* PCI IDs this driver binds to: Ricoh 0x0852 xD/SmartMedia controller */
static const struct pci_device_id r852_pci_id_tbl[] = {

	{ PCI_VDEVICE(RICOH, 0x0852), },
	{ },
};

MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);

/* Defines r852_pm_ops, wiring up the suspend/resume callbacks above */
SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
1114 | |||
1115 | |||
/* PCI driver glue: probe/remove/shutdown entry points plus PM ops */
static struct pci_driver r852_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= r852_pci_id_tbl,
	.probe		= r852_probe,
	.remove		= r852_remove,
	.shutdown	= r852_shutdown,
	.driver.pm	= &r852_pm_ops,
};
1124 | |||
1125 | static __init int r852_module_init(void) | ||
1126 | { | ||
1127 | return pci_register_driver(&r852_pci_driver); | ||
1128 | } | ||
1129 | |||
/* Module exit point: unregister the PCI driver */
static void __exit r852_module_exit(void)
{
	pci_unregister_driver(&r852_pci_driver);
}
1134 | |||
/* Module entry/exit registration and metadata */
module_init(r852_module_init);
module_exit(r852_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");
diff --git a/drivers/mtd/nand/r852.h b/drivers/mtd/nand/r852.h new file mode 100644 index 000000000000..8096cc280c73 --- /dev/null +++ b/drivers/mtd/nand/r852.h | |||
@@ -0,0 +1,163 @@ | |||
1 | /* | ||
2 | * Copyright © 2009 - Maxim Levitsky | ||
3 | * driver for Ricoh xD readers | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #include <linux/pci.h> | ||
11 | #include <linux/completion.h> | ||
12 | #include <linux/workqueue.h> | ||
13 | #include <linux/mtd/nand.h> | ||
14 | #include <linux/spinlock.h> | ||
15 | |||
16 | |||
/* nand interface + ecc
   byte write/read does one cycle on nand data lines.
   dword write/read does 4 cycles
   if R852_CTL_ECC_ACCESS is set in R852_CTL, then dword read reads
   results of ecc correction, if DMA read was done before.
   If write was done, two dword reads read the generated ecc checksums
*/
#define	R852_DATALINE		0x00

/* control register */
#define R852_CTL		0x04
#define R852_CTL_COMMAND 	0x01	/* send command (#CLE)*/
#define R852_CTL_DATA		0x02	/* read/write data (#ALE)*/
#define R852_CTL_ON		0x04	/* only seems to control the hd led, */
					/* but has to be set on start...*/
#define R852_CTL_RESET		0x08	/* unknown, set only on start once*/
#define R852_CTL_CARDENABLE	0x10	/* probably (#CE) - always set*/
#define R852_CTL_ECC_ENABLE	0x20	/* enable ecc engine */
#define R852_CTL_ECC_ACCESS	0x40	/* read/write ecc via reg #0*/
#define R852_CTL_WRITE		0x80	/* set when performing writes (#WP) */

/* card detection status */
#define R852_CARD_STA		0x05

#define R852_CARD_STA_CD	0x01	/* state of #CD line, same as 0x04 */
#define R852_CARD_STA_RO	0x02	/* card is readonly */
#define R852_CARD_STA_PRESENT	0x04	/* card is present (#CD) */
#define R852_CARD_STA_ABSENT	0x08	/* card is absent */
#define R852_CARD_STA_BUSY	0x80	/* card is busy - (#R/B) */

/* card detection irq status & enable */
#define R852_CARD_IRQ_STA	0x06	/* IRQ status */
#define R852_CARD_IRQ_ENABLE	0x07	/* IRQ enable */

#define R852_CARD_IRQ_CD	0x01	/* fire when #CD lights, same as 0x04*/
#define R852_CARD_IRQ_REMOVE	0x04	/* detect card removal */
#define R852_CARD_IRQ_INSERT	0x08	/* detect card insert */
#define R852_CARD_IRQ_UNK1	0x10	/* unknown */
#define R852_CARD_IRQ_GENABLE	0x80	/* general enable */
#define R852_CARD_IRQ_MASK	0x1D	/* CD | REMOVE | INSERT | UNK1 */



/* hardware enable */
#define R852_HW			0x08
#define R852_HW_ENABLED		0x01	/* hw enabled */
#define R852_HW_UNKNOWN		0x80


/* dma capabilities */
#define R852_DMA_CAP		0x09
#define R852_SMBIT		0x20	/* if set with bit #6 or bit #7, then */
					/* hw is smartmedia */
#define R852_DMA1		0x40	/* if set w/bit #7, dma is supported */
#define R852_DMA2		0x80	/* if set w/bit #6, dma is supported */


/* physical DMA address - 32 bit value*/
#define R852_DMA_ADDR		0x0C


/* dma settings */
#define R852_DMA_SETTINGS	0x10
#define R852_DMA_MEMORY		0x01	/* (memory <-> internal hw buffer) */
#define R852_DMA_READ		0x02	/* 0 = write, 1 = read */
#define R852_DMA_INTERNAL	0x04	/* (internal hw buffer <-> card) */

/* dma IRQ status */
#define R852_DMA_IRQ_STA		0x14

/* dma IRQ enable */
#define R852_DMA_IRQ_ENABLE	0x18

#define R852_DMA_IRQ_MEMORY	0x01	/* (memory <-> internal hw buffer) */
#define R852_DMA_IRQ_ERROR	0x02	/* error did happen */
#define R852_DMA_IRQ_INTERNAL	0x04	/* (internal hw buffer <-> card) */
#define R852_DMA_IRQ_MASK	0x07	/* mask of all IRQ bits */


/* ECC syndrome format - read from reg #0 will return two copies of these for
   each half of the page.
   first byte is error byte location, and second, bit location + flags */
#define R852_ECC_ERR_BIT_MSK	0x07	/* error bit location */
#define R852_ECC_CORRECT		0x10	/* no errors - (guessed) */
#define R852_ECC_CORRECTABLE	0x20	/* correctable error exist */
#define R852_ECC_FAIL		0x40	/* non correctable error detected */

/* one DMA transfer covers one 512-byte sector */
#define R852_DMA_LEN		512

/* values of r852_device.dma_state (which half of the transfer is active) */
#define DMA_INTERNAL	0
#define DMA_MEMORY	1
108 | |||
/* Per-controller state; allocated in r852_probe() and stored as the PCI
   driver data.  IRQ-visible fields are protected by irqlock. */
struct r852_device {
	void __iomem *mmio;		/* mmio */
	struct mtd_info *mtd;		/* mtd backpointer */
	struct nand_chip *chip;		/* nand chip backpointer */
	struct pci_dev *pci_dev;	/* pci backpointer */

	/* dma area */
	dma_addr_t phys_dma_addr;	/* bus address of buffer*/
	struct completion dma_done;	/* data transfer done */

	dma_addr_t phys_bounce_buffer;	/* bus address of bounce buffer */
	uint8_t *bounce_buffer;		/* virtual address of bounce buffer */

	int dma_dir;			/* 1 = read, 0 = write */
	int dma_stage;			/* 0 - idle, 1 - first step,
					   2 - second step */

	int dma_state;			/* DMA_INTERNAL or DMA_MEMORY */
	int dma_error;			/* dma errors */
	int dma_usable;			/* is it possible to use dma */

	/* card status area */
	struct delayed_work card_detect_work;
	struct workqueue_struct *card_workqueue;
	int card_registred;		/* card registered with mtd */
	int card_detected;		/* card detected in slot */
	int card_unstable;		/* card was just inserted; state
					   is not known yet */
	int readonly;			/* card is readonly */
	int sm;				/* Is card smartmedia */

	/* interrupt handling */
	spinlock_t irqlock;		/* IRQ protecting lock */
	int irq;			/* irq num */
	int insuspend;			/* device is suspended */

	/* misc */
	void *tmp_buffer;		/* temporary buffer */
	uint8_t ctlreg;			/* cached contents of control reg */
};
149 | |||
#define DRV_NAME "r852"


/* Debug print macros, gated on the module's "debug" parameter.
   Wrapped in do { } while (0) so each expands to exactly one statement:
   the previous bare "if (debug) printk(...)" form would silently bind a
   following "else" to the wrong "if" at the call site. */
#define dbg(format, ...) \
	do { \
		if (debug) \
			printk(KERN_DEBUG DRV_NAME ": " format "\n", \
							## __VA_ARGS__); \
	} while (0)

#define dbg_verbose(format, ...) \
	do { \
		if (debug > 1) \
			printk(KERN_DEBUG DRV_NAME ": " format "\n", \
							## __VA_ARGS__); \
	} while (0)


/* Unconditional informational message */
#define message(format, ...) \
	printk(KERN_INFO DRV_NAME ": " format "\n", ## __VA_ARGS__)
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c index fa6e9c7fe511..239aadfd01b0 100644 --- a/drivers/mtd/nand/s3c2410.c +++ b/drivers/mtd/nand/s3c2410.c | |||
@@ -929,14 +929,13 @@ static int s3c24xx_nand_probe(struct platform_device *pdev) | |||
929 | 929 | ||
930 | pr_debug("s3c2410_nand_probe(%p)\n", pdev); | 930 | pr_debug("s3c2410_nand_probe(%p)\n", pdev); |
931 | 931 | ||
932 | info = kmalloc(sizeof(*info), GFP_KERNEL); | 932 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
933 | if (info == NULL) { | 933 | if (info == NULL) { |
934 | dev_err(&pdev->dev, "no memory for flash info\n"); | 934 | dev_err(&pdev->dev, "no memory for flash info\n"); |
935 | err = -ENOMEM; | 935 | err = -ENOMEM; |
936 | goto exit_error; | 936 | goto exit_error; |
937 | } | 937 | } |
938 | 938 | ||
939 | memset(info, 0, sizeof(*info)); | ||
940 | platform_set_drvdata(pdev, info); | 939 | platform_set_drvdata(pdev, info); |
941 | 940 | ||
942 | spin_lock_init(&info->controller.lock); | 941 | spin_lock_init(&info->controller.lock); |
@@ -957,7 +956,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev) | |||
957 | 956 | ||
958 | /* currently we assume we have the one resource */ | 957 | /* currently we assume we have the one resource */ |
959 | res = pdev->resource; | 958 | res = pdev->resource; |
960 | size = res->end - res->start + 1; | 959 | size = resource_size(res); |
961 | 960 | ||
962 | info->area = request_mem_region(res->start, size, pdev->name); | 961 | info->area = request_mem_region(res->start, size, pdev->name); |
963 | 962 | ||
@@ -994,15 +993,13 @@ static int s3c24xx_nand_probe(struct platform_device *pdev) | |||
994 | /* allocate our information */ | 993 | /* allocate our information */ |
995 | 994 | ||
996 | size = nr_sets * sizeof(*info->mtds); | 995 | size = nr_sets * sizeof(*info->mtds); |
997 | info->mtds = kmalloc(size, GFP_KERNEL); | 996 | info->mtds = kzalloc(size, GFP_KERNEL); |
998 | if (info->mtds == NULL) { | 997 | if (info->mtds == NULL) { |
999 | dev_err(&pdev->dev, "failed to allocate mtd storage\n"); | 998 | dev_err(&pdev->dev, "failed to allocate mtd storage\n"); |
1000 | err = -ENOMEM; | 999 | err = -ENOMEM; |
1001 | goto exit_error; | 1000 | goto exit_error; |
1002 | } | 1001 | } |
1003 | 1002 | ||
1004 | memset(info->mtds, 0, size); | ||
1005 | |||
1006 | /* initialise all possible chips */ | 1003 | /* initialise all possible chips */ |
1007 | 1004 | ||
1008 | nmtd = info->mtds; | 1005 | nmtd = info->mtds; |
@@ -1013,7 +1010,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev) | |||
1013 | s3c2410_nand_init_chip(info, nmtd, sets); | 1010 | s3c2410_nand_init_chip(info, nmtd, sets); |
1014 | 1011 | ||
1015 | nmtd->scan_res = nand_scan_ident(&nmtd->mtd, | 1012 | nmtd->scan_res = nand_scan_ident(&nmtd->mtd, |
1016 | (sets) ? sets->nr_chips : 1); | 1013 | (sets) ? sets->nr_chips : 1, |
1014 | NULL); | ||
1017 | 1015 | ||
1018 | if (nmtd->scan_res == 0) { | 1016 | if (nmtd->scan_res == 0) { |
1019 | s3c2410_nand_update_chip(info, nmtd); | 1017 | s3c2410_nand_update_chip(info, nmtd); |
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index 34752fce0793..546c2f0eb2e8 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c | |||
@@ -855,7 +855,7 @@ static int __devinit flctl_probe(struct platform_device *pdev) | |||
855 | nand->read_word = flctl_read_word; | 855 | nand->read_word = flctl_read_word; |
856 | } | 856 | } |
857 | 857 | ||
858 | ret = nand_scan_ident(flctl_mtd, 1); | 858 | ret = nand_scan_ident(flctl_mtd, 1, NULL); |
859 | if (ret) | 859 | if (ret) |
860 | goto err; | 860 | goto err; |
861 | 861 | ||
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c new file mode 100644 index 000000000000..ac80fb362e63 --- /dev/null +++ b/drivers/mtd/nand/sm_common.c | |||
@@ -0,0 +1,148 @@ | |||
1 | /* | ||
2 | * Copyright © 2009 - Maxim Levitsky | ||
3 | * Common routines & support for xD format | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/mtd/nand.h> | ||
11 | #include "sm_common.h" | ||
12 | |||
/* OOB layout for 512-byte-page SmartMedia/xD: 6 ECC bytes, with the
   reserved area and both LBA copies exposed as free OOB space */
static struct nand_ecclayout nand_oob_sm = {
	.eccbytes = 6,
	.eccpos = {8, 9, 10, 13, 14, 15},
	.oobfree = {
		{.offset = 0 , .length = 4}, /* reserved */
		{.offset = 6 , .length = 2}, /* LBA1 */
		{.offset = 11, .length = 2}  /* LBA2 */
	}
};
22 | |||
/* NOTE: This layout is not compatible with SmartMedia, */
/* because the 256 byte devices have page dependent oob layout */
/* However it does preserve the bad block markers */
/* If you use smftl, it will bypass this and work correctly */
/* If you do not, then you break SmartMedia compliance anyway */
28 | |||
/* OOB layout for 256-byte ("small page") devices: 3 ECC bytes and a
   single LBA copy */
static struct nand_ecclayout nand_oob_sm_small = {
	.eccbytes = 3,
	.eccpos = {0, 1, 2},
	.oobfree = {
		{.offset = 3 , .length = 2}, /* reserved */
		{.offset = 6 , .length = 2}, /* LBA1 */
	}
};
37 | |||
38 | |||
39 | static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs) | ||
40 | { | ||
41 | struct mtd_oob_ops ops; | ||
42 | struct sm_oob oob; | ||
43 | int ret, error = 0; | ||
44 | |||
45 | memset(&oob, -1, SM_OOB_SIZE); | ||
46 | oob.block_status = 0x0F; | ||
47 | |||
48 | /* As long as this function is called on erase block boundaries | ||
49 | it will work correctly for 256 byte nand */ | ||
50 | ops.mode = MTD_OOB_PLACE; | ||
51 | ops.ooboffs = 0; | ||
52 | ops.ooblen = mtd->oobsize; | ||
53 | ops.oobbuf = (void *)&oob; | ||
54 | ops.datbuf = NULL; | ||
55 | |||
56 | |||
57 | ret = mtd->write_oob(mtd, ofs, &ops); | ||
58 | if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) { | ||
59 | printk(KERN_NOTICE | ||
60 | "sm_common: can't mark sector at %i as bad\n", | ||
61 | (int)ofs); | ||
62 | error = -EIO; | ||
63 | } else | ||
64 | mtd->ecc_stats.badblocks++; | ||
65 | |||
66 | return error; | ||
67 | } | ||
68 | |||
69 | |||
70 | static struct nand_flash_dev nand_smartmedia_flash_ids[] = { | ||
71 | {"SmartMedia 1MiB 5V", 0x6e, 256, 1, 0x1000, 0}, | ||
72 | {"SmartMedia 1MiB 3,3V", 0xe8, 256, 1, 0x1000, 0}, | ||
73 | {"SmartMedia 1MiB 3,3V", 0xec, 256, 1, 0x1000, 0}, | ||
74 | {"SmartMedia 2MiB 3,3V", 0xea, 256, 2, 0x1000, 0}, | ||
75 | {"SmartMedia 2MiB 5V", 0x64, 256, 2, 0x1000, 0}, | ||
76 | {"SmartMedia 2MiB 3,3V ROM", 0x5d, 512, 2, 0x2000, NAND_ROM}, | ||
77 | {"SmartMedia 4MiB 3,3V", 0xe3, 512, 4, 0x2000, 0}, | ||
78 | {"SmartMedia 4MiB 3,3/5V", 0xe5, 512, 4, 0x2000, 0}, | ||
79 | {"SmartMedia 4MiB 5V", 0x6b, 512, 4, 0x2000, 0}, | ||
80 | {"SmartMedia 4MiB 3,3V ROM", 0xd5, 512, 4, 0x2000, NAND_ROM}, | ||
81 | {"SmartMedia 8MiB 3,3V", 0xe6, 512, 8, 0x2000, 0}, | ||
82 | {"SmartMedia 8MiB 3,3V ROM", 0xd6, 512, 8, 0x2000, NAND_ROM}, | ||
83 | {"SmartMedia 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0}, | ||
84 | {"SmartMedia 16MiB 3,3V ROM", 0x57, 512, 16, 0x4000, NAND_ROM}, | ||
85 | {"SmartMedia 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0}, | ||
86 | {"SmartMedia 32MiB 3,3V ROM", 0x58, 512, 32, 0x4000, NAND_ROM}, | ||
87 | {"SmartMedia 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0}, | ||
88 | {"SmartMedia 64MiB 3,3V ROM", 0xd9, 512, 64, 0x4000, NAND_ROM}, | ||
89 | {"SmartMedia 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0}, | ||
90 | {"SmartMedia 128MiB 3,3V ROM", 0xda, 512, 128, 0x4000, NAND_ROM}, | ||
91 | {"SmartMedia 256MiB 3,3V", 0x71, 512, 256, 0x4000 }, | ||
92 | {"SmartMedia 256MiB 3,3V ROM", 0x5b, 512, 256, 0x4000, NAND_ROM}, | ||
93 | {NULL,} | ||
94 | }; | ||
95 | |||
/* Option flags applied to the large (>=256MiB, "type M") xD cards */
#define XD_TYPEM	(NAND_NO_AUTOINCR | NAND_BROKEN_XD)
/* xD ID table: { name, device id, page size, chip size (MiB),
   erase size, options } */
static struct nand_flash_dev nand_xd_flash_ids[] = {

	{"xD 16MiB 3,3V",    0x73, 512, 16,   0x4000, 0},
	{"xD 32MiB 3,3V",    0x75, 512, 32,   0x4000, 0},
	{"xD 64MiB 3,3V",    0x76, 512, 64,   0x4000, 0},
	{"xD 128MiB 3,3V",   0x79, 512, 128,  0x4000, 0},
	{"xD 256MiB 3,3V",   0x71, 512, 256,  0x4000, XD_TYPEM},
	{"xD 512MiB 3,3V",   0xdc, 512, 512,  0x4000, XD_TYPEM},
	{"xD 1GiB 3,3V",     0xd3, 512, 1024, 0x4000, XD_TYPEM},
	{"xD 2GiB 3,3V",     0xd5, 512, 2048, 0x4000, XD_TYPEM},
	{NULL,}
};
109 | |||
/*
 * Identify a SmartMedia/xD card and register it with the MTD layer.
 * @mtd:        mtd_info whose ->priv is a prepared nand_chip
 * @smartmedia: nonzero to match against the SmartMedia ID table,
 *              zero for the xD table
 *
 * Returns 0 on success or a negative error code.
 */
int sm_register_device(struct mtd_info *mtd, int smartmedia)
{
	struct nand_chip *chip = (struct nand_chip *)mtd->priv;
	int ret;

	/* No BBT scan; bad blocks are tracked via the oob markers */
	chip->options |= NAND_SKIP_BBTSCAN;

	/* Scan for card properties */
	ret = nand_scan_ident(mtd, 1, smartmedia ?
		nand_smartmedia_flash_ids : nand_xd_flash_ids);

	if (ret)
		return ret;

	/* Bad block marker position */
	chip->badblockpos = 0x05;
	chip->badblockbits = 7;
	chip->block_markbad = sm_block_markbad;

	/* ECC layout depends on the page size detected above */
	if (mtd->writesize == SM_SECTOR_SIZE)
		chip->ecc.layout = &nand_oob_sm;
	else if (mtd->writesize == SM_SMALL_PAGE)
		chip->ecc.layout = &nand_oob_sm_small;
	else
		return -ENODEV;

	ret = nand_scan_tail(mtd);

	if (ret)
		return ret;

	return add_mtd_device(mtd);
}
EXPORT_SYMBOL_GPL(sm_register_device);
145 | |||
146 | MODULE_LICENSE("GPL"); | ||
147 | MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>"); | ||
148 | MODULE_DESCRIPTION("Common SmartMedia/xD functions"); | ||
diff --git a/drivers/mtd/nand/sm_common.h b/drivers/mtd/nand/sm_common.h new file mode 100644 index 000000000000..00f4a83359b2 --- /dev/null +++ b/drivers/mtd/nand/sm_common.h | |||
@@ -0,0 +1,61 @@ | |||
1 | /* | ||
2 | * Copyright © 2009 - Maxim Levitsky | ||
3 | * Common routines & support for SmartMedia/xD format | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | #include <linux/bitops.h> | ||
10 | #include <linux/mtd/mtd.h> | ||
11 | |||
/* Full oob structure as written on the flash (16 bytes, packed).
   data_status/block_status validity is decided by bit-majority — see
   sm_sector_valid()/sm_block_valid() below. */
struct sm_oob {
	uint32_t reserved;
	uint8_t data_status;	/* sector-valid marker */
	uint8_t block_status;	/* block-valid / bad-block marker */
	uint8_t lba_copy1[2];	/* logical block address, first copy */
	uint8_t ecc2[3];	/* ECC for second half of the page */
	uint8_t lba_copy2[2];	/* logical block address, second copy */
	uint8_t ecc1[3];	/* ECC for first half of the page */
} __attribute__((packed));
22 | |||
23 | |||
/* one sector is always 512 bytes, but it can consist of two nand pages */
#define SM_SECTOR_SIZE		512

/* oob area is also 16 bytes, but might be from two pages */
#define SM_OOB_SIZE		16

/* This is maximum zone size, and all devices that have more than one zone
   have this size */
#define SM_MAX_ZONE_SIZE 	1024

/* support for small page nand */
#define SM_SMALL_PAGE 		256
#define SM_SMALL_OOB_SIZE	8


extern int sm_register_device(struct mtd_info *mtd, int smartmedia);
40 | |||
41 | |||
/* A sector is valid if at least 5 of the 8 data_status bits are set
   (bit-majority tolerates a few flipped bits).
   NOTE(review): hweight16 on a uint8_t field is harmless but hweight8
   would state the intent better — confirm before changing. */
static inline int sm_sector_valid(struct sm_oob *oob)
{
	return hweight16(oob->data_status) >= 5;
}
46 | |||
/* A block is good if at least 7 of the 8 block_status bits are set;
   sm_block_markbad() writes 0x0F here, which fails this test */
static inline int sm_block_valid(struct sm_oob *oob)
{
	return hweight16(oob->block_status) >= 7;
}
51 | |||
52 | static inline int sm_block_erased(struct sm_oob *oob) | ||
53 | { | ||
54 | static const uint32_t erased_pattern[4] = { | ||
55 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }; | ||
56 | |||
57 | /* First test for erased block */ | ||
58 | if (!memcmp(oob, erased_pattern, sizeof(*oob))) | ||
59 | return 1; | ||
60 | return 0; | ||
61 | } | ||
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c index edb9b1384143..884852dc7eb4 100644 --- a/drivers/mtd/nand/socrates_nand.c +++ b/drivers/mtd/nand/socrates_nand.c | |||
@@ -220,7 +220,7 @@ static int __devinit socrates_nand_probe(struct of_device *ofdev, | |||
220 | dev_set_drvdata(&ofdev->dev, host); | 220 | dev_set_drvdata(&ofdev->dev, host); |
221 | 221 | ||
222 | /* first scan to find the device and get the page size */ | 222 | /* first scan to find the device and get the page size */ |
223 | if (nand_scan_ident(mtd, 1)) { | 223 | if (nand_scan_ident(mtd, 1, NULL)) { |
224 | res = -ENXIO; | 224 | res = -ENXIO; |
225 | goto out; | 225 | goto out; |
226 | } | 226 | } |
@@ -290,7 +290,7 @@ static int __devexit socrates_nand_remove(struct of_device *ofdev) | |||
290 | return 0; | 290 | return 0; |
291 | } | 291 | } |
292 | 292 | ||
293 | static struct of_device_id socrates_nand_match[] = | 293 | static const struct of_device_id socrates_nand_match[] = |
294 | { | 294 | { |
295 | { | 295 | { |
296 | .compatible = "abb,socrates-nand", | 296 | .compatible = "abb,socrates-nand", |
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c index fa28f01ae009..3041d1f7ae3f 100644 --- a/drivers/mtd/nand/tmio_nand.c +++ b/drivers/mtd/nand/tmio_nand.c | |||
@@ -319,7 +319,7 @@ static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf, | |||
319 | 319 | ||
320 | static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio) | 320 | static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio) |
321 | { | 321 | { |
322 | struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; | 322 | struct mfd_cell *cell = dev_get_platdata(&dev->dev); |
323 | int ret; | 323 | int ret; |
324 | 324 | ||
325 | if (cell->enable) { | 325 | if (cell->enable) { |
@@ -363,7 +363,7 @@ static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio) | |||
363 | 363 | ||
364 | static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio) | 364 | static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio) |
365 | { | 365 | { |
366 | struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; | 366 | struct mfd_cell *cell = dev_get_platdata(&dev->dev); |
367 | 367 | ||
368 | tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE); | 368 | tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE); |
369 | if (cell->disable) | 369 | if (cell->disable) |
@@ -372,7 +372,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio) | |||
372 | 372 | ||
373 | static int tmio_probe(struct platform_device *dev) | 373 | static int tmio_probe(struct platform_device *dev) |
374 | { | 374 | { |
375 | struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; | 375 | struct mfd_cell *cell = dev_get_platdata(&dev->dev); |
376 | struct tmio_nand_data *data = cell->driver_data; | 376 | struct tmio_nand_data *data = cell->driver_data; |
377 | struct resource *fcr = platform_get_resource(dev, | 377 | struct resource *fcr = platform_get_resource(dev, |
378 | IORESOURCE_MEM, 0); | 378 | IORESOURCE_MEM, 0); |
@@ -405,14 +405,14 @@ static int tmio_probe(struct platform_device *dev) | |||
405 | mtd->priv = nand_chip; | 405 | mtd->priv = nand_chip; |
406 | mtd->name = "tmio-nand"; | 406 | mtd->name = "tmio-nand"; |
407 | 407 | ||
408 | tmio->ccr = ioremap(ccr->start, ccr->end - ccr->start + 1); | 408 | tmio->ccr = ioremap(ccr->start, resource_size(ccr)); |
409 | if (!tmio->ccr) { | 409 | if (!tmio->ccr) { |
410 | retval = -EIO; | 410 | retval = -EIO; |
411 | goto err_iomap_ccr; | 411 | goto err_iomap_ccr; |
412 | } | 412 | } |
413 | 413 | ||
414 | tmio->fcr_base = fcr->start & 0xfffff; | 414 | tmio->fcr_base = fcr->start & 0xfffff; |
415 | tmio->fcr = ioremap(fcr->start, fcr->end - fcr->start + 1); | 415 | tmio->fcr = ioremap(fcr->start, resource_size(fcr)); |
416 | if (!tmio->fcr) { | 416 | if (!tmio->fcr) { |
417 | retval = -EIO; | 417 | retval = -EIO; |
418 | goto err_iomap_fcr; | 418 | goto err_iomap_fcr; |
@@ -516,7 +516,7 @@ static int tmio_remove(struct platform_device *dev) | |||
516 | #ifdef CONFIG_PM | 516 | #ifdef CONFIG_PM |
517 | static int tmio_suspend(struct platform_device *dev, pm_message_t state) | 517 | static int tmio_suspend(struct platform_device *dev, pm_message_t state) |
518 | { | 518 | { |
519 | struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; | 519 | struct mfd_cell *cell = dev_get_platdata(&dev->dev); |
520 | 520 | ||
521 | if (cell->suspend) | 521 | if (cell->suspend) |
522 | cell->suspend(dev); | 522 | cell->suspend(dev); |
@@ -527,7 +527,7 @@ static int tmio_suspend(struct platform_device *dev, pm_message_t state) | |||
527 | 527 | ||
528 | static int tmio_resume(struct platform_device *dev) | 528 | static int tmio_resume(struct platform_device *dev) |
529 | { | 529 | { |
530 | struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; | 530 | struct mfd_cell *cell = dev_get_platdata(&dev->dev); |
531 | 531 | ||
532 | /* FIXME - is this required or merely another attack of the broken | 532 | /* FIXME - is this required or merely another attack of the broken |
533 | * SHARP platform? Looks suspicious. | 533 | * SHARP platform? Looks suspicious. |
diff --git a/drivers/mtd/nand/ts7250.c b/drivers/mtd/nand/ts7250.c deleted file mode 100644 index 0f5562aeedc1..000000000000 --- a/drivers/mtd/nand/ts7250.c +++ /dev/null | |||
@@ -1,207 +0,0 @@ | |||
1 | /* | ||
2 | * drivers/mtd/nand/ts7250.c | ||
3 | * | ||
4 | * Copyright (C) 2004 Technologic Systems (support@embeddedARM.com) | ||
5 | * | ||
6 | * Derived from drivers/mtd/nand/edb7312.c | ||
7 | * Copyright (C) 2004 Marius Gröger (mag@sysgo.de) | ||
8 | * | ||
9 | * Derived from drivers/mtd/nand/autcpu12.c | ||
10 | * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de) | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License version 2 as | ||
14 | * published by the Free Software Foundation. | ||
15 | * | ||
16 | * Overview: | ||
17 | * This is a device driver for the NAND flash device found on the | ||
18 | * TS-7250 board which utilizes a Samsung 32 Mbyte part. | ||
19 | */ | ||
20 | |||
21 | #include <linux/slab.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/mtd/mtd.h> | ||
25 | #include <linux/mtd/nand.h> | ||
26 | #include <linux/mtd/partitions.h> | ||
27 | #include <linux/io.h> | ||
28 | |||
29 | #include <mach/hardware.h> | ||
30 | #include <mach/ts72xx.h> | ||
31 | |||
32 | #include <asm/sizes.h> | ||
33 | #include <asm/mach-types.h> | ||
34 | |||
35 | /* | ||
36 | * MTD structure for TS7250 board | ||
37 | */ | ||
38 | static struct mtd_info *ts7250_mtd = NULL; | ||
39 | |||
40 | #ifdef CONFIG_MTD_PARTITIONS | ||
41 | static const char *part_probes[] = { "cmdlinepart", NULL }; | ||
42 | |||
43 | #define NUM_PARTITIONS 3 | ||
44 | |||
45 | /* | ||
46 | * Define static partitions for flash device | ||
47 | */ | ||
48 | static struct mtd_partition partition_info32[] = { | ||
49 | { | ||
50 | .name = "TS-BOOTROM", | ||
51 | .offset = 0x00000000, | ||
52 | .size = 0x00004000, | ||
53 | }, { | ||
54 | .name = "Linux", | ||
55 | .offset = 0x00004000, | ||
56 | .size = 0x01d00000, | ||
57 | }, { | ||
58 | .name = "RedBoot", | ||
59 | .offset = 0x01d04000, | ||
60 | .size = 0x002fc000, | ||
61 | }, | ||
62 | }; | ||
63 | |||
64 | /* | ||
65 | * Define static partitions for flash device | ||
66 | */ | ||
67 | static struct mtd_partition partition_info128[] = { | ||
68 | { | ||
69 | .name = "TS-BOOTROM", | ||
70 | .offset = 0x00000000, | ||
71 | .size = 0x00004000, | ||
72 | }, { | ||
73 | .name = "Linux", | ||
74 | .offset = 0x00004000, | ||
75 | .size = 0x07d00000, | ||
76 | }, { | ||
77 | .name = "RedBoot", | ||
78 | .offset = 0x07d04000, | ||
79 | .size = 0x002fc000, | ||
80 | }, | ||
81 | }; | ||
82 | #endif | ||
83 | |||
84 | |||
85 | /* | ||
86 | * hardware specific access to control-lines | ||
87 | * | ||
88 | * ctrl: | ||
89 | * NAND_NCE: bit 0 -> bit 2 | ||
90 | * NAND_CLE: bit 1 -> bit 1 | ||
91 | * NAND_ALE: bit 2 -> bit 0 | ||
92 | */ | ||
93 | static void ts7250_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) | ||
94 | { | ||
95 | struct nand_chip *chip = mtd->priv; | ||
96 | |||
97 | if (ctrl & NAND_CTRL_CHANGE) { | ||
98 | unsigned long addr = TS72XX_NAND_CONTROL_VIRT_BASE; | ||
99 | unsigned char bits; | ||
100 | |||
101 | bits = (ctrl & NAND_NCE) << 2; | ||
102 | bits |= ctrl & NAND_CLE; | ||
103 | bits |= (ctrl & NAND_ALE) >> 2; | ||
104 | |||
105 | __raw_writeb((__raw_readb(addr) & ~0x7) | bits, addr); | ||
106 | } | ||
107 | |||
108 | if (cmd != NAND_CMD_NONE) | ||
109 | writeb(cmd, chip->IO_ADDR_W); | ||
110 | } | ||
111 | |||
112 | /* | ||
113 | * read device ready pin | ||
114 | */ | ||
115 | static int ts7250_device_ready(struct mtd_info *mtd) | ||
116 | { | ||
117 | return __raw_readb(TS72XX_NAND_BUSY_VIRT_BASE) & 0x20; | ||
118 | } | ||
119 | |||
120 | /* | ||
121 | * Main initialization routine | ||
122 | */ | ||
123 | static int __init ts7250_init(void) | ||
124 | { | ||
125 | struct nand_chip *this; | ||
126 | const char *part_type = 0; | ||
127 | int mtd_parts_nb = 0; | ||
128 | struct mtd_partition *mtd_parts = 0; | ||
129 | |||
130 | if (!machine_is_ts72xx() || board_is_ts7200()) | ||
131 | return -ENXIO; | ||
132 | |||
133 | /* Allocate memory for MTD device structure and private data */ | ||
134 | ts7250_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); | ||
135 | if (!ts7250_mtd) { | ||
136 | printk("Unable to allocate TS7250 NAND MTD device structure.\n"); | ||
137 | return -ENOMEM; | ||
138 | } | ||
139 | |||
140 | /* Get pointer to private data */ | ||
141 | this = (struct nand_chip *)(&ts7250_mtd[1]); | ||
142 | |||
143 | /* Initialize structures */ | ||
144 | memset(ts7250_mtd, 0, sizeof(struct mtd_info)); | ||
145 | memset(this, 0, sizeof(struct nand_chip)); | ||
146 | |||
147 | /* Link the private data with the MTD structure */ | ||
148 | ts7250_mtd->priv = this; | ||
149 | ts7250_mtd->owner = THIS_MODULE; | ||
150 | |||
151 | /* insert callbacks */ | ||
152 | this->IO_ADDR_R = (void *)TS72XX_NAND_DATA_VIRT_BASE; | ||
153 | this->IO_ADDR_W = (void *)TS72XX_NAND_DATA_VIRT_BASE; | ||
154 | this->cmd_ctrl = ts7250_hwcontrol; | ||
155 | this->dev_ready = ts7250_device_ready; | ||
156 | this->chip_delay = 15; | ||
157 | this->ecc.mode = NAND_ECC_SOFT; | ||
158 | |||
159 | printk("Searching for NAND flash...\n"); | ||
160 | /* Scan to find existence of the device */ | ||
161 | if (nand_scan(ts7250_mtd, 1)) { | ||
162 | kfree(ts7250_mtd); | ||
163 | return -ENXIO; | ||
164 | } | ||
165 | #ifdef CONFIG_MTD_PARTITIONS | ||
166 | ts7250_mtd->name = "ts7250-nand"; | ||
167 | mtd_parts_nb = parse_mtd_partitions(ts7250_mtd, part_probes, &mtd_parts, 0); | ||
168 | if (mtd_parts_nb > 0) | ||
169 | part_type = "command line"; | ||
170 | else | ||
171 | mtd_parts_nb = 0; | ||
172 | #endif | ||
173 | if (mtd_parts_nb == 0) { | ||
174 | mtd_parts = partition_info32; | ||
175 | if (ts7250_mtd->size >= (128 * 0x100000)) | ||
176 | mtd_parts = partition_info128; | ||
177 | mtd_parts_nb = NUM_PARTITIONS; | ||
178 | part_type = "static"; | ||
179 | } | ||
180 | |||
181 | /* Register the partitions */ | ||
182 | printk(KERN_NOTICE "Using %s partition definition\n", part_type); | ||
183 | add_mtd_partitions(ts7250_mtd, mtd_parts, mtd_parts_nb); | ||
184 | |||
185 | /* Return happy */ | ||
186 | return 0; | ||
187 | } | ||
188 | |||
189 | module_init(ts7250_init); | ||
190 | |||
191 | /* | ||
192 | * Clean up routine | ||
193 | */ | ||
194 | static void __exit ts7250_cleanup(void) | ||
195 | { | ||
196 | /* Unregister the device */ | ||
197 | del_mtd_device(ts7250_mtd); | ||
198 | |||
199 | /* Free the MTD device structure */ | ||
200 | kfree(ts7250_mtd); | ||
201 | } | ||
202 | |||
203 | module_exit(ts7250_cleanup); | ||
204 | |||
205 | MODULE_LICENSE("GPL"); | ||
206 | MODULE_AUTHOR("Jesse Off <joff@embeddedARM.com>"); | ||
207 | MODULE_DESCRIPTION("MTD map driver for Technologic Systems TS-7250 board"); | ||
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c index 863513c3b69a..054a41c0ef4a 100644 --- a/drivers/mtd/nand/txx9ndfmc.c +++ b/drivers/mtd/nand/txx9ndfmc.c | |||
@@ -274,7 +274,7 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd) | |||
274 | struct nand_chip *chip = mtd->priv; | 274 | struct nand_chip *chip = mtd->priv; |
275 | int ret; | 275 | int ret; |
276 | 276 | ||
277 | ret = nand_scan_ident(mtd, 1); | 277 | ret = nand_scan_ident(mtd, 1, NULL); |
278 | if (!ret) { | 278 | if (!ret) { |
279 | if (mtd->writesize >= 512) { | 279 | if (mtd->writesize >= 512) { |
280 | chip->ecc.size = mtd->writesize; | 280 | chip->ecc.size = mtd->writesize; |
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c index 1002e1882996..a4578bf903aa 100644 --- a/drivers/mtd/nftlcore.c +++ b/drivers/mtd/nftlcore.c | |||
@@ -126,7 +126,6 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev) | |||
126 | del_mtd_blktrans_dev(dev); | 126 | del_mtd_blktrans_dev(dev); |
127 | kfree(nftl->ReplUnitTable); | 127 | kfree(nftl->ReplUnitTable); |
128 | kfree(nftl->EUNtable); | 128 | kfree(nftl->EUNtable); |
129 | kfree(nftl); | ||
130 | } | 129 | } |
131 | 130 | ||
132 | /* | 131 | /* |
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig index 3a9f15784600..9a49d68ba5f9 100644 --- a/drivers/mtd/onenand/Kconfig +++ b/drivers/mtd/onenand/Kconfig | |||
@@ -30,6 +30,13 @@ config MTD_ONENAND_OMAP2 | |||
30 | Support for a OneNAND flash device connected to an OMAP2/OMAP3 CPU | 30 | Support for a OneNAND flash device connected to an OMAP2/OMAP3 CPU |
31 | via the GPMC memory controller. | 31 | via the GPMC memory controller. |
32 | 32 | ||
33 | config MTD_ONENAND_SAMSUNG | ||
34 | tristate "OneNAND on Samsung SOC controller support" | ||
35 | depends on MTD_ONENAND && (ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210) | ||
36 | help | ||
37 | Support for a OneNAND flash device connected to an Samsung SOC | ||
38 | S3C64XX/S5PC1XX controller. | ||
39 | |||
33 | config MTD_ONENAND_OTP | 40 | config MTD_ONENAND_OTP |
34 | bool "OneNAND OTP Support" | 41 | bool "OneNAND OTP Support" |
35 | select HAVE_MTD_OTP | 42 | select HAVE_MTD_OTP |
diff --git a/drivers/mtd/onenand/Makefile b/drivers/mtd/onenand/Makefile index 64b6cc61a520..2b7884c7577e 100644 --- a/drivers/mtd/onenand/Makefile +++ b/drivers/mtd/onenand/Makefile | |||
@@ -8,6 +8,7 @@ obj-$(CONFIG_MTD_ONENAND) += onenand.o | |||
8 | # Board specific. | 8 | # Board specific. |
9 | obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o | 9 | obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o |
10 | obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o | 10 | obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o |
11 | obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung.o | ||
11 | 12 | ||
12 | # Simulator | 13 | # Simulator |
13 | obj-$(CONFIG_MTD_ONENAND_SIM) += onenand_sim.o | 14 | obj-$(CONFIG_MTD_ONENAND_SIM) += onenand_sim.o |
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index fd406348fdfd..9f322f1a7f22 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c | |||
@@ -309,7 +309,7 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area, | |||
309 | goto out_copy; | 309 | goto out_copy; |
310 | 310 | ||
311 | /* panic_write() may be in an interrupt context */ | 311 | /* panic_write() may be in an interrupt context */ |
312 | if (in_interrupt()) | 312 | if (in_interrupt() || oops_in_progress) |
313 | goto out_copy; | 313 | goto out_copy; |
314 | 314 | ||
315 | if (buf >= high_memory) { | 315 | if (buf >= high_memory) { |
@@ -386,7 +386,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area, | |||
386 | goto out_copy; | 386 | goto out_copy; |
387 | 387 | ||
388 | /* panic_write() may be in an interrupt context */ | 388 | /* panic_write() may be in an interrupt context */ |
389 | if (in_interrupt()) | 389 | if (in_interrupt() || oops_in_progress) |
390 | goto out_copy; | 390 | goto out_copy; |
391 | 391 | ||
392 | if (buf >= high_memory) { | 392 | if (buf >= high_memory) { |
@@ -403,7 +403,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area, | |||
403 | 403 | ||
404 | dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE); | 404 | dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE); |
405 | dma_dst = c->phys_base + bram_offset; | 405 | dma_dst = c->phys_base + bram_offset; |
406 | if (dma_mapping_error(&c->pdev->dev, dma_dst)) { | 406 | if (dma_mapping_error(&c->pdev->dev, dma_src)) { |
407 | dev_err(&c->pdev->dev, | 407 | dev_err(&c->pdev->dev, |
408 | "Couldn't DMA map a %d byte buffer\n", | 408 | "Couldn't DMA map a %d byte buffer\n", |
409 | count); | 409 | count); |
@@ -426,7 +426,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area, | |||
426 | if (*done) | 426 | if (*done) |
427 | break; | 427 | break; |
428 | 428 | ||
429 | dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE); | 429 | dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE); |
430 | 430 | ||
431 | if (!*done) { | 431 | if (!*done) { |
432 | dev_err(&c->pdev->dev, "timeout waiting for DMA\n"); | 432 | dev_err(&c->pdev->dev, "timeout waiting for DMA\n"); |
@@ -521,7 +521,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area, | |||
521 | dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count, | 521 | dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count, |
522 | DMA_TO_DEVICE); | 522 | DMA_TO_DEVICE); |
523 | dma_dst = c->phys_base + bram_offset; | 523 | dma_dst = c->phys_base + bram_offset; |
524 | if (dma_mapping_error(&c->pdev->dev, dma_dst)) { | 524 | if (dma_mapping_error(&c->pdev->dev, dma_src)) { |
525 | dev_err(&c->pdev->dev, | 525 | dev_err(&c->pdev->dev, |
526 | "Couldn't DMA map a %d byte buffer\n", | 526 | "Couldn't DMA map a %d byte buffer\n", |
527 | count); | 527 | count); |
@@ -539,7 +539,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area, | |||
539 | omap_start_dma(c->dma_channel); | 539 | omap_start_dma(c->dma_channel); |
540 | wait_for_completion(&c->dma_done); | 540 | wait_for_completion(&c->dma_done); |
541 | 541 | ||
542 | dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE); | 542 | dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE); |
543 | 543 | ||
544 | return 0; | 544 | return 0; |
545 | } | 545 | } |
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index 32f0ed33afe0..26caf2590dae 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c | |||
@@ -397,7 +397,8 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le | |||
397 | value = onenand_bufferram_address(this, block); | 397 | value = onenand_bufferram_address(this, block); |
398 | this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2); | 398 | this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2); |
399 | 399 | ||
400 | if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this)) | 400 | if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this) || |
401 | ONENAND_IS_4KB_PAGE(this)) | ||
401 | /* It is always BufferRAM0 */ | 402 | /* It is always BufferRAM0 */ |
402 | ONENAND_SET_BUFFERRAM0(this); | 403 | ONENAND_SET_BUFFERRAM0(this); |
403 | else | 404 | else |
@@ -426,7 +427,7 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le | |||
426 | case FLEXONENAND_CMD_RECOVER_LSB: | 427 | case FLEXONENAND_CMD_RECOVER_LSB: |
427 | case ONENAND_CMD_READ: | 428 | case ONENAND_CMD_READ: |
428 | case ONENAND_CMD_READOOB: | 429 | case ONENAND_CMD_READOOB: |
429 | if (ONENAND_IS_MLC(this)) | 430 | if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) |
430 | /* It is always BufferRAM0 */ | 431 | /* It is always BufferRAM0 */ |
431 | dataram = ONENAND_SET_BUFFERRAM0(this); | 432 | dataram = ONENAND_SET_BUFFERRAM0(this); |
432 | else | 433 | else |
@@ -466,11 +467,11 @@ static inline int onenand_read_ecc(struct onenand_chip *this) | |||
466 | { | 467 | { |
467 | int ecc, i, result = 0; | 468 | int ecc, i, result = 0; |
468 | 469 | ||
469 | if (!FLEXONENAND(this)) | 470 | if (!FLEXONENAND(this) && !ONENAND_IS_4KB_PAGE(this)) |
470 | return this->read_word(this->base + ONENAND_REG_ECC_STATUS); | 471 | return this->read_word(this->base + ONENAND_REG_ECC_STATUS); |
471 | 472 | ||
472 | for (i = 0; i < 4; i++) { | 473 | for (i = 0; i < 4; i++) { |
473 | ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i); | 474 | ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i*2); |
474 | if (likely(!ecc)) | 475 | if (likely(!ecc)) |
475 | continue; | 476 | continue; |
476 | if (ecc & FLEXONENAND_UNCORRECTABLE_ERROR) | 477 | if (ecc & FLEXONENAND_UNCORRECTABLE_ERROR) |
@@ -1425,7 +1426,7 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
1425 | int ret; | 1426 | int ret; |
1426 | 1427 | ||
1427 | onenand_get_device(mtd, FL_READING); | 1428 | onenand_get_device(mtd, FL_READING); |
1428 | ret = ONENAND_IS_MLC(this) ? | 1429 | ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ? |
1429 | onenand_mlc_read_ops_nolock(mtd, from, &ops) : | 1430 | onenand_mlc_read_ops_nolock(mtd, from, &ops) : |
1430 | onenand_read_ops_nolock(mtd, from, &ops); | 1431 | onenand_read_ops_nolock(mtd, from, &ops); |
1431 | onenand_release_device(mtd); | 1432 | onenand_release_device(mtd); |
@@ -1460,7 +1461,7 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from, | |||
1460 | 1461 | ||
1461 | onenand_get_device(mtd, FL_READING); | 1462 | onenand_get_device(mtd, FL_READING); |
1462 | if (ops->datbuf) | 1463 | if (ops->datbuf) |
1463 | ret = ONENAND_IS_MLC(this) ? | 1464 | ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ? |
1464 | onenand_mlc_read_ops_nolock(mtd, from, ops) : | 1465 | onenand_mlc_read_ops_nolock(mtd, from, ops) : |
1465 | onenand_read_ops_nolock(mtd, from, ops); | 1466 | onenand_read_ops_nolock(mtd, from, ops); |
1466 | else | 1467 | else |
@@ -1634,7 +1635,6 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to | |||
1634 | static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, size_t len) | 1635 | static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, size_t len) |
1635 | { | 1636 | { |
1636 | struct onenand_chip *this = mtd->priv; | 1637 | struct onenand_chip *this = mtd->priv; |
1637 | void __iomem *dataram; | ||
1638 | int ret = 0; | 1638 | int ret = 0; |
1639 | int thislen, column; | 1639 | int thislen, column; |
1640 | 1640 | ||
@@ -1654,10 +1654,9 @@ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, | |||
1654 | 1654 | ||
1655 | onenand_update_bufferram(mtd, addr, 1); | 1655 | onenand_update_bufferram(mtd, addr, 1); |
1656 | 1656 | ||
1657 | dataram = this->base + ONENAND_DATARAM; | 1657 | this->read_bufferram(mtd, ONENAND_DATARAM, this->verify_buf, 0, mtd->writesize); |
1658 | dataram += onenand_bufferram_offset(mtd, ONENAND_DATARAM); | ||
1659 | 1658 | ||
1660 | if (memcmp(buf, dataram + column, thislen)) | 1659 | if (memcmp(buf, this->verify_buf, thislen)) |
1661 | return -EBADMSG; | 1660 | return -EBADMSG; |
1662 | 1661 | ||
1663 | len -= thislen; | 1662 | len -= thislen; |
@@ -1926,7 +1925,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to, | |||
1926 | * 2 PLANE, MLC, and Flex-OneNAND do not support | 1925 | * 2 PLANE, MLC, and Flex-OneNAND do not support |
1927 | * write-while-program feature. | 1926 | * write-while-program feature. |
1928 | */ | 1927 | */ |
1929 | if (!ONENAND_IS_2PLANE(this) && !first) { | 1928 | if (!ONENAND_IS_2PLANE(this) && !ONENAND_IS_4KB_PAGE(this) && !first) { |
1930 | ONENAND_SET_PREV_BUFFERRAM(this); | 1929 | ONENAND_SET_PREV_BUFFERRAM(this); |
1931 | 1930 | ||
1932 | ret = this->wait(mtd, FL_WRITING); | 1931 | ret = this->wait(mtd, FL_WRITING); |
@@ -1957,7 +1956,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to, | |||
1957 | /* | 1956 | /* |
1958 | * 2 PLANE, MLC, and Flex-OneNAND wait here | 1957 | * 2 PLANE, MLC, and Flex-OneNAND wait here |
1959 | */ | 1958 | */ |
1960 | if (ONENAND_IS_2PLANE(this)) { | 1959 | if (ONENAND_IS_2PLANE(this) || ONENAND_IS_4KB_PAGE(this)) { |
1961 | ret = this->wait(mtd, FL_WRITING); | 1960 | ret = this->wait(mtd, FL_WRITING); |
1962 | 1961 | ||
1963 | /* In partial page write we don't update bufferram */ | 1962 | /* In partial page write we don't update bufferram */ |
@@ -2084,7 +2083,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to, | |||
2084 | memcpy(oobbuf + column, buf, thislen); | 2083 | memcpy(oobbuf + column, buf, thislen); |
2085 | this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize); | 2084 | this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize); |
2086 | 2085 | ||
2087 | if (ONENAND_IS_MLC(this)) { | 2086 | if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) { |
2088 | /* Set main area of DataRAM to 0xff*/ | 2087 | /* Set main area of DataRAM to 0xff*/ |
2089 | memset(this->page_buf, 0xff, mtd->writesize); | 2088 | memset(this->page_buf, 0xff, mtd->writesize); |
2090 | this->write_bufferram(mtd, ONENAND_DATARAM, | 2089 | this->write_bufferram(mtd, ONENAND_DATARAM, |
@@ -3027,7 +3026,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
3027 | this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0); | 3026 | this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0); |
3028 | this->wait(mtd, FL_OTPING); | 3027 | this->wait(mtd, FL_OTPING); |
3029 | 3028 | ||
3030 | ret = ONENAND_IS_MLC(this) ? | 3029 | ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ? |
3031 | onenand_mlc_read_ops_nolock(mtd, from, &ops) : | 3030 | onenand_mlc_read_ops_nolock(mtd, from, &ops) : |
3032 | onenand_read_ops_nolock(mtd, from, &ops); | 3031 | onenand_read_ops_nolock(mtd, from, &ops); |
3033 | 3032 | ||
@@ -3372,7 +3371,10 @@ static void onenand_check_features(struct mtd_info *mtd) | |||
3372 | /* Lock scheme */ | 3371 | /* Lock scheme */ |
3373 | switch (density) { | 3372 | switch (density) { |
3374 | case ONENAND_DEVICE_DENSITY_4Gb: | 3373 | case ONENAND_DEVICE_DENSITY_4Gb: |
3375 | this->options |= ONENAND_HAS_2PLANE; | 3374 | if (ONENAND_IS_DDP(this)) |
3375 | this->options |= ONENAND_HAS_2PLANE; | ||
3376 | else | ||
3377 | this->options |= ONENAND_HAS_4KB_PAGE; | ||
3376 | 3378 | ||
3377 | case ONENAND_DEVICE_DENSITY_2Gb: | 3379 | case ONENAND_DEVICE_DENSITY_2Gb: |
3378 | /* 2Gb DDP does not have 2 plane */ | 3380 | /* 2Gb DDP does not have 2 plane */ |
@@ -3393,7 +3395,7 @@ static void onenand_check_features(struct mtd_info *mtd) | |||
3393 | break; | 3395 | break; |
3394 | } | 3396 | } |
3395 | 3397 | ||
3396 | if (ONENAND_IS_MLC(this)) | 3398 | if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) |
3397 | this->options &= ~ONENAND_HAS_2PLANE; | 3399 | this->options &= ~ONENAND_HAS_2PLANE; |
3398 | 3400 | ||
3399 | if (FLEXONENAND(this)) { | 3401 | if (FLEXONENAND(this)) { |
@@ -3407,6 +3409,8 @@ static void onenand_check_features(struct mtd_info *mtd) | |||
3407 | printk(KERN_DEBUG "Chip support all block unlock\n"); | 3409 | printk(KERN_DEBUG "Chip support all block unlock\n"); |
3408 | if (this->options & ONENAND_HAS_2PLANE) | 3410 | if (this->options & ONENAND_HAS_2PLANE) |
3409 | printk(KERN_DEBUG "Chip has 2 plane\n"); | 3411 | printk(KERN_DEBUG "Chip has 2 plane\n"); |
3412 | if (this->options & ONENAND_HAS_4KB_PAGE) | ||
3413 | printk(KERN_DEBUG "Chip has 4KiB pagesize\n"); | ||
3410 | } | 3414 | } |
3411 | 3415 | ||
3412 | /** | 3416 | /** |
@@ -3759,6 +3763,12 @@ static int onenand_probe(struct mtd_info *mtd) | |||
3759 | /* Restore system configuration 1 */ | 3763 | /* Restore system configuration 1 */ |
3760 | this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1); | 3764 | this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1); |
3761 | 3765 | ||
3766 | /* Workaround */ | ||
3767 | if (syscfg & ONENAND_SYS_CFG1_SYNC_WRITE) { | ||
3768 | bram_maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID); | ||
3769 | bram_dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID); | ||
3770 | } | ||
3771 | |||
3762 | /* Check manufacturer ID */ | 3772 | /* Check manufacturer ID */ |
3763 | if (onenand_check_maf(bram_maf_id)) | 3773 | if (onenand_check_maf(bram_maf_id)) |
3764 | return -ENXIO; | 3774 | return -ENXIO; |
@@ -3778,6 +3788,9 @@ static int onenand_probe(struct mtd_info *mtd) | |||
3778 | this->device_id = dev_id; | 3788 | this->device_id = dev_id; |
3779 | this->version_id = ver_id; | 3789 | this->version_id = ver_id; |
3780 | 3790 | ||
3791 | /* Check OneNAND features */ | ||
3792 | onenand_check_features(mtd); | ||
3793 | |||
3781 | density = onenand_get_density(dev_id); | 3794 | density = onenand_get_density(dev_id); |
3782 | if (FLEXONENAND(this)) { | 3795 | if (FLEXONENAND(this)) { |
3783 | this->dies = ONENAND_IS_DDP(this) ? 2 : 1; | 3796 | this->dies = ONENAND_IS_DDP(this) ? 2 : 1; |
@@ -3799,7 +3812,7 @@ static int onenand_probe(struct mtd_info *mtd) | |||
3799 | /* The data buffer size is equal to page size */ | 3812 | /* The data buffer size is equal to page size */ |
3800 | mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE); | 3813 | mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE); |
3801 | /* We use the full BufferRAM */ | 3814 | /* We use the full BufferRAM */ |
3802 | if (ONENAND_IS_MLC(this)) | 3815 | if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) |
3803 | mtd->writesize <<= 1; | 3816 | mtd->writesize <<= 1; |
3804 | 3817 | ||
3805 | mtd->oobsize = mtd->writesize >> 5; | 3818 | mtd->oobsize = mtd->writesize >> 5; |
@@ -3829,9 +3842,6 @@ static int onenand_probe(struct mtd_info *mtd) | |||
3829 | else | 3842 | else |
3830 | mtd->size = this->chipsize; | 3843 | mtd->size = this->chipsize; |
3831 | 3844 | ||
3832 | /* Check OneNAND features */ | ||
3833 | onenand_check_features(mtd); | ||
3834 | |||
3835 | /* | 3845 | /* |
3836 | * We emulate the 4KiB page and 256KiB erase block size | 3846 | * We emulate the 4KiB page and 256KiB erase block size |
3837 | * But oobsize is still 64 bytes. | 3847 | * But oobsize is still 64 bytes. |
@@ -3926,6 +3936,13 @@ int onenand_scan(struct mtd_info *mtd, int maxchips) | |||
3926 | __func__); | 3936 | __func__); |
3927 | return -ENOMEM; | 3937 | return -ENOMEM; |
3928 | } | 3938 | } |
3939 | #ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE | ||
3940 | this->verify_buf = kzalloc(mtd->writesize, GFP_KERNEL); | ||
3941 | if (!this->verify_buf) { | ||
3942 | kfree(this->page_buf); | ||
3943 | return -ENOMEM; | ||
3944 | } | ||
3945 | #endif | ||
3929 | this->options |= ONENAND_PAGEBUF_ALLOC; | 3946 | this->options |= ONENAND_PAGEBUF_ALLOC; |
3930 | } | 3947 | } |
3931 | if (!this->oob_buf) { | 3948 | if (!this->oob_buf) { |
@@ -4053,8 +4070,12 @@ void onenand_release(struct mtd_info *mtd) | |||
4053 | kfree(this->bbm); | 4070 | kfree(this->bbm); |
4054 | } | 4071 | } |
4055 | /* Buffers allocated by onenand_scan */ | 4072 | /* Buffers allocated by onenand_scan */ |
4056 | if (this->options & ONENAND_PAGEBUF_ALLOC) | 4073 | if (this->options & ONENAND_PAGEBUF_ALLOC) { |
4057 | kfree(this->page_buf); | 4074 | kfree(this->page_buf); |
4075 | #ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE | ||
4076 | kfree(this->verify_buf); | ||
4077 | #endif | ||
4078 | } | ||
4058 | if (this->options & ONENAND_OOBBUF_ALLOC) | 4079 | if (this->options & ONENAND_OOBBUF_ALLOC) |
4059 | kfree(this->oob_buf); | 4080 | kfree(this->oob_buf); |
4060 | kfree(mtd->eraseregions); | 4081 | kfree(mtd->eraseregions); |
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c new file mode 100644 index 000000000000..2750317cb58f --- /dev/null +++ b/drivers/mtd/onenand/samsung.c | |||
@@ -0,0 +1,1071 @@ | |||
1 | /* | ||
2 | * Samsung S3C64XX/S5PC1XX OneNAND driver | ||
3 | * | ||
4 | * Copyright © 2008-2010 Samsung Electronics | ||
5 | * Kyungmin Park <kyungmin.park@samsung.com> | ||
6 | * Marek Szyprowski <m.szyprowski@samsung.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * Implementation: | ||
13 | * S3C64XX and S5PC100: emulate the pseudo BufferRAM | ||
14 | * S5PC110: use DMA | ||
15 | */ | ||
16 | |||
17 | #include <linux/module.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/mtd/mtd.h> | ||
22 | #include <linux/mtd/onenand.h> | ||
23 | #include <linux/mtd/partitions.h> | ||
24 | #include <linux/dma-mapping.h> | ||
25 | |||
26 | #include <asm/mach/flash.h> | ||
27 | #include <plat/regs-onenand.h> | ||
28 | |||
29 | #include <linux/io.h> | ||
30 | |||
31 | enum soc_type { | ||
32 | TYPE_S3C6400, | ||
33 | TYPE_S3C6410, | ||
34 | TYPE_S5PC100, | ||
35 | TYPE_S5PC110, | ||
36 | }; | ||
37 | |||
38 | #define ONENAND_ERASE_STATUS 0x00 | ||
39 | #define ONENAND_MULTI_ERASE_SET 0x01 | ||
40 | #define ONENAND_ERASE_START 0x03 | ||
41 | #define ONENAND_UNLOCK_START 0x08 | ||
42 | #define ONENAND_UNLOCK_END 0x09 | ||
43 | #define ONENAND_LOCK_START 0x0A | ||
44 | #define ONENAND_LOCK_END 0x0B | ||
45 | #define ONENAND_LOCK_TIGHT_START 0x0C | ||
46 | #define ONENAND_LOCK_TIGHT_END 0x0D | ||
47 | #define ONENAND_UNLOCK_ALL 0x0E | ||
48 | #define ONENAND_OTP_ACCESS 0x12 | ||
49 | #define ONENAND_SPARE_ACCESS_ONLY 0x13 | ||
50 | #define ONENAND_MAIN_ACCESS_ONLY 0x14 | ||
51 | #define ONENAND_ERASE_VERIFY 0x15 | ||
52 | #define ONENAND_MAIN_SPARE_ACCESS 0x16 | ||
53 | #define ONENAND_PIPELINE_READ 0x4000 | ||
54 | |||
55 | #define MAP_00 (0x0) | ||
56 | #define MAP_01 (0x1) | ||
57 | #define MAP_10 (0x2) | ||
58 | #define MAP_11 (0x3) | ||
59 | |||
60 | #define S3C64XX_CMD_MAP_SHIFT 24 | ||
61 | #define S5PC1XX_CMD_MAP_SHIFT 26 | ||
62 | |||
63 | #define S3C6400_FBA_SHIFT 10 | ||
64 | #define S3C6400_FPA_SHIFT 4 | ||
65 | #define S3C6400_FSA_SHIFT 2 | ||
66 | |||
67 | #define S3C6410_FBA_SHIFT 12 | ||
68 | #define S3C6410_FPA_SHIFT 6 | ||
69 | #define S3C6410_FSA_SHIFT 4 | ||
70 | |||
71 | #define S5PC100_FBA_SHIFT 13 | ||
72 | #define S5PC100_FPA_SHIFT 7 | ||
73 | #define S5PC100_FSA_SHIFT 5 | ||
74 | |||
75 | /* S5PC110 specific definitions */ | ||
76 | #define S5PC110_DMA_SRC_ADDR 0x400 | ||
77 | #define S5PC110_DMA_SRC_CFG 0x404 | ||
78 | #define S5PC110_DMA_DST_ADDR 0x408 | ||
79 | #define S5PC110_DMA_DST_CFG 0x40C | ||
80 | #define S5PC110_DMA_TRANS_SIZE 0x414 | ||
81 | #define S5PC110_DMA_TRANS_CMD 0x418 | ||
82 | #define S5PC110_DMA_TRANS_STATUS 0x41C | ||
83 | #define S5PC110_DMA_TRANS_DIR 0x420 | ||
84 | |||
85 | #define S5PC110_DMA_CFG_SINGLE (0x0 << 16) | ||
86 | #define S5PC110_DMA_CFG_4BURST (0x2 << 16) | ||
87 | #define S5PC110_DMA_CFG_8BURST (0x3 << 16) | ||
88 | #define S5PC110_DMA_CFG_16BURST (0x4 << 16) | ||
89 | |||
90 | #define S5PC110_DMA_CFG_INC (0x0 << 8) | ||
91 | #define S5PC110_DMA_CFG_CNT (0x1 << 8) | ||
92 | |||
93 | #define S5PC110_DMA_CFG_8BIT (0x0 << 0) | ||
94 | #define S5PC110_DMA_CFG_16BIT (0x1 << 0) | ||
95 | #define S5PC110_DMA_CFG_32BIT (0x2 << 0) | ||
96 | |||
97 | #define S5PC110_DMA_SRC_CFG_READ (S5PC110_DMA_CFG_16BURST | \ | ||
98 | S5PC110_DMA_CFG_INC | \ | ||
99 | S5PC110_DMA_CFG_16BIT) | ||
100 | #define S5PC110_DMA_DST_CFG_READ (S5PC110_DMA_CFG_16BURST | \ | ||
101 | S5PC110_DMA_CFG_INC | \ | ||
102 | S5PC110_DMA_CFG_32BIT) | ||
103 | #define S5PC110_DMA_SRC_CFG_WRITE (S5PC110_DMA_CFG_16BURST | \ | ||
104 | S5PC110_DMA_CFG_INC | \ | ||
105 | S5PC110_DMA_CFG_32BIT) | ||
106 | #define S5PC110_DMA_DST_CFG_WRITE (S5PC110_DMA_CFG_16BURST | \ | ||
107 | S5PC110_DMA_CFG_INC | \ | ||
108 | S5PC110_DMA_CFG_16BIT) | ||
109 | |||
110 | #define S5PC110_DMA_TRANS_CMD_TDC (0x1 << 18) | ||
111 | #define S5PC110_DMA_TRANS_CMD_TEC (0x1 << 16) | ||
112 | #define S5PC110_DMA_TRANS_CMD_TR (0x1 << 0) | ||
113 | |||
114 | #define S5PC110_DMA_TRANS_STATUS_TD (0x1 << 18) | ||
115 | #define S5PC110_DMA_TRANS_STATUS_TB (0x1 << 17) | ||
116 | #define S5PC110_DMA_TRANS_STATUS_TE (0x1 << 16) | ||
117 | |||
118 | #define S5PC110_DMA_DIR_READ 0x0 | ||
119 | #define S5PC110_DMA_DIR_WRITE 0x1 | ||
120 | |||
121 | struct s3c_onenand { | ||
122 | struct mtd_info *mtd; | ||
123 | struct platform_device *pdev; | ||
124 | enum soc_type type; | ||
125 | void __iomem *base; | ||
126 | struct resource *base_res; | ||
127 | void __iomem *ahb_addr; | ||
128 | struct resource *ahb_res; | ||
129 | int bootram_command; | ||
130 | void __iomem *page_buf; | ||
131 | void __iomem *oob_buf; | ||
132 | unsigned int (*mem_addr)(int fba, int fpa, int fsa); | ||
133 | unsigned int (*cmd_map)(unsigned int type, unsigned int val); | ||
134 | void __iomem *dma_addr; | ||
135 | struct resource *dma_res; | ||
136 | unsigned long phys_base; | ||
137 | #ifdef CONFIG_MTD_PARTITIONS | ||
138 | struct mtd_partition *parts; | ||
139 | #endif | ||
140 | }; | ||
141 | |||
142 | #define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1))) | ||
143 | #define CMD_MAP_01(dev, mem_addr) (dev->cmd_map(MAP_01, (mem_addr))) | ||
144 | #define CMD_MAP_10(dev, mem_addr) (dev->cmd_map(MAP_10, (mem_addr))) | ||
145 | #define CMD_MAP_11(dev, addr) (dev->cmd_map(MAP_11, ((addr) << 2))) | ||
146 | |||
147 | static struct s3c_onenand *onenand; | ||
148 | |||
149 | #ifdef CONFIG_MTD_PARTITIONS | ||
150 | static const char *part_probes[] = { "cmdlinepart", NULL, }; | ||
151 | #endif | ||
152 | |||
153 | static inline int s3c_read_reg(int offset) | ||
154 | { | ||
155 | return readl(onenand->base + offset); | ||
156 | } | ||
157 | |||
158 | static inline void s3c_write_reg(int value, int offset) | ||
159 | { | ||
160 | writel(value, onenand->base + offset); | ||
161 | } | ||
162 | |||
163 | static inline int s3c_read_cmd(unsigned int cmd) | ||
164 | { | ||
165 | return readl(onenand->ahb_addr + cmd); | ||
166 | } | ||
167 | |||
168 | static inline void s3c_write_cmd(int value, unsigned int cmd) | ||
169 | { | ||
170 | writel(value, onenand->ahb_addr + cmd); | ||
171 | } | ||
172 | |||
173 | #ifdef SAMSUNG_DEBUG | ||
174 | static void s3c_dump_reg(void) | ||
175 | { | ||
176 | int i; | ||
177 | |||
178 | for (i = 0; i < 0x400; i += 0x40) { | ||
179 | printk(KERN_INFO "0x%08X: 0x%08x 0x%08x 0x%08x 0x%08x\n", | ||
180 | (unsigned int) onenand->base + i, | ||
181 | s3c_read_reg(i), s3c_read_reg(i + 0x10), | ||
182 | s3c_read_reg(i + 0x20), s3c_read_reg(i + 0x30)); | ||
183 | } | ||
184 | } | ||
185 | #endif | ||
186 | |||
187 | static unsigned int s3c64xx_cmd_map(unsigned type, unsigned val) | ||
188 | { | ||
189 | return (type << S3C64XX_CMD_MAP_SHIFT) | val; | ||
190 | } | ||
191 | |||
192 | static unsigned int s5pc1xx_cmd_map(unsigned type, unsigned val) | ||
193 | { | ||
194 | return (type << S5PC1XX_CMD_MAP_SHIFT) | val; | ||
195 | } | ||
196 | |||
197 | static unsigned int s3c6400_mem_addr(int fba, int fpa, int fsa) | ||
198 | { | ||
199 | return (fba << S3C6400_FBA_SHIFT) | (fpa << S3C6400_FPA_SHIFT) | | ||
200 | (fsa << S3C6400_FSA_SHIFT); | ||
201 | } | ||
202 | |||
203 | static unsigned int s3c6410_mem_addr(int fba, int fpa, int fsa) | ||
204 | { | ||
205 | return (fba << S3C6410_FBA_SHIFT) | (fpa << S3C6410_FPA_SHIFT) | | ||
206 | (fsa << S3C6410_FSA_SHIFT); | ||
207 | } | ||
208 | |||
209 | static unsigned int s5pc100_mem_addr(int fba, int fpa, int fsa) | ||
210 | { | ||
211 | return (fba << S5PC100_FBA_SHIFT) | (fpa << S5PC100_FPA_SHIFT) | | ||
212 | (fsa << S5PC100_FSA_SHIFT); | ||
213 | } | ||
214 | |||
215 | static void s3c_onenand_reset(void) | ||
216 | { | ||
217 | unsigned long timeout = 0x10000; | ||
218 | int stat; | ||
219 | |||
220 | s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET); | ||
221 | while (1 && timeout--) { | ||
222 | stat = s3c_read_reg(INT_ERR_STAT_OFFSET); | ||
223 | if (stat & RST_CMP) | ||
224 | break; | ||
225 | } | ||
226 | stat = s3c_read_reg(INT_ERR_STAT_OFFSET); | ||
227 | s3c_write_reg(stat, INT_ERR_ACK_OFFSET); | ||
228 | |||
229 | /* Clear interrupt */ | ||
230 | s3c_write_reg(0x0, INT_ERR_ACK_OFFSET); | ||
231 | /* Clear the ECC status */ | ||
232 | s3c_write_reg(0x0, ECC_ERR_STAT_OFFSET); | ||
233 | } | ||
234 | |||
235 | static unsigned short s3c_onenand_readw(void __iomem *addr) | ||
236 | { | ||
237 | struct onenand_chip *this = onenand->mtd->priv; | ||
238 | struct device *dev = &onenand->pdev->dev; | ||
239 | int reg = addr - this->base; | ||
240 | int word_addr = reg >> 1; | ||
241 | int value; | ||
242 | |||
243 | /* It's used for probing time */ | ||
244 | switch (reg) { | ||
245 | case ONENAND_REG_MANUFACTURER_ID: | ||
246 | return s3c_read_reg(MANUFACT_ID_OFFSET); | ||
247 | case ONENAND_REG_DEVICE_ID: | ||
248 | return s3c_read_reg(DEVICE_ID_OFFSET); | ||
249 | case ONENAND_REG_VERSION_ID: | ||
250 | return s3c_read_reg(FLASH_VER_ID_OFFSET); | ||
251 | case ONENAND_REG_DATA_BUFFER_SIZE: | ||
252 | return s3c_read_reg(DATA_BUF_SIZE_OFFSET); | ||
253 | case ONENAND_REG_TECHNOLOGY: | ||
254 | return s3c_read_reg(TECH_OFFSET); | ||
255 | case ONENAND_REG_SYS_CFG1: | ||
256 | return s3c_read_reg(MEM_CFG_OFFSET); | ||
257 | |||
258 | /* Used at unlock all status */ | ||
259 | case ONENAND_REG_CTRL_STATUS: | ||
260 | return 0; | ||
261 | |||
262 | case ONENAND_REG_WP_STATUS: | ||
263 | return ONENAND_WP_US; | ||
264 | |||
265 | default: | ||
266 | break; | ||
267 | } | ||
268 | |||
269 | /* BootRAM access control */ | ||
270 | if ((unsigned int) addr < ONENAND_DATARAM && onenand->bootram_command) { | ||
271 | if (word_addr == 0) | ||
272 | return s3c_read_reg(MANUFACT_ID_OFFSET); | ||
273 | if (word_addr == 1) | ||
274 | return s3c_read_reg(DEVICE_ID_OFFSET); | ||
275 | if (word_addr == 2) | ||
276 | return s3c_read_reg(FLASH_VER_ID_OFFSET); | ||
277 | } | ||
278 | |||
279 | value = s3c_read_cmd(CMD_MAP_11(onenand, word_addr)) & 0xffff; | ||
280 | dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__, | ||
281 | word_addr, value); | ||
282 | return value; | ||
283 | } | ||
284 | |||
285 | static void s3c_onenand_writew(unsigned short value, void __iomem *addr) | ||
286 | { | ||
287 | struct onenand_chip *this = onenand->mtd->priv; | ||
288 | struct device *dev = &onenand->pdev->dev; | ||
289 | unsigned int reg = addr - this->base; | ||
290 | unsigned int word_addr = reg >> 1; | ||
291 | |||
292 | /* It's used for probing time */ | ||
293 | switch (reg) { | ||
294 | case ONENAND_REG_SYS_CFG1: | ||
295 | s3c_write_reg(value, MEM_CFG_OFFSET); | ||
296 | return; | ||
297 | |||
298 | case ONENAND_REG_START_ADDRESS1: | ||
299 | case ONENAND_REG_START_ADDRESS2: | ||
300 | return; | ||
301 | |||
302 | /* Lock/lock-tight/unlock/unlock_all */ | ||
303 | case ONENAND_REG_START_BLOCK_ADDRESS: | ||
304 | return; | ||
305 | |||
306 | default: | ||
307 | break; | ||
308 | } | ||
309 | |||
310 | /* BootRAM access control */ | ||
311 | if ((unsigned int)addr < ONENAND_DATARAM) { | ||
312 | if (value == ONENAND_CMD_READID) { | ||
313 | onenand->bootram_command = 1; | ||
314 | return; | ||
315 | } | ||
316 | if (value == ONENAND_CMD_RESET) { | ||
317 | s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET); | ||
318 | onenand->bootram_command = 0; | ||
319 | return; | ||
320 | } | ||
321 | } | ||
322 | |||
323 | dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__, | ||
324 | word_addr, value); | ||
325 | |||
326 | s3c_write_cmd(value, CMD_MAP_11(onenand, word_addr)); | ||
327 | } | ||
328 | |||
329 | static int s3c_onenand_wait(struct mtd_info *mtd, int state) | ||
330 | { | ||
331 | struct device *dev = &onenand->pdev->dev; | ||
332 | unsigned int flags = INT_ACT; | ||
333 | unsigned int stat, ecc; | ||
334 | unsigned long timeout; | ||
335 | |||
336 | switch (state) { | ||
337 | case FL_READING: | ||
338 | flags |= BLK_RW_CMP | LOAD_CMP; | ||
339 | break; | ||
340 | case FL_WRITING: | ||
341 | flags |= BLK_RW_CMP | PGM_CMP; | ||
342 | break; | ||
343 | case FL_ERASING: | ||
344 | flags |= BLK_RW_CMP | ERS_CMP; | ||
345 | break; | ||
346 | case FL_LOCKING: | ||
347 | flags |= BLK_RW_CMP; | ||
348 | break; | ||
349 | default: | ||
350 | break; | ||
351 | } | ||
352 | |||
353 | /* The 20 msec is enough */ | ||
354 | timeout = jiffies + msecs_to_jiffies(20); | ||
355 | while (time_before(jiffies, timeout)) { | ||
356 | stat = s3c_read_reg(INT_ERR_STAT_OFFSET); | ||
357 | if (stat & flags) | ||
358 | break; | ||
359 | |||
360 | if (state != FL_READING) | ||
361 | cond_resched(); | ||
362 | } | ||
363 | /* To get correct interrupt status in timeout case */ | ||
364 | stat = s3c_read_reg(INT_ERR_STAT_OFFSET); | ||
365 | s3c_write_reg(stat, INT_ERR_ACK_OFFSET); | ||
366 | |||
367 | /* | ||
368 | * In the Spec. it checks the controller status first | ||
369 | * However if you get the correct information in case of | ||
370 | * power off recovery (POR) test, it should read ECC status first | ||
371 | */ | ||
372 | if (stat & LOAD_CMP) { | ||
373 | ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET); | ||
374 | if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) { | ||
375 | dev_info(dev, "%s: ECC error = 0x%04x\n", __func__, | ||
376 | ecc); | ||
377 | mtd->ecc_stats.failed++; | ||
378 | return -EBADMSG; | ||
379 | } | ||
380 | } | ||
381 | |||
382 | if (stat & (LOCKED_BLK | ERS_FAIL | PGM_FAIL | LD_FAIL_ECC_ERR)) { | ||
383 | dev_info(dev, "%s: controller error = 0x%04x\n", __func__, | ||
384 | stat); | ||
385 | if (stat & LOCKED_BLK) | ||
386 | dev_info(dev, "%s: it's locked error = 0x%04x\n", | ||
387 | __func__, stat); | ||
388 | |||
389 | return -EIO; | ||
390 | } | ||
391 | |||
392 | return 0; | ||
393 | } | ||
394 | |||
395 | static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, | ||
396 | size_t len) | ||
397 | { | ||
398 | struct onenand_chip *this = mtd->priv; | ||
399 | unsigned int *m, *s; | ||
400 | int fba, fpa, fsa = 0; | ||
401 | unsigned int mem_addr, cmd_map_01, cmd_map_10; | ||
402 | int i, mcount, scount; | ||
403 | int index; | ||
404 | |||
405 | fba = (int) (addr >> this->erase_shift); | ||
406 | fpa = (int) (addr >> this->page_shift); | ||
407 | fpa &= this->page_mask; | ||
408 | |||
409 | mem_addr = onenand->mem_addr(fba, fpa, fsa); | ||
410 | cmd_map_01 = CMD_MAP_01(onenand, mem_addr); | ||
411 | cmd_map_10 = CMD_MAP_10(onenand, mem_addr); | ||
412 | |||
413 | switch (cmd) { | ||
414 | case ONENAND_CMD_READ: | ||
415 | case ONENAND_CMD_READOOB: | ||
416 | case ONENAND_CMD_BUFFERRAM: | ||
417 | ONENAND_SET_NEXT_BUFFERRAM(this); | ||
418 | default: | ||
419 | break; | ||
420 | } | ||
421 | |||
422 | index = ONENAND_CURRENT_BUFFERRAM(this); | ||
423 | |||
424 | /* | ||
425 | * Emulate Two BufferRAMs and access with 4 bytes pointer | ||
426 | */ | ||
427 | m = (unsigned int *) onenand->page_buf; | ||
428 | s = (unsigned int *) onenand->oob_buf; | ||
429 | |||
430 | if (index) { | ||
431 | m += (this->writesize >> 2); | ||
432 | s += (mtd->oobsize >> 2); | ||
433 | } | ||
434 | |||
435 | mcount = mtd->writesize >> 2; | ||
436 | scount = mtd->oobsize >> 2; | ||
437 | |||
438 | switch (cmd) { | ||
439 | case ONENAND_CMD_READ: | ||
440 | /* Main */ | ||
441 | for (i = 0; i < mcount; i++) | ||
442 | *m++ = s3c_read_cmd(cmd_map_01); | ||
443 | return 0; | ||
444 | |||
445 | case ONENAND_CMD_READOOB: | ||
446 | s3c_write_reg(TSRF, TRANS_SPARE_OFFSET); | ||
447 | /* Main */ | ||
448 | for (i = 0; i < mcount; i++) | ||
449 | *m++ = s3c_read_cmd(cmd_map_01); | ||
450 | |||
451 | /* Spare */ | ||
452 | for (i = 0; i < scount; i++) | ||
453 | *s++ = s3c_read_cmd(cmd_map_01); | ||
454 | |||
455 | s3c_write_reg(0, TRANS_SPARE_OFFSET); | ||
456 | return 0; | ||
457 | |||
458 | case ONENAND_CMD_PROG: | ||
459 | /* Main */ | ||
460 | for (i = 0; i < mcount; i++) | ||
461 | s3c_write_cmd(*m++, cmd_map_01); | ||
462 | return 0; | ||
463 | |||
464 | case ONENAND_CMD_PROGOOB: | ||
465 | s3c_write_reg(TSRF, TRANS_SPARE_OFFSET); | ||
466 | |||
467 | /* Main - dummy write */ | ||
468 | for (i = 0; i < mcount; i++) | ||
469 | s3c_write_cmd(0xffffffff, cmd_map_01); | ||
470 | |||
471 | /* Spare */ | ||
472 | for (i = 0; i < scount; i++) | ||
473 | s3c_write_cmd(*s++, cmd_map_01); | ||
474 | |||
475 | s3c_write_reg(0, TRANS_SPARE_OFFSET); | ||
476 | return 0; | ||
477 | |||
478 | case ONENAND_CMD_UNLOCK_ALL: | ||
479 | s3c_write_cmd(ONENAND_UNLOCK_ALL, cmd_map_10); | ||
480 | return 0; | ||
481 | |||
482 | case ONENAND_CMD_ERASE: | ||
483 | s3c_write_cmd(ONENAND_ERASE_START, cmd_map_10); | ||
484 | return 0; | ||
485 | |||
486 | default: | ||
487 | break; | ||
488 | } | ||
489 | |||
490 | return 0; | ||
491 | } | ||
492 | |||
493 | static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area) | ||
494 | { | ||
495 | struct onenand_chip *this = mtd->priv; | ||
496 | int index = ONENAND_CURRENT_BUFFERRAM(this); | ||
497 | unsigned char *p; | ||
498 | |||
499 | if (area == ONENAND_DATARAM) { | ||
500 | p = (unsigned char *) onenand->page_buf; | ||
501 | if (index == 1) | ||
502 | p += this->writesize; | ||
503 | } else { | ||
504 | p = (unsigned char *) onenand->oob_buf; | ||
505 | if (index == 1) | ||
506 | p += mtd->oobsize; | ||
507 | } | ||
508 | |||
509 | return p; | ||
510 | } | ||
511 | |||
512 | static int onenand_read_bufferram(struct mtd_info *mtd, int area, | ||
513 | unsigned char *buffer, int offset, | ||
514 | size_t count) | ||
515 | { | ||
516 | unsigned char *p; | ||
517 | |||
518 | p = s3c_get_bufferram(mtd, area); | ||
519 | memcpy(buffer, p + offset, count); | ||
520 | return 0; | ||
521 | } | ||
522 | |||
523 | static int onenand_write_bufferram(struct mtd_info *mtd, int area, | ||
524 | const unsigned char *buffer, int offset, | ||
525 | size_t count) | ||
526 | { | ||
527 | unsigned char *p; | ||
528 | |||
529 | p = s3c_get_bufferram(mtd, area); | ||
530 | memcpy(p + offset, buffer, count); | ||
531 | return 0; | ||
532 | } | ||
533 | |||
534 | static int s5pc110_dma_ops(void *dst, void *src, size_t count, int direction) | ||
535 | { | ||
536 | void __iomem *base = onenand->dma_addr; | ||
537 | int status; | ||
538 | |||
539 | writel(src, base + S5PC110_DMA_SRC_ADDR); | ||
540 | writel(dst, base + S5PC110_DMA_DST_ADDR); | ||
541 | |||
542 | if (direction == S5PC110_DMA_DIR_READ) { | ||
543 | writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG); | ||
544 | writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG); | ||
545 | } else { | ||
546 | writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG); | ||
547 | writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG); | ||
548 | } | ||
549 | |||
550 | writel(count, base + S5PC110_DMA_TRANS_SIZE); | ||
551 | writel(direction, base + S5PC110_DMA_TRANS_DIR); | ||
552 | |||
553 | writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD); | ||
554 | |||
555 | do { | ||
556 | status = readl(base + S5PC110_DMA_TRANS_STATUS); | ||
557 | } while (!(status & S5PC110_DMA_TRANS_STATUS_TD)); | ||
558 | |||
559 | if (status & S5PC110_DMA_TRANS_STATUS_TE) { | ||
560 | writel(S5PC110_DMA_TRANS_CMD_TEC, base + S5PC110_DMA_TRANS_CMD); | ||
561 | writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD); | ||
562 | return -EIO; | ||
563 | } | ||
564 | |||
565 | writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD); | ||
566 | |||
567 | return 0; | ||
568 | } | ||
569 | |||
570 | static int s5pc110_read_bufferram(struct mtd_info *mtd, int area, | ||
571 | unsigned char *buffer, int offset, size_t count) | ||
572 | { | ||
573 | struct onenand_chip *this = mtd->priv; | ||
574 | void __iomem *bufferram; | ||
575 | void __iomem *p; | ||
576 | void *buf = (void *) buffer; | ||
577 | dma_addr_t dma_src, dma_dst; | ||
578 | int err; | ||
579 | |||
580 | p = bufferram = this->base + area; | ||
581 | if (ONENAND_CURRENT_BUFFERRAM(this)) { | ||
582 | if (area == ONENAND_DATARAM) | ||
583 | p += this->writesize; | ||
584 | else | ||
585 | p += mtd->oobsize; | ||
586 | } | ||
587 | |||
588 | if (offset & 3 || (size_t) buf & 3 || | ||
589 | !onenand->dma_addr || count != mtd->writesize) | ||
590 | goto normal; | ||
591 | |||
592 | /* Handle vmalloc address */ | ||
593 | if (buf >= high_memory) { | ||
594 | struct page *page; | ||
595 | |||
596 | if (((size_t) buf & PAGE_MASK) != | ||
597 | ((size_t) (buf + count - 1) & PAGE_MASK)) | ||
598 | goto normal; | ||
599 | page = vmalloc_to_page(buf); | ||
600 | if (!page) | ||
601 | goto normal; | ||
602 | buf = page_address(page) + ((size_t) buf & ~PAGE_MASK); | ||
603 | } | ||
604 | |||
605 | /* DMA routine */ | ||
606 | dma_src = onenand->phys_base + (p - this->base); | ||
607 | dma_dst = dma_map_single(&onenand->pdev->dev, | ||
608 | buf, count, DMA_FROM_DEVICE); | ||
609 | if (dma_mapping_error(&onenand->pdev->dev, dma_dst)) { | ||
610 | dev_err(&onenand->pdev->dev, | ||
611 | "Couldn't map a %d byte buffer for DMA\n", count); | ||
612 | goto normal; | ||
613 | } | ||
614 | err = s5pc110_dma_ops((void *) dma_dst, (void *) dma_src, | ||
615 | count, S5PC110_DMA_DIR_READ); | ||
616 | dma_unmap_single(&onenand->pdev->dev, dma_dst, count, DMA_FROM_DEVICE); | ||
617 | |||
618 | if (!err) | ||
619 | return 0; | ||
620 | |||
621 | normal: | ||
622 | if (count != mtd->writesize) { | ||
623 | /* Copy the bufferram to memory to prevent unaligned access */ | ||
624 | memcpy(this->page_buf, bufferram, mtd->writesize); | ||
625 | p = this->page_buf + offset; | ||
626 | } | ||
627 | |||
628 | memcpy(buffer, p, count); | ||
629 | |||
630 | return 0; | ||
631 | } | ||
632 | |||
633 | static int s3c_onenand_bbt_wait(struct mtd_info *mtd, int state) | ||
634 | { | ||
635 | unsigned int flags = INT_ACT | LOAD_CMP; | ||
636 | unsigned int stat; | ||
637 | unsigned long timeout; | ||
638 | |||
639 | /* The 20 msec is enough */ | ||
640 | timeout = jiffies + msecs_to_jiffies(20); | ||
641 | while (time_before(jiffies, timeout)) { | ||
642 | stat = s3c_read_reg(INT_ERR_STAT_OFFSET); | ||
643 | if (stat & flags) | ||
644 | break; | ||
645 | } | ||
646 | /* To get correct interrupt status in timeout case */ | ||
647 | stat = s3c_read_reg(INT_ERR_STAT_OFFSET); | ||
648 | s3c_write_reg(stat, INT_ERR_ACK_OFFSET); | ||
649 | |||
650 | if (stat & LD_FAIL_ECC_ERR) { | ||
651 | s3c_onenand_reset(); | ||
652 | return ONENAND_BBT_READ_ERROR; | ||
653 | } | ||
654 | |||
655 | if (stat & LOAD_CMP) { | ||
656 | int ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET); | ||
657 | if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) { | ||
658 | s3c_onenand_reset(); | ||
659 | return ONENAND_BBT_READ_ERROR; | ||
660 | } | ||
661 | } | ||
662 | |||
663 | return 0; | ||
664 | } | ||
665 | |||
666 | static void s3c_onenand_check_lock_status(struct mtd_info *mtd) | ||
667 | { | ||
668 | struct onenand_chip *this = mtd->priv; | ||
669 | struct device *dev = &onenand->pdev->dev; | ||
670 | unsigned int block, end; | ||
671 | int tmp; | ||
672 | |||
673 | end = this->chipsize >> this->erase_shift; | ||
674 | |||
675 | for (block = 0; block < end; block++) { | ||
676 | unsigned int mem_addr = onenand->mem_addr(block, 0, 0); | ||
677 | tmp = s3c_read_cmd(CMD_MAP_01(onenand, mem_addr)); | ||
678 | |||
679 | if (s3c_read_reg(INT_ERR_STAT_OFFSET) & LOCKED_BLK) { | ||
680 | dev_err(dev, "block %d is write-protected!\n", block); | ||
681 | s3c_write_reg(LOCKED_BLK, INT_ERR_ACK_OFFSET); | ||
682 | } | ||
683 | } | ||
684 | } | ||
685 | |||
686 | static void s3c_onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, | ||
687 | size_t len, int cmd) | ||
688 | { | ||
689 | struct onenand_chip *this = mtd->priv; | ||
690 | int start, end, start_mem_addr, end_mem_addr; | ||
691 | |||
692 | start = ofs >> this->erase_shift; | ||
693 | start_mem_addr = onenand->mem_addr(start, 0, 0); | ||
694 | end = start + (len >> this->erase_shift) - 1; | ||
695 | end_mem_addr = onenand->mem_addr(end, 0, 0); | ||
696 | |||
697 | if (cmd == ONENAND_CMD_LOCK) { | ||
698 | s3c_write_cmd(ONENAND_LOCK_START, CMD_MAP_10(onenand, | ||
699 | start_mem_addr)); | ||
700 | s3c_write_cmd(ONENAND_LOCK_END, CMD_MAP_10(onenand, | ||
701 | end_mem_addr)); | ||
702 | } else { | ||
703 | s3c_write_cmd(ONENAND_UNLOCK_START, CMD_MAP_10(onenand, | ||
704 | start_mem_addr)); | ||
705 | s3c_write_cmd(ONENAND_UNLOCK_END, CMD_MAP_10(onenand, | ||
706 | end_mem_addr)); | ||
707 | } | ||
708 | |||
709 | this->wait(mtd, FL_LOCKING); | ||
710 | } | ||
711 | |||
712 | static void s3c_unlock_all(struct mtd_info *mtd) | ||
713 | { | ||
714 | struct onenand_chip *this = mtd->priv; | ||
715 | loff_t ofs = 0; | ||
716 | size_t len = this->chipsize; | ||
717 | |||
718 | if (this->options & ONENAND_HAS_UNLOCK_ALL) { | ||
719 | /* Write unlock command */ | ||
720 | this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0); | ||
721 | |||
722 | /* No need to check return value */ | ||
723 | this->wait(mtd, FL_LOCKING); | ||
724 | |||
725 | /* Workaround for all block unlock in DDP */ | ||
726 | if (!ONENAND_IS_DDP(this)) { | ||
727 | s3c_onenand_check_lock_status(mtd); | ||
728 | return; | ||
729 | } | ||
730 | |||
731 | /* All blocks on another chip */ | ||
732 | ofs = this->chipsize >> 1; | ||
733 | len = this->chipsize >> 1; | ||
734 | } | ||
735 | |||
736 | s3c_onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK); | ||
737 | |||
738 | s3c_onenand_check_lock_status(mtd); | ||
739 | } | ||
740 | |||
741 | static void s3c_onenand_setup(struct mtd_info *mtd) | ||
742 | { | ||
743 | struct onenand_chip *this = mtd->priv; | ||
744 | |||
745 | onenand->mtd = mtd; | ||
746 | |||
747 | if (onenand->type == TYPE_S3C6400) { | ||
748 | onenand->mem_addr = s3c6400_mem_addr; | ||
749 | onenand->cmd_map = s3c64xx_cmd_map; | ||
750 | } else if (onenand->type == TYPE_S3C6410) { | ||
751 | onenand->mem_addr = s3c6410_mem_addr; | ||
752 | onenand->cmd_map = s3c64xx_cmd_map; | ||
753 | } else if (onenand->type == TYPE_S5PC100) { | ||
754 | onenand->mem_addr = s5pc100_mem_addr; | ||
755 | onenand->cmd_map = s5pc1xx_cmd_map; | ||
756 | } else if (onenand->type == TYPE_S5PC110) { | ||
757 | /* Use generic onenand functions */ | ||
758 | onenand->cmd_map = s5pc1xx_cmd_map; | ||
759 | this->read_bufferram = s5pc110_read_bufferram; | ||
760 | return; | ||
761 | } else { | ||
762 | BUG(); | ||
763 | } | ||
764 | |||
765 | this->read_word = s3c_onenand_readw; | ||
766 | this->write_word = s3c_onenand_writew; | ||
767 | |||
768 | this->wait = s3c_onenand_wait; | ||
769 | this->bbt_wait = s3c_onenand_bbt_wait; | ||
770 | this->unlock_all = s3c_unlock_all; | ||
771 | this->command = s3c_onenand_command; | ||
772 | |||
773 | this->read_bufferram = onenand_read_bufferram; | ||
774 | this->write_bufferram = onenand_write_bufferram; | ||
775 | } | ||
776 | |||
777 | static int s3c_onenand_probe(struct platform_device *pdev) | ||
778 | { | ||
779 | struct onenand_platform_data *pdata; | ||
780 | struct onenand_chip *this; | ||
781 | struct mtd_info *mtd; | ||
782 | struct resource *r; | ||
783 | int size, err; | ||
784 | unsigned long onenand_ctrl_cfg = 0; | ||
785 | |||
786 | pdata = pdev->dev.platform_data; | ||
787 | /* No need to check pdata. the platform data is optional */ | ||
788 | |||
789 | size = sizeof(struct mtd_info) + sizeof(struct onenand_chip); | ||
790 | mtd = kzalloc(size, GFP_KERNEL); | ||
791 | if (!mtd) { | ||
792 | dev_err(&pdev->dev, "failed to allocate memory\n"); | ||
793 | return -ENOMEM; | ||
794 | } | ||
795 | |||
796 | onenand = kzalloc(sizeof(struct s3c_onenand), GFP_KERNEL); | ||
797 | if (!onenand) { | ||
798 | err = -ENOMEM; | ||
799 | goto onenand_fail; | ||
800 | } | ||
801 | |||
802 | this = (struct onenand_chip *) &mtd[1]; | ||
803 | mtd->priv = this; | ||
804 | mtd->dev.parent = &pdev->dev; | ||
805 | mtd->owner = THIS_MODULE; | ||
806 | onenand->pdev = pdev; | ||
807 | onenand->type = platform_get_device_id(pdev)->driver_data; | ||
808 | |||
809 | s3c_onenand_setup(mtd); | ||
810 | |||
811 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
812 | if (!r) { | ||
813 | dev_err(&pdev->dev, "no memory resource defined\n"); | ||
814 | return -ENOENT; | ||
815 | goto ahb_resource_failed; | ||
816 | } | ||
817 | |||
818 | onenand->base_res = request_mem_region(r->start, resource_size(r), | ||
819 | pdev->name); | ||
820 | if (!onenand->base_res) { | ||
821 | dev_err(&pdev->dev, "failed to request memory resource\n"); | ||
822 | err = -EBUSY; | ||
823 | goto resource_failed; | ||
824 | } | ||
825 | |||
826 | onenand->base = ioremap(r->start, resource_size(r)); | ||
827 | if (!onenand->base) { | ||
828 | dev_err(&pdev->dev, "failed to map memory resource\n"); | ||
829 | err = -EFAULT; | ||
830 | goto ioremap_failed; | ||
831 | } | ||
832 | /* Set onenand_chip also */ | ||
833 | this->base = onenand->base; | ||
834 | |||
835 | /* Use runtime badblock check */ | ||
836 | this->options |= ONENAND_SKIP_UNLOCK_CHECK; | ||
837 | |||
838 | if (onenand->type != TYPE_S5PC110) { | ||
839 | r = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
840 | if (!r) { | ||
841 | dev_err(&pdev->dev, "no buffer memory resource defined\n"); | ||
842 | return -ENOENT; | ||
843 | goto ahb_resource_failed; | ||
844 | } | ||
845 | |||
846 | onenand->ahb_res = request_mem_region(r->start, resource_size(r), | ||
847 | pdev->name); | ||
848 | if (!onenand->ahb_res) { | ||
849 | dev_err(&pdev->dev, "failed to request buffer memory resource\n"); | ||
850 | err = -EBUSY; | ||
851 | goto ahb_resource_failed; | ||
852 | } | ||
853 | |||
854 | onenand->ahb_addr = ioremap(r->start, resource_size(r)); | ||
855 | if (!onenand->ahb_addr) { | ||
856 | dev_err(&pdev->dev, "failed to map buffer memory resource\n"); | ||
857 | err = -EINVAL; | ||
858 | goto ahb_ioremap_failed; | ||
859 | } | ||
860 | |||
861 | /* Allocate 4KiB BufferRAM */ | ||
862 | onenand->page_buf = kzalloc(SZ_4K, GFP_KERNEL); | ||
863 | if (!onenand->page_buf) { | ||
864 | err = -ENOMEM; | ||
865 | goto page_buf_fail; | ||
866 | } | ||
867 | |||
868 | /* Allocate 128 SpareRAM */ | ||
869 | onenand->oob_buf = kzalloc(128, GFP_KERNEL); | ||
870 | if (!onenand->oob_buf) { | ||
871 | err = -ENOMEM; | ||
872 | goto oob_buf_fail; | ||
873 | } | ||
874 | |||
875 | /* S3C doesn't handle subpage write */ | ||
876 | mtd->subpage_sft = 0; | ||
877 | this->subpagesize = mtd->writesize; | ||
878 | |||
879 | } else { /* S5PC110 */ | ||
880 | r = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
881 | if (!r) { | ||
882 | dev_err(&pdev->dev, "no dma memory resource defined\n"); | ||
883 | return -ENOENT; | ||
884 | goto dma_resource_failed; | ||
885 | } | ||
886 | |||
887 | onenand->dma_res = request_mem_region(r->start, resource_size(r), | ||
888 | pdev->name); | ||
889 | if (!onenand->dma_res) { | ||
890 | dev_err(&pdev->dev, "failed to request dma memory resource\n"); | ||
891 | err = -EBUSY; | ||
892 | goto dma_resource_failed; | ||
893 | } | ||
894 | |||
895 | onenand->dma_addr = ioremap(r->start, resource_size(r)); | ||
896 | if (!onenand->dma_addr) { | ||
897 | dev_err(&pdev->dev, "failed to map dma memory resource\n"); | ||
898 | err = -EINVAL; | ||
899 | goto dma_ioremap_failed; | ||
900 | } | ||
901 | |||
902 | onenand->phys_base = onenand->base_res->start; | ||
903 | |||
904 | onenand_ctrl_cfg = readl(onenand->dma_addr + 0x100); | ||
905 | if ((onenand_ctrl_cfg & ONENAND_SYS_CFG1_SYNC_WRITE) && | ||
906 | onenand->dma_addr) | ||
907 | writel(onenand_ctrl_cfg & ~ONENAND_SYS_CFG1_SYNC_WRITE, | ||
908 | onenand->dma_addr + 0x100); | ||
909 | else | ||
910 | onenand_ctrl_cfg = 0; | ||
911 | } | ||
912 | |||
913 | if (onenand_scan(mtd, 1)) { | ||
914 | err = -EFAULT; | ||
915 | goto scan_failed; | ||
916 | } | ||
917 | |||
918 | if (onenand->type == TYPE_S5PC110) { | ||
919 | if (onenand_ctrl_cfg && onenand->dma_addr) | ||
920 | writel(onenand_ctrl_cfg, onenand->dma_addr + 0x100); | ||
921 | } else { | ||
922 | /* S3C doesn't handle subpage write */ | ||
923 | mtd->subpage_sft = 0; | ||
924 | this->subpagesize = mtd->writesize; | ||
925 | } | ||
926 | |||
927 | if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ) | ||
928 | dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n"); | ||
929 | |||
930 | #ifdef CONFIG_MTD_PARTITIONS | ||
931 | err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0); | ||
932 | if (err > 0) | ||
933 | add_mtd_partitions(mtd, onenand->parts, err); | ||
934 | else if (err <= 0 && pdata && pdata->parts) | ||
935 | add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); | ||
936 | else | ||
937 | #endif | ||
938 | err = add_mtd_device(mtd); | ||
939 | |||
940 | platform_set_drvdata(pdev, mtd); | ||
941 | |||
942 | return 0; | ||
943 | |||
944 | scan_failed: | ||
945 | if (onenand->dma_addr) | ||
946 | iounmap(onenand->dma_addr); | ||
947 | dma_ioremap_failed: | ||
948 | if (onenand->dma_res) | ||
949 | release_mem_region(onenand->dma_res->start, | ||
950 | resource_size(onenand->dma_res)); | ||
951 | kfree(onenand->oob_buf); | ||
952 | oob_buf_fail: | ||
953 | kfree(onenand->page_buf); | ||
954 | page_buf_fail: | ||
955 | if (onenand->ahb_addr) | ||
956 | iounmap(onenand->ahb_addr); | ||
957 | ahb_ioremap_failed: | ||
958 | if (onenand->ahb_res) | ||
959 | release_mem_region(onenand->ahb_res->start, | ||
960 | resource_size(onenand->ahb_res)); | ||
961 | dma_resource_failed: | ||
962 | ahb_resource_failed: | ||
963 | iounmap(onenand->base); | ||
964 | ioremap_failed: | ||
965 | if (onenand->base_res) | ||
966 | release_mem_region(onenand->base_res->start, | ||
967 | resource_size(onenand->base_res)); | ||
968 | resource_failed: | ||
969 | kfree(onenand); | ||
970 | onenand_fail: | ||
971 | kfree(mtd); | ||
972 | return err; | ||
973 | } | ||
974 | |||
/*
 * Tear down everything s3c_onenand_probe() set up.
 * The AHB window (S3C variants) and the DMA window (S5PC110) are both
 * optional, so each unmap/release is guarded; the base window always exists.
 */
static int __devexit s3c_onenand_remove(struct platform_device *pdev)
{
	struct mtd_info *mtd = platform_get_drvdata(pdev);

	/* Unregister the mtd device / partitions first */
	onenand_release(mtd);
	if (onenand->ahb_addr)
		iounmap(onenand->ahb_addr);
	if (onenand->ahb_res)
		release_mem_region(onenand->ahb_res->start,
				   resource_size(onenand->ahb_res));
	if (onenand->dma_addr)
		iounmap(onenand->dma_addr);
	if (onenand->dma_res)
		release_mem_region(onenand->dma_res->start,
				   resource_size(onenand->dma_res));

	iounmap(onenand->base);
	release_mem_region(onenand->base_res->start,
			   resource_size(onenand->base_res));

	platform_set_drvdata(pdev, NULL);
	/* Buffers allocated during probe */
	kfree(onenand->oob_buf);
	kfree(onenand->page_buf);
	kfree(onenand);
	kfree(mtd);
	return 0;
}
1002 | |||
1003 | static int s3c_pm_ops_suspend(struct device *dev) | ||
1004 | { | ||
1005 | struct platform_device *pdev = to_platform_device(dev); | ||
1006 | struct mtd_info *mtd = platform_get_drvdata(pdev); | ||
1007 | struct onenand_chip *this = mtd->priv; | ||
1008 | |||
1009 | this->wait(mtd, FL_PM_SUSPENDED); | ||
1010 | return mtd->suspend(mtd); | ||
1011 | } | ||
1012 | |||
1013 | static int s3c_pm_ops_resume(struct device *dev) | ||
1014 | { | ||
1015 | struct platform_device *pdev = to_platform_device(dev); | ||
1016 | struct mtd_info *mtd = platform_get_drvdata(pdev); | ||
1017 | struct onenand_chip *this = mtd->priv; | ||
1018 | |||
1019 | mtd->resume(mtd); | ||
1020 | this->unlock_all(mtd); | ||
1021 | return 0; | ||
1022 | } | ||
1023 | |||
/* Power-management hooks wired into s3c_onenand_driver below */
static const struct dev_pm_ops s3c_pm_ops = {
	.suspend	= s3c_pm_ops_suspend,
	.resume		= s3c_pm_ops_resume,
};
1028 | |||
/* One entry per supported SoC; driver_data carries the controller type
   (TYPE_*) that probe uses to select register layout and DMA handling. */
static struct platform_device_id s3c_onenand_driver_ids[] = {
	{
		.name		= "s3c6400-onenand",
		.driver_data	= TYPE_S3C6400,
	}, {
		.name		= "s3c6410-onenand",
		.driver_data	= TYPE_S3C6410,
	}, {
		.name		= "s5pc100-onenand",
		.driver_data	= TYPE_S5PC100,
	}, {
		.name		= "s5pc110-onenand",
		.driver_data	= TYPE_S5PC110,
	}, { },
};
MODULE_DEVICE_TABLE(platform, s3c_onenand_driver_ids);
1045 | |||
/* Platform driver; actual device matching happens via the id_table above. */
static struct platform_driver s3c_onenand_driver = {
	.driver         = {
		.name	= "samsung-onenand",
		.pm	= &s3c_pm_ops,
	},
	.id_table	= s3c_onenand_driver_ids,
	.probe          = s3c_onenand_probe,
	.remove         = __devexit_p(s3c_onenand_remove),
};
1055 | |||
/* Module entry point: register the platform driver. */
static int __init s3c_onenand_init(void)
{
	return platform_driver_register(&s3c_onenand_driver);
}
1060 | |||
/* Module exit point: unregister the platform driver. */
static void __exit s3c_onenand_exit(void)
{
	platform_driver_unregister(&s3c_onenand_driver);
}
1065 | |||
1066 | module_init(s3c_onenand_init); | ||
1067 | module_exit(s3c_onenand_exit); | ||
1068 | |||
1069 | MODULE_LICENSE("GPL"); | ||
1070 | MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>"); | ||
1071 | MODULE_DESCRIPTION("Samsung OneNAND controller support"); | ||
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c index d2aa9c46530f..63b83c0d9a13 100644 --- a/drivers/mtd/rfd_ftl.c +++ b/drivers/mtd/rfd_ftl.c | |||
@@ -817,7 +817,6 @@ static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev) | |||
817 | vfree(part->sector_map); | 817 | vfree(part->sector_map); |
818 | kfree(part->header_cache); | 818 | kfree(part->header_cache); |
819 | kfree(part->blocks); | 819 | kfree(part->blocks); |
820 | kfree(part); | ||
821 | } | 820 | } |
822 | 821 | ||
823 | static struct mtd_blktrans_ops rfd_ftl_tr = { | 822 | static struct mtd_blktrans_ops rfd_ftl_tr = { |
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c new file mode 100644 index 000000000000..67822cf6c025 --- /dev/null +++ b/drivers/mtd/sm_ftl.c | |||
@@ -0,0 +1,1284 @@ | |||
1 | /* | ||
2 | * Copyright © 2009 - Maxim Levitsky | ||
3 | * SmartMedia/xD translation layer | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/random.h> | ||
13 | #include <linux/hdreg.h> | ||
14 | #include <linux/kthread.h> | ||
15 | #include <linux/freezer.h> | ||
16 | #include <linux/sysfs.h> | ||
17 | #include <linux/bitops.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/mtd/nand_ecc.h> | ||
20 | #include "nand/sm_common.h" | ||
21 | #include "sm_ftl.h" | ||
22 | |||
23 | |||
24 | |||
25 | struct workqueue_struct *cache_flush_workqueue; | ||
26 | |||
27 | static int cache_timeout = 1000; | ||
28 | module_param(cache_timeout, bool, S_IRUGO); | ||
29 | MODULE_PARM_DESC(cache_timeout, | ||
30 | "Timeout (in ms) for cache flush (1000 ms default"); | ||
31 | |||
32 | static int debug; | ||
33 | module_param(debug, int, S_IRUGO | S_IWUSR); | ||
34 | MODULE_PARM_DESC(debug, "Debug level (0-2)"); | ||
35 | |||
36 | |||
/* ------------------- sysfs attributes ----------------------------------- */
/* A device_attribute bundled with the heap buffer it exposes via sysfs. */
struct sm_sysfs_attribute {
	struct device_attribute dev_attr;
	char *data;	/* payload returned by sm_attr_show(); owned here */
	int len;	/* payload length in bytes (no NUL terminator counted) */
};
43 | |||
44 | ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr, | ||
45 | char *buf) | ||
46 | { | ||
47 | struct sm_sysfs_attribute *sm_attr = | ||
48 | container_of(attr, struct sm_sysfs_attribute, dev_attr); | ||
49 | |||
50 | strncpy(buf, sm_attr->data, sm_attr->len); | ||
51 | return sm_attr->len; | ||
52 | } | ||
53 | |||
54 | |||
55 | #define NUM_ATTRIBUTES 1 | ||
56 | #define SM_CIS_VENDOR_OFFSET 0x59 | ||
57 | struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl) | ||
58 | { | ||
59 | struct attribute_group *attr_group; | ||
60 | struct attribute **attributes; | ||
61 | struct sm_sysfs_attribute *vendor_attribute; | ||
62 | |||
63 | int vendor_len = strnlen(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, | ||
64 | SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET); | ||
65 | |||
66 | char *vendor = kmalloc(vendor_len, GFP_KERNEL); | ||
67 | memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len); | ||
68 | vendor[vendor_len] = 0; | ||
69 | |||
70 | /* Initialize sysfs attributes */ | ||
71 | vendor_attribute = | ||
72 | kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL); | ||
73 | |||
74 | sysfs_attr_init(&vendor_attribute->dev_attr.attr); | ||
75 | |||
76 | vendor_attribute->data = vendor; | ||
77 | vendor_attribute->len = vendor_len; | ||
78 | vendor_attribute->dev_attr.attr.name = "vendor"; | ||
79 | vendor_attribute->dev_attr.attr.mode = S_IRUGO; | ||
80 | vendor_attribute->dev_attr.show = sm_attr_show; | ||
81 | |||
82 | |||
83 | /* Create array of pointers to the attributes */ | ||
84 | attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1), | ||
85 | GFP_KERNEL); | ||
86 | attributes[0] = &vendor_attribute->dev_attr.attr; | ||
87 | |||
88 | /* Finally create the attribute group */ | ||
89 | attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL); | ||
90 | attr_group->attrs = attributes; | ||
91 | return attr_group; | ||
92 | } | ||
93 | |||
94 | void sm_delete_sysfs_attributes(struct sm_ftl *ftl) | ||
95 | { | ||
96 | struct attribute **attributes = ftl->disk_attributes->attrs; | ||
97 | int i; | ||
98 | |||
99 | for (i = 0; attributes[i] ; i++) { | ||
100 | |||
101 | struct device_attribute *dev_attr = container_of(attributes[i], | ||
102 | struct device_attribute, attr); | ||
103 | |||
104 | struct sm_sysfs_attribute *sm_attr = | ||
105 | container_of(dev_attr, | ||
106 | struct sm_sysfs_attribute, dev_attr); | ||
107 | |||
108 | kfree(sm_attr->data); | ||
109 | kfree(sm_attr); | ||
110 | } | ||
111 | |||
112 | kfree(ftl->disk_attributes->attrs); | ||
113 | kfree(ftl->disk_attributes); | ||
114 | } | ||
115 | |||
116 | |||
117 | /* ----------------------- oob helpers -------------------------------------- */ | ||
118 | |||
119 | static int sm_get_lba(uint8_t *lba) | ||
120 | { | ||
121 | /* check fixed bits */ | ||
122 | if ((lba[0] & 0xF8) != 0x10) | ||
123 | return -2; | ||
124 | |||
125 | /* check parity - endianess doesn't matter */ | ||
126 | if (hweight16(*(uint16_t *)lba) & 1) | ||
127 | return -2; | ||
128 | |||
129 | return (lba[1] >> 1) | ((lba[0] & 0x07) << 7); | ||
130 | } | ||
131 | |||
132 | |||
133 | /* | ||
134 | * Read LBA asscociated with block | ||
135 | * returns -1, if block is erased | ||
136 | * returns -2 if error happens | ||
137 | */ | ||
138 | static int sm_read_lba(struct sm_oob *oob) | ||
139 | { | ||
140 | static const uint32_t erased_pattern[4] = { | ||
141 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }; | ||
142 | |||
143 | uint16_t lba_test; | ||
144 | int lba; | ||
145 | |||
146 | /* First test for erased block */ | ||
147 | if (!memcmp(oob, erased_pattern, SM_OOB_SIZE)) | ||
148 | return -1; | ||
149 | |||
150 | /* Now check is both copies of the LBA differ too much */ | ||
151 | lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t*)oob->lba_copy2; | ||
152 | if (lba_test && !is_power_of_2(lba_test)) | ||
153 | return -2; | ||
154 | |||
155 | /* And read it */ | ||
156 | lba = sm_get_lba(oob->lba_copy1); | ||
157 | |||
158 | if (lba == -2) | ||
159 | lba = sm_get_lba(oob->lba_copy2); | ||
160 | |||
161 | return lba; | ||
162 | } | ||
163 | |||
164 | static void sm_write_lba(struct sm_oob *oob, uint16_t lba) | ||
165 | { | ||
166 | uint8_t tmp[2]; | ||
167 | |||
168 | WARN_ON(lba >= 1000); | ||
169 | |||
170 | tmp[0] = 0x10 | ((lba >> 7) & 0x07); | ||
171 | tmp[1] = (lba << 1) & 0xFF; | ||
172 | |||
173 | if (hweight16(*(uint16_t *)tmp) & 0x01) | ||
174 | tmp[1] |= 1; | ||
175 | |||
176 | oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0]; | ||
177 | oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1]; | ||
178 | } | ||
179 | |||
180 | |||
/* Make an absolute mtd offset from (zone, block, byte-offset-in-block).
   Zones are laid out SM_MAX_ZONE_SIZE blocks apart regardless of the
   actual zone_size, so the arithmetic is uniform across card sizes. */
static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
{
	/* boffset must be sector-aligned and all parts in range;
	   these warn (not fail) because callers are trusted */
	WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
	WARN_ON(zone < 0 || zone >= ftl->zone_count);
	WARN_ON(block >= ftl->zone_size);
	WARN_ON(boffset >= ftl->block_size);

	/* block == -1 denotes a hole in the FTL table */
	if (block == -1)
		return -1;

	return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
}
194 | |||
195 | /* Breaks offset into parts */ | ||
196 | static void sm_break_offset(struct sm_ftl *ftl, loff_t offset, | ||
197 | int *zone, int *block, int *boffset) | ||
198 | { | ||
199 | *boffset = do_div(offset, ftl->block_size); | ||
200 | *block = do_div(offset, ftl->max_lba); | ||
201 | *zone = offset >= ftl->zone_count ? -1 : offset; | ||
202 | } | ||
203 | |||
204 | /* ---------------------- low level IO ------------------------------------- */ | ||
205 | |||
206 | static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob) | ||
207 | { | ||
208 | uint8_t ecc[3]; | ||
209 | |||
210 | __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc); | ||
211 | if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE) < 0) | ||
212 | return -EIO; | ||
213 | |||
214 | buffer += SM_SMALL_PAGE; | ||
215 | |||
216 | __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc); | ||
217 | if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE) < 0) | ||
218 | return -EIO; | ||
219 | return 0; | ||
220 | } | ||
221 | |||
/* Read one sector plus its oob.
 * @buffer may be NULL to read only the oob; @oob may be NULL if the caller
 * doesn't need it (a scratch copy is still read for validation).
 * Retries up to twice, re-checking media stability between attempts.
 * Returns 0 on success, last mtd/ECC error otherwise. */
static int sm_read_sector(struct sm_ftl *ftl,
			  int zone, int block, int boffset,
			  uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_info *mtd = ftl->trans->mtd;
	struct mtd_oob_ops ops;
	struct sm_oob tmp_oob;
	int ret = -EIO;
	int try = 0;

	/* FTL can contain -1 entries that are by default filled with bits */
	if (block == -1) {
		memset(buffer, 0xFF, SM_SECTOR_SIZE);
		return 0;
	}

	/* User might not need the oob, but we do for data verification */
	if (!oob)
		oob = &tmp_oob;

	/* Small-page NAND stores ECC in-band, so read raw there */
	ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
	ops.ooboffs = 0;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;

again:
	if (try++) {
		/* Avoid infinite recursion on CIS reads: sm_recheck_media()
		   itself reads the CIS, so it can't help here */
		if (zone == 0 && block == ftl->cis_block && boffset ==
			ftl->cis_boffset)
			return ret;

		/* Test if media is stable */
		if (try == 3 || sm_recheck_media(ftl))
			return ret;
	}

	/* Unfortunately, the oob read will _always_ succeed,
	   despite card removal..... */
	ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Test for unknown errors (-EUCLEAN/-EBADMSG are handled below) */
	if (ret != 0 && ret != -EUCLEAN && ret != -EBADMSG) {
		dbg("read of block %d at zone %d, failed due to error (%d)",
			block, zone, ret);
		goto again;
	}

	/* Do a basic test on the oob, to guard against returned garbage:
	   the reserved field must be all-ones or one bit short of it */
	if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
		goto again;

	/* This should never happen, unless there is a bug in the mtd driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	/* oob-only read: nothing more to validate */
	if (!buffer)
		return 0;

	/* Test if sector marked as bad */
	if (!sm_sector_valid(oob)) {
		dbg("read of block %d at zone %d, failed because it is marked"
			" as bad" , block, zone);
		goto again;
	}

	/* Test ECC: large-page hardware reported -EBADMSG, or small-page
	   software correction failed */
	if (ret == -EBADMSG ||
		(ftl->smallpagenand && sm_correct_sector(buffer, oob))) {

		dbg("read of block %d at zone %d, failed due to ECC error",
			block, zone);
		goto again;
	}

	return 0;
}
303 | |||
/* Write one sector plus its oob to the media.
 * Refuses writes on read-only/unstable media and to the CIS area.
 * On failure, re-checks media stability before returning the error. */
static int sm_write_sector(struct sm_ftl *ftl,
			   int zone, int block, int boffset,
			   uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_oob_ops ops;
	struct mtd_info *mtd = ftl->trans->mtd;
	int ret;

	/* Callers must never write a read-only card */
	BUG_ON(ftl->readonly);

	/* Block 0 and the CIS block are sacred */
	if (zone == 0 && (block == ftl->cis_block || block == 0)) {
		dbg("attempted to write the CIS!");
		return -EIO;
	}

	if (ftl->unstable)
		return -EIO;

	/* Small-page NAND stores ECC in-band, so write raw there */
	ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;
	ops.ooboffs = 0;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;

	ret = mtd->write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Now we assume that hardware will catch write bitflip errors */
	/* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */

	if (ret) {
		dbg("write to block %d at zone %d, failed with error %d",
			block, zone, ret);

		/* May latch ftl->unstable and disable further writes */
		sm_recheck_media(ftl);
		return ret;
	}

	/* This should never happen, unless there is a bug in the driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	return 0;
}
349 | |||
350 | /* ------------------------ block IO ------------------------------------- */ | ||
351 | |||
/* Write a whole block from @buf with the given @lba stamped into each
 * sector's oob.  Sectors whose bit is set in @invalid_bitmap could not be
 * read from the source block and are marked invalid (data_status = 0).
 * On a sector write failure the block is erased and retried once; a second
 * failure marks the block bad.  Returns 0 on success, -EIO on failure. */
static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
			  int zone, int block, int lba,
			  unsigned long invalid_bitmap)
{
	struct sm_oob oob;
	int boffset;
	int retry = 0;

	/* Initialize the oob with requested values */
	memset(&oob, 0xFF, SM_OOB_SIZE);
	sm_write_lba(&oob, lba);
restart:
	if (ftl->unstable)
		return -EIO;

	for (boffset = 0; boffset < ftl->block_size;
				boffset += SM_SECTOR_SIZE) {

		/* Reset per-sector status; may have been cleared last pass */
		oob.data_status = 0xFF;

		if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {

			sm_printk("sector %d of block at LBA %d of zone %d"
				" coudn't be read, marking it as invalid",
				boffset / SM_SECTOR_SIZE, lba, zone);

			oob.data_status = 0;
		}

		/* Small-page NAND: compute software ECC for both halves */
		if (ftl->smallpagenand) {
			__nand_calculate_ecc(buf + boffset,
					SM_SMALL_PAGE, oob.ecc1);

			__nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE,
					SM_SMALL_PAGE, oob.ecc2);
		}
		if (!sm_write_sector(ftl, zone, block, boffset,
							buf + boffset, &oob))
			continue;

		if (!retry) {

			/* If write fails. try to erase the block */
			/* This is safe, because we never write in blocks
				that contain valuable data.
			This is intended to repair block that are marked
				as erased, but that isn't fully erased*/

			if (sm_erase_block(ftl, zone, block, 0))
				return -EIO;

			retry = 1;
			goto restart;
		} else {
			/* Second failure on the same block: give up on it */
			sm_mark_block_bad(ftl, zone, block);
			return -EIO;
		}
	}
	return 0;
}
413 | |||
414 | |||
/* Mark a whole block as bad by clearing block_status in every sector's oob.
   Best effort: skipped entirely on unstable/unreachable media, and
   individual write failures are ignored. */
static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
{
	struct sm_oob oob;
	int boffset;

	memset(&oob, 0xFF, SM_OOB_SIZE);
	/* Anything other than 0xFF here flags the block bad */
	oob.block_status = 0xF0;

	if (ftl->unstable)
		return;

	if (sm_recheck_media(ftl))
		return;

	sm_printk("marking block %d of zone %d as bad", block, zone);

	/* We aren't checking the return value, because we don't care */
	/* This also fails on fake xD cards, but I guess these won't expose
		any bad blocks till fail completely */
	for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
		sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
}
438 | |||
/*
 * Erase a block within a zone.
 * If erase succeeds and @put_free is set, the block is pushed onto the
 * zone's free-block fifo; on any failure the block is marked bad.
 * The mtd erase is asynchronous: completion is signalled through
 * sm_erase_callback() -> ftl->erase_completion.
 */
static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
			  int put_free)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct mtd_info *mtd = ftl->trans->mtd;
	struct erase_info erase;

	erase.mtd = mtd;
	erase.callback = sm_erase_callback;
	erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
	erase.len = ftl->block_size;
	erase.priv = (u_long)ftl;	/* recovered in the callback */

	if (ftl->unstable)
		return -EIO;

	BUG_ON(ftl->readonly);

	/* Block 0 and the CIS block must never be erased */
	if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
		sm_printk("attempted to erase the CIS!");
		return -EIO;
	}

	if (mtd->erase(mtd, &erase)) {
		sm_printk("erase of block %d in zone %d failed",
							block, zone_num);
		goto error;
	}

	/* Erase may complete synchronously; only wait if still pending */
	if (erase.state == MTD_ERASE_PENDING)
		wait_for_completion(&ftl->erase_completion);

	if (erase.state != MTD_ERASE_DONE) {
		sm_printk("erase of block %d in zone %d failed after wait",
			block, zone_num);
		goto error;
	}

	if (put_free)
		kfifo_in(&zone->free_sectors,
			(const unsigned char *)&block, sizeof(block));

	return 0;
error:
	sm_mark_block_bad(ftl, zone_num, block);
	return -EIO;
}
490 | |||
491 | static void sm_erase_callback(struct erase_info *self) | ||
492 | { | ||
493 | struct sm_ftl *ftl = (struct sm_ftl *)self->priv; | ||
494 | complete(&ftl->erase_completion); | ||
495 | } | ||
496 | |||
/* Thoroughly test that a block is valid by reading every sector's LBA.
 * Returns 0 if the block carries one consistent LBA, 1 if it was sliced
 * (two distinct LBAs — partially erased; it gets erased here), -EIO if
 * more than two LBAs were found, -2 on a read error. */
static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
{
	int boffset;
	struct sm_oob oob;
	/* lbas[0] = -3 is a sentinel no real sm_read_lba() result equals,
	   so the first sector always records into lbas[1] */
	int lbas[] = { -3, 0, 0, 0 };
	int i = 0;
	int test_lba;


	/* First just check that block doesn't look fishy */
	/* Only blocks that are valid or are sliced in two parts, are
		accepted */
	for (boffset = 0; boffset < ftl->block_size;
					boffset += SM_SECTOR_SIZE) {

		/* This shouldn't happen anyway */
		if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
			return -2;

		test_lba = sm_read_lba(&oob);

		/* Record each newly-seen LBA value */
		if (lbas[i] != test_lba)
			lbas[++i] = test_lba;

		/* If we found three different LBAs, something is fishy */
		if (i == 3)
			return -EIO;
	}

	/* If the block is sliced (partially erased usually) erase it */
	if (i == 2) {
		sm_erase_block(ftl, zone, block, 1);
		return 1;
	}

	return 0;
}
535 | |||
536 | /* ----------------- media scanning --------------------------------- */ | ||
/* Size (MiB) -> CHS geometry lookup used by sm_get_media_info().
   Fields per entry: size, cylinders, heads, sectors. Zero-size terminated. */
static const struct chs_entry chs_table[] = {
	{ 1,    125,  4,  4  },
	{ 2,    125,  4,  8  },
	{ 4,    250,  4,  8  },
	{ 8,    250,  4,  16 },
	{ 16,   500,  4,  16 },
	{ 32,   500,  8,  16 },
	{ 64,   500,  8,  32 },
	{ 128,  500,  16, 32 },
	{ 256,  1000, 16, 32 },
	{ 512,  1015, 32, 63 },
	{ 1024, 985,  33, 63 },
	{ 2048, 985,  33, 63 },
	{ 0 },
};
552 | |||
553 | |||
/* First bytes of a valid SmartMedia/xD CIS; matched by sm_read_cis(). */
static const uint8_t cis_signature[] = {
	0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
};
557 | /* Find out media parameters. | ||
558 | * This ideally has to be based on nand id, but for now device size is enough */ | ||
559 | int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd) | ||
560 | { | ||
561 | int i; | ||
562 | int size_in_megs = mtd->size / (1024 * 1024); | ||
563 | |||
564 | ftl->readonly = mtd->type == MTD_ROM; | ||
565 | |||
566 | /* Manual settings for very old devices */ | ||
567 | ftl->zone_count = 1; | ||
568 | ftl->smallpagenand = 0; | ||
569 | |||
570 | switch (size_in_megs) { | ||
571 | case 1: | ||
572 | /* 1 MiB flash/rom SmartMedia card (256 byte pages)*/ | ||
573 | ftl->zone_size = 256; | ||
574 | ftl->max_lba = 250; | ||
575 | ftl->block_size = 8 * SM_SECTOR_SIZE; | ||
576 | ftl->smallpagenand = 1; | ||
577 | |||
578 | break; | ||
579 | case 2: | ||
580 | /* 2 MiB flash SmartMedia (256 byte pages)*/ | ||
581 | if (mtd->writesize == SM_SMALL_PAGE) { | ||
582 | ftl->zone_size = 512; | ||
583 | ftl->max_lba = 500; | ||
584 | ftl->block_size = 8 * SM_SECTOR_SIZE; | ||
585 | ftl->smallpagenand = 1; | ||
586 | /* 2 MiB rom SmartMedia */ | ||
587 | } else { | ||
588 | |||
589 | if (!ftl->readonly) | ||
590 | return -ENODEV; | ||
591 | |||
592 | ftl->zone_size = 256; | ||
593 | ftl->max_lba = 250; | ||
594 | ftl->block_size = 16 * SM_SECTOR_SIZE; | ||
595 | } | ||
596 | break; | ||
597 | case 4: | ||
598 | /* 4 MiB flash/rom SmartMedia device */ | ||
599 | ftl->zone_size = 512; | ||
600 | ftl->max_lba = 500; | ||
601 | ftl->block_size = 16 * SM_SECTOR_SIZE; | ||
602 | break; | ||
603 | case 8: | ||
604 | /* 8 MiB flash/rom SmartMedia device */ | ||
605 | ftl->zone_size = 1024; | ||
606 | ftl->max_lba = 1000; | ||
607 | ftl->block_size = 16 * SM_SECTOR_SIZE; | ||
608 | } | ||
609 | |||
610 | /* Minimum xD size is 16MiB. Also, all xD cards have standard zone | ||
611 | sizes. SmartMedia cards exist up to 128 MiB and have same layout*/ | ||
612 | if (size_in_megs >= 16) { | ||
613 | ftl->zone_count = size_in_megs / 16; | ||
614 | ftl->zone_size = 1024; | ||
615 | ftl->max_lba = 1000; | ||
616 | ftl->block_size = 32 * SM_SECTOR_SIZE; | ||
617 | } | ||
618 | |||
619 | /* Test for proper write,erase and oob sizes */ | ||
620 | if (mtd->erasesize > ftl->block_size) | ||
621 | return -ENODEV; | ||
622 | |||
623 | if (mtd->writesize > SM_SECTOR_SIZE) | ||
624 | return -ENODEV; | ||
625 | |||
626 | if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE) | ||
627 | return -ENODEV; | ||
628 | |||
629 | if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE) | ||
630 | return -ENODEV; | ||
631 | |||
632 | /* We use these functions for IO */ | ||
633 | if (!mtd->read_oob || !mtd->write_oob) | ||
634 | return -ENODEV; | ||
635 | |||
636 | /* Find geometry information */ | ||
637 | for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) { | ||
638 | if (chs_table[i].size == size_in_megs) { | ||
639 | ftl->cylinders = chs_table[i].cyl; | ||
640 | ftl->heads = chs_table[i].head; | ||
641 | ftl->sectors = chs_table[i].sec; | ||
642 | return 0; | ||
643 | } | ||
644 | } | ||
645 | |||
646 | sm_printk("media has unknown size : %dMiB", size_in_megs); | ||
647 | ftl->cylinders = 985; | ||
648 | ftl->heads = 33; | ||
649 | ftl->sectors = 63; | ||
650 | return 0; | ||
651 | } | ||
652 | |||
653 | /* Validate the CIS */ | ||
654 | static int sm_read_cis(struct sm_ftl *ftl) | ||
655 | { | ||
656 | struct sm_oob oob; | ||
657 | |||
658 | if (sm_read_sector(ftl, | ||
659 | 0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob)) | ||
660 | return -EIO; | ||
661 | |||
662 | if (!sm_sector_valid(&oob) || !sm_block_valid(&oob)) | ||
663 | return -EIO; | ||
664 | |||
665 | if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset, | ||
666 | cis_signature, sizeof(cis_signature))) { | ||
667 | return 0; | ||
668 | } | ||
669 | |||
670 | return -EIO; | ||
671 | } | ||
672 | |||
/* Scan the media for the CIS.
 * The CIS lives in the first valid sector of the first valid block, at
 * page offset 0 or SM_SMALL_PAGE.  On success, records its location in
 * ftl->cis_block / cis_boffset / cis_page_offset and returns 0. */
static int sm_find_cis(struct sm_ftl *ftl)
{
	struct sm_oob oob;
	int block, boffset;
	int block_found = 0;
	int cis_found = 0;

	/* Search for first valid block; the CIS must be within the
	   reserved (zone_size - max_lba) blocks at the start of zone 0 */
	for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {

		if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
			continue;

		if (!sm_block_valid(&oob))
			continue;
		block_found = 1;
		break;
	}

	if (!block_found)
		return -EIO;

	/* Search for first valid sector in this block */
	for (boffset = 0 ; boffset < ftl->block_size;
						boffset += SM_SECTOR_SIZE) {

		if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
			continue;

		if (!sm_sector_valid(&oob))
			continue;
		break;
	}

	if (boffset == ftl->block_size)
		return -EIO;

	ftl->cis_block = block;
	ftl->cis_boffset = boffset;
	ftl->cis_page_offset = 0;

	/* Try page offset 0 first, then SM_SMALL_PAGE */
	cis_found = !sm_read_cis(ftl);

	if (!cis_found) {
		ftl->cis_page_offset = SM_SMALL_PAGE;
		cis_found = !sm_read_cis(ftl);
	}

	if (cis_found) {
		dbg("CIS block found at offset %x",
			block * ftl->block_size +
				boffset + ftl->cis_page_offset);
		return 0;
	}
	return -EIO;
}
730 | |||
731 | /* Basic test to determine if underlying mtd device if functional */ | ||
732 | static int sm_recheck_media(struct sm_ftl *ftl) | ||
733 | { | ||
734 | if (sm_read_cis(ftl)) { | ||
735 | |||
736 | if (!ftl->unstable) { | ||
737 | sm_printk("media unstable, not allowing writes"); | ||
738 | ftl->unstable = 1; | ||
739 | } | ||
740 | return -EIO; | ||
741 | } | ||
742 | return 0; | ||
743 | } | ||
744 | |||
/* Initialize a FTL zone: build the LBA -> physical block table and the
   free-block FIFO by scanning the OOB of every block in the zone. */
static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct sm_oob oob;
	uint16_t block;
	int lba;
	int i = 0;
	int len;

	dbg("initializing zone %d", zone_num);

	/* Allocate memory for FTL table */
	/* entries are int16_t, hence the '* 2' byte sizing */
	zone->lba_to_phys_table = kmalloc(ftl->max_lba * 2, GFP_KERNEL);

	if (!zone->lba_to_phys_table)
		return -ENOMEM;
	/* 0xFF fill makes every int16_t entry -1 == "unmapped" */
	memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);


	/* Allocate memory for free sectors FIFO */
	/* holds 16-bit physical block numbers, hence zone_size * 2 bytes */
	if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
		kfree(zone->lba_to_phys_table);
		return -ENOMEM;
	}

	/* Now scan the zone */
	for (block = 0 ; block < ftl->zone_size ; block++) {

		/* Skip blocks till the CIS (including) */
		if (zone_num == 0 && block <= ftl->cis_block)
			continue;

		/* Read the oob of first sector */
		if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob))
			return -EIO;

		/* Test to see if block is erased. It is enough to test
			first sector, because erase happens in one shot */
		if (sm_block_erased(&oob)) {
			kfifo_in(&zone->free_sectors,
				(unsigned char *)&block, 2);
			continue;
		}

		/* If block is marked as bad, skip it */
		/* This assumes we can trust first sector*/
		/* However the way the block valid status is defined, ensures
			very low probability of failure here */
		if (!sm_block_valid(&oob)) {
			dbg("PH %04d <-> <marked bad>", block);
			continue;
		}


		lba = sm_read_lba(&oob);

		/* Invalid LBA means that block is damaged. */
		/* We can try to erase it, or mark it as bad, but
			lets leave that to recovery application */
		if (lba == -2 || lba >= ftl->max_lba) {
			dbg("PH %04d <-> LBA %04d(bad)", block, lba);
			continue;
		}


		/* If there is no collision,
			just put the sector in the FTL table */
		if (zone->lba_to_phys_table[lba] < 0) {
			dbg_verbose("PH %04d <-> LBA %04d", block, lba);
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		/* Two physical blocks claim the same LBA - decide which
		   one to keep by checksumming both */
		sm_printk("collision"
			" of LBA %d between blocks %d and %d in zone %d",
			lba, zone->lba_to_phys_table[lba], block, zone_num);

		/* Test that this block is valid*/
		if (sm_check_block(ftl, zone_num, block))
			continue;

		/* Test now the old block */
		if (sm_check_block(ftl, zone_num,
				zone->lba_to_phys_table[lba])) {
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		/* If both blocks are valid and share same LBA, it means that
			they hold different versions of same data. It not
			known which is more recent, thus just erase one of them
		*/
		sm_printk("both blocks are valid, erasing the later");
		sm_erase_block(ftl, zone_num, block, 1);
	}

	dbg("zone initialized");
	zone->initialized = 1;

	/* No free sectors, means that the zone is heavily damaged, write won't
		work, but it can still can be (partially) read */
	if (!kfifo_len(&zone->free_sectors)) {
		sm_printk("no free blocks in zone %d", zone_num);
		return 0;
	}

	/* Randomize first block we write to */
	/* NOTE(review): only the first 2 bytes of 'i' are randomized; on a
	   big-endian machine those are the high bytes, so 'i' may end up
	   negative and the rotation below is skipped. Harmless (the FIFO is
	   simply not rotated) but worth confirming the intent. */
	get_random_bytes(&i, 2);
	i %= (kfifo_len(&zone->free_sectors) / 2);

	/* Rotate the FIFO by 'i' entries to spread wear across the media */
	while (i--) {
		len = kfifo_out(&zone->free_sectors,
			(unsigned char *)&block, 2);
		WARN_ON(len != 2);
		kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
	}
	return 0;
}
864 | |||
865 | /* Get and automaticly initialize an FTL mapping for one zone */ | ||
866 | struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num) | ||
867 | { | ||
868 | struct ftl_zone *zone; | ||
869 | int error; | ||
870 | |||
871 | BUG_ON(zone_num >= ftl->zone_count); | ||
872 | zone = &ftl->zones[zone_num]; | ||
873 | |||
874 | if (!zone->initialized) { | ||
875 | error = sm_init_zone(ftl, zone_num); | ||
876 | |||
877 | if (error) | ||
878 | return ERR_PTR(error); | ||
879 | } | ||
880 | return zone; | ||
881 | } | ||
882 | |||
883 | |||
884 | /* ----------------- cache handling ------------------------------------------*/ | ||
885 | |||
886 | /* Initialize the one block cache */ | ||
887 | void sm_cache_init(struct sm_ftl *ftl) | ||
888 | { | ||
889 | ftl->cache_data_invalid_bitmap = 0xFFFFFFFF; | ||
890 | ftl->cache_clean = 1; | ||
891 | ftl->cache_zone = -1; | ||
892 | ftl->cache_block = -1; | ||
893 | /*memset(ftl->cache_data, 0xAA, ftl->block_size);*/ | ||
894 | } | ||
895 | |||
896 | /* Put sector in one block cache */ | ||
897 | void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset) | ||
898 | { | ||
899 | memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE); | ||
900 | clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap); | ||
901 | ftl->cache_clean = 0; | ||
902 | } | ||
903 | |||
904 | /* Read a sector from the cache */ | ||
905 | int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset) | ||
906 | { | ||
907 | if (test_bit(boffset / SM_SECTOR_SIZE, | ||
908 | &ftl->cache_data_invalid_bitmap)) | ||
909 | return -1; | ||
910 | |||
911 | memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE); | ||
912 | return 0; | ||
913 | } | ||
914 | |||
/* Write the cache to hardware (read-modify-write of one block) */
int sm_cache_flush(struct sm_ftl *ftl)
{
	struct ftl_zone *zone;

	int sector_num;
	uint16_t write_sector;
	int zone_num = ftl->cache_zone;
	int block_num;

	/* Nothing was written since the last flush */
	if (ftl->cache_clean)
		return 0;

	/* Refuse to touch media that already failed a CIS re-read */
	if (ftl->unstable)
		return -EIO;

	/* cache_clean == 0 implies the cache is bound to a zone/block */
	BUG_ON(zone_num < 0);
	zone = &ftl->zones[zone_num];
	block_num = zone->lba_to_phys_table[ftl->cache_block];


	/* Try to read all unread areas of the cache block*/
	/* (sectors the caller never wrote are filled in from the old
	   physical block, if one exists) */
	for_each_set_bit(sector_num, &ftl->cache_data_invalid_bitmap,
		ftl->block_size / SM_SECTOR_SIZE) {

		if (!sm_read_sector(ftl,
			zone_num, block_num, sector_num * SM_SECTOR_SIZE,
			ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
			clear_bit(sector_num,
				&ftl->cache_data_invalid_bitmap);
	}
restart:
	/* A failed write attempt may have marked the media unstable */
	if (ftl->unstable)
		return -EIO;

	/* If there are no spare blocks, */
	/* we could still continue by erasing/writing the current block,
		but for such worn out media it doesn't worth the trouble,
		and the dangers */
	if (kfifo_out(&zone->free_sectors,
		(unsigned char *)&write_sector, 2) != 2) {
		dbg("no free sectors for write!");
		return -EIO;
	}


	/* On failure, retry with the next free block from the FIFO */
	if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
		ftl->cache_block, ftl->cache_data_invalid_bitmap))
		goto restart;

	/* Update the FTL table */
	zone->lba_to_phys_table[ftl->cache_block] = write_sector;

	/* Write successful, so erase and free the old block */
	/* NOTE(review): 'block_num > 0' also skips the unmapped (-1) case,
	   but would equally skip a valid physical block 0 - confirm that
	   block 0 can never be a data block here */
	if (block_num > 0)
		sm_erase_block(ftl, zone_num, block_num, 1);

	/* Unbind the cache so it can serve the next block */
	sm_cache_init(ftl);
	return 0;
}
976 | |||
977 | |||
978 | /* flush timer, runs a second after last write */ | ||
979 | static void sm_cache_flush_timer(unsigned long data) | ||
980 | { | ||
981 | struct sm_ftl *ftl = (struct sm_ftl *)data; | ||
982 | queue_work(cache_flush_workqueue, &ftl->flush_work); | ||
983 | } | ||
984 | |||
985 | /* cache flush work, kicked by timer */ | ||
986 | static void sm_cache_flush_work(struct work_struct *work) | ||
987 | { | ||
988 | struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work); | ||
989 | mutex_lock(&ftl->mutex); | ||
990 | sm_cache_flush(ftl); | ||
991 | mutex_unlock(&ftl->mutex); | ||
992 | return; | ||
993 | } | ||
994 | |||
995 | /* ---------------- outside interface -------------------------------------- */ | ||
996 | |||
/* outside interface: read a sector */
static int sm_read(struct mtd_blktrans_dev *dev,
		   unsigned long sect_no, char *buf)
{
	struct sm_ftl *ftl = dev->priv;
	struct ftl_zone *zone;
	int error = 0, in_cache = 0;
	int zone_num, block, boffset;

	/* Split the 512-byte sector address into zone / LBA / byte offset */
	sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
	mutex_lock(&ftl->mutex);


	/* Zones are scanned lazily on first access */
	zone = sm_get_zone(ftl, zone_num);
	if (IS_ERR(zone)) {
		error = PTR_ERR(zone);
		goto unlock;
	}

	/* Have to look at cache first */
	if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
		in_cache = 1;
		if (!sm_cache_get(ftl, buf, boffset))
			goto unlock;
	}

	/* Translate the block and return if doesn't exist in the table */
	block = zone->lba_to_phys_table[block];

	if (block == -1) {
		/* Unmapped LBA reads back as erased flash */
		memset(buf, 0xFF, SM_SECTOR_SIZE);
		goto unlock;
	}

	if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
		error = -EIO;
		goto unlock;
	}

	/* Cache miss on the cached block: populate it for next time */
	if (in_cache)
		sm_cache_put(ftl, buf, boffset);
unlock:
	mutex_unlock(&ftl->mutex);
	return error;
}
1042 | |||
1043 | /* outside interface: write a sector */ | ||
1044 | static int sm_write(struct mtd_blktrans_dev *dev, | ||
1045 | unsigned long sec_no, char *buf) | ||
1046 | { | ||
1047 | struct sm_ftl *ftl = dev->priv; | ||
1048 | struct ftl_zone *zone; | ||
1049 | int error, zone_num, block, boffset; | ||
1050 | |||
1051 | BUG_ON(ftl->readonly); | ||
1052 | sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset); | ||
1053 | |||
1054 | /* No need in flush thread running now */ | ||
1055 | del_timer(&ftl->timer); | ||
1056 | mutex_lock(&ftl->mutex); | ||
1057 | |||
1058 | zone = sm_get_zone(ftl, zone_num); | ||
1059 | if (IS_ERR(zone)) { | ||
1060 | error = PTR_ERR(zone); | ||
1061 | goto unlock; | ||
1062 | } | ||
1063 | |||
1064 | /* If entry is not in cache, flush it */ | ||
1065 | if (ftl->cache_block != block || ftl->cache_zone != zone_num) { | ||
1066 | |||
1067 | error = sm_cache_flush(ftl); | ||
1068 | if (error) | ||
1069 | goto unlock; | ||
1070 | |||
1071 | ftl->cache_block = block; | ||
1072 | ftl->cache_zone = zone_num; | ||
1073 | } | ||
1074 | |||
1075 | sm_cache_put(ftl, buf, boffset); | ||
1076 | unlock: | ||
1077 | mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout)); | ||
1078 | mutex_unlock(&ftl->mutex); | ||
1079 | return error; | ||
1080 | } | ||
1081 | |||
1082 | /* outside interface: flush everything */ | ||
1083 | static int sm_flush(struct mtd_blktrans_dev *dev) | ||
1084 | { | ||
1085 | struct sm_ftl *ftl = dev->priv; | ||
1086 | int retval; | ||
1087 | |||
1088 | mutex_lock(&ftl->mutex); | ||
1089 | retval = sm_cache_flush(ftl); | ||
1090 | mutex_unlock(&ftl->mutex); | ||
1091 | return retval; | ||
1092 | } | ||
1093 | |||
1094 | /* outside interface: device is released */ | ||
1095 | static int sm_release(struct mtd_blktrans_dev *dev) | ||
1096 | { | ||
1097 | struct sm_ftl *ftl = dev->priv; | ||
1098 | |||
1099 | mutex_lock(&ftl->mutex); | ||
1100 | del_timer_sync(&ftl->timer); | ||
1101 | cancel_work_sync(&ftl->flush_work); | ||
1102 | sm_cache_flush(ftl); | ||
1103 | mutex_unlock(&ftl->mutex); | ||
1104 | return 0; | ||
1105 | } | ||
1106 | |||
1107 | /* outside interface: get geometry */ | ||
1108 | static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo) | ||
1109 | { | ||
1110 | struct sm_ftl *ftl = dev->priv; | ||
1111 | geo->heads = ftl->heads; | ||
1112 | geo->sectors = ftl->sectors; | ||
1113 | geo->cylinders = ftl->cylinders; | ||
1114 | return 0; | ||
1115 | } | ||
1116 | |||
1117 | /* external interface: main initialization function */ | ||
1118 | static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | ||
1119 | { | ||
1120 | struct mtd_blktrans_dev *trans; | ||
1121 | struct sm_ftl *ftl; | ||
1122 | |||
1123 | /* Allocate & initialize our private structure */ | ||
1124 | ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL); | ||
1125 | if (!ftl) | ||
1126 | goto error1; | ||
1127 | |||
1128 | |||
1129 | mutex_init(&ftl->mutex); | ||
1130 | setup_timer(&ftl->timer, sm_cache_flush_timer, (unsigned long)ftl); | ||
1131 | INIT_WORK(&ftl->flush_work, sm_cache_flush_work); | ||
1132 | init_completion(&ftl->erase_completion); | ||
1133 | |||
1134 | /* Read media information */ | ||
1135 | if (sm_get_media_info(ftl, mtd)) { | ||
1136 | dbg("found unsupported mtd device, aborting"); | ||
1137 | goto error2; | ||
1138 | } | ||
1139 | |||
1140 | |||
1141 | /* Allocate temporary CIS buffer for read retry support */ | ||
1142 | ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL); | ||
1143 | if (!ftl->cis_buffer) | ||
1144 | goto error2; | ||
1145 | |||
1146 | /* Allocate zone array, it will be initialized on demand */ | ||
1147 | ftl->zones = kzalloc(sizeof(struct ftl_zone) * ftl->zone_count, | ||
1148 | GFP_KERNEL); | ||
1149 | if (!ftl->zones) | ||
1150 | goto error3; | ||
1151 | |||
1152 | /* Allocate the cache*/ | ||
1153 | ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL); | ||
1154 | |||
1155 | if (!ftl->cache_data) | ||
1156 | goto error4; | ||
1157 | |||
1158 | sm_cache_init(ftl); | ||
1159 | |||
1160 | |||
1161 | /* Allocate upper layer structure and initialize it */ | ||
1162 | trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL); | ||
1163 | if (!trans) | ||
1164 | goto error5; | ||
1165 | |||
1166 | ftl->trans = trans; | ||
1167 | trans->priv = ftl; | ||
1168 | |||
1169 | trans->tr = tr; | ||
1170 | trans->mtd = mtd; | ||
1171 | trans->devnum = -1; | ||
1172 | trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9; | ||
1173 | trans->readonly = ftl->readonly; | ||
1174 | |||
1175 | if (sm_find_cis(ftl)) { | ||
1176 | dbg("CIS not found on mtd device, aborting"); | ||
1177 | goto error6; | ||
1178 | } | ||
1179 | |||
1180 | ftl->disk_attributes = sm_create_sysfs_attributes(ftl); | ||
1181 | trans->disk_attributes = ftl->disk_attributes; | ||
1182 | |||
1183 | sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d", | ||
1184 | (int)(mtd->size / (1024 * 1024)), mtd->index); | ||
1185 | |||
1186 | dbg("FTL layout:"); | ||
1187 | dbg("%d zone(s), each consists of %d blocks (+%d spares)", | ||
1188 | ftl->zone_count, ftl->max_lba, | ||
1189 | ftl->zone_size - ftl->max_lba); | ||
1190 | dbg("each block consists of %d bytes", | ||
1191 | ftl->block_size); | ||
1192 | |||
1193 | |||
1194 | /* Register device*/ | ||
1195 | if (add_mtd_blktrans_dev(trans)) { | ||
1196 | dbg("error in mtdblktrans layer"); | ||
1197 | goto error6; | ||
1198 | } | ||
1199 | return; | ||
1200 | error6: | ||
1201 | kfree(trans); | ||
1202 | error5: | ||
1203 | kfree(ftl->cache_data); | ||
1204 | error4: | ||
1205 | kfree(ftl->zones); | ||
1206 | error3: | ||
1207 | kfree(ftl->cis_buffer); | ||
1208 | error2: | ||
1209 | kfree(ftl); | ||
1210 | error1: | ||
1211 | return; | ||
1212 | } | ||
1213 | |||
1214 | /* main interface: device {surprise,} removal */ | ||
1215 | static void sm_remove_dev(struct mtd_blktrans_dev *dev) | ||
1216 | { | ||
1217 | struct sm_ftl *ftl = dev->priv; | ||
1218 | int i; | ||
1219 | |||
1220 | del_mtd_blktrans_dev(dev); | ||
1221 | ftl->trans = NULL; | ||
1222 | |||
1223 | for (i = 0 ; i < ftl->zone_count; i++) { | ||
1224 | |||
1225 | if (!ftl->zones[i].initialized) | ||
1226 | continue; | ||
1227 | |||
1228 | kfree(ftl->zones[i].lba_to_phys_table); | ||
1229 | kfifo_free(&ftl->zones[i].free_sectors); | ||
1230 | } | ||
1231 | |||
1232 | sm_delete_sysfs_attributes(ftl); | ||
1233 | kfree(ftl->cis_buffer); | ||
1234 | kfree(ftl->zones); | ||
1235 | kfree(ftl->cache_data); | ||
1236 | kfree(ftl); | ||
1237 | } | ||
1238 | |||
/* Hooks registered with the generic mtd block translation layer */
static struct mtd_blktrans_ops sm_ftl_ops = {
	.name		= "smblk",
	.major		= -1,		/* allocate the major dynamically */
	.part_bits	= SM_FTL_PARTN_BITS,
	.blksize	= SM_SECTOR_SIZE,
	.getgeo		= sm_getgeo,

	.add_mtd	= sm_add_mtd,
	.remove_dev	= sm_remove_dev,

	.readsect	= sm_read,
	.writesect	= sm_write,

	.flush		= sm_flush,
	.release	= sm_release,

	.owner		= THIS_MODULE,
};
1257 | |||
1258 | static __init int sm_module_init(void) | ||
1259 | { | ||
1260 | int error = 0; | ||
1261 | cache_flush_workqueue = create_freezeable_workqueue("smflush"); | ||
1262 | |||
1263 | if (IS_ERR(cache_flush_workqueue)) | ||
1264 | return PTR_ERR(cache_flush_workqueue); | ||
1265 | |||
1266 | error = register_mtd_blktrans(&sm_ftl_ops); | ||
1267 | if (error) | ||
1268 | destroy_workqueue(cache_flush_workqueue); | ||
1269 | return error; | ||
1270 | |||
1271 | } | ||
1272 | |||
/* Module exit: tear down the flush workqueue and deregister */
static void __exit sm_module_exit(void)
{
	/* NOTE(review): the workqueue is destroyed while devices may still
	   be registered; a device timer firing in this window would queue
	   work on a destroyed queue. Confirm whether the ordering should
	   be deregister first, then destroy. */
	destroy_workqueue(cache_flush_workqueue);
	deregister_mtd_blktrans(&sm_ftl_ops);
}
1278 | |||
/* Module entry points and metadata */
module_init(sm_module_init);
module_exit(sm_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");
diff --git a/drivers/mtd/sm_ftl.h b/drivers/mtd/sm_ftl.h new file mode 100644 index 000000000000..e30e48e7f63d --- /dev/null +++ b/drivers/mtd/sm_ftl.h | |||
@@ -0,0 +1,94 @@ | |||
1 | /* | ||
2 | * Copyright © 2009 - Maxim Levitsky | ||
3 | * SmartMedia/xD translation layer | ||
4 | * | ||
5 | * Based loosly on ssfdc.c which is | ||
6 | * © 2005 Eptar srl | ||
7 | * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #include <linux/mtd/blktrans.h> | ||
15 | #include <linux/kfifo.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/completion.h> | ||
18 | #include <linux/mtd/mtd.h> | ||
19 | |||
20 | |||
21 | |||
/* Per-zone FTL state; built lazily on first access to the zone */
struct ftl_zone {
	int initialized;		/* set once the zone was scanned */
	int16_t *lba_to_phys_table;	/* LBA to physical table, -1 = unmapped */
	struct kfifo free_sectors;	/* queue of free sectors (16-bit block numbers) */
};
27 | |||
/* Main driver state: one instance per attached SmartMedia/xD card */
struct sm_ftl {
	struct mtd_blktrans_dev *trans;	/* upper (block translation) layer */

	struct mutex mutex;		/* protects the structure */
	struct ftl_zone *zones;		/* FTL tables for each zone */

	/* Media information */
	int block_size;		/* block size in bytes */
	int zone_size;		/* zone size in blocks */
	int zone_count;		/* number of zones */
	int max_lba;		/* maximum lba in a zone */
	int smallpagenand;	/* 256 bytes/page nand */
	int readonly;		/* is FS readonly */
	int unstable;		/* set once a CIS re-read failed; writes refused */
	int cis_block;		/* CIS block location */
	int cis_boffset;	/* CIS offset in the block */
	int cis_page_offset;	/* CIS offset in the page */
	void *cis_buffer;	/* tmp buffer for cis reads */

	/* Cache */
	int cache_block;	/* block number of cached block */
	int cache_zone;		/* zone of cached block */
	unsigned char *cache_data;	/* cached block data */
	/* bit set = sector not yet placed in the cache */
	long unsigned int cache_data_invalid_bitmap;
	int cache_clean;	/* nothing dirty to flush */
	struct work_struct flush_work;	/* deferred cache flush */
	struct timer_list timer;	/* arms flush_work after last write */

	/* Async erase stuff */
	struct completion erase_completion;

	/* Geometry stuff */
	int heads;
	int sectors;
	int cylinders;

	struct attribute_group *disk_attributes;	/* sysfs attributes */
};
66 | |||
/* One row of a media-size -> CHS geometry lookup table */
struct chs_entry {
	unsigned long size;	/* media size this entry applies to
				   (presumably in sectors - confirm at the
				   table definition site) */
	unsigned short cyl;	/* cylinders */
	unsigned char head;	/* heads */
	unsigned char sec;	/* sectors per track */
};
73 | |||
74 | |||
#define SM_FTL_PARTN_BITS	3

/* Always-on warning message, prefixed with the driver name */
#define sm_printk(format, ...) \
	printk(KERN_WARNING "sm_ftl" ": " format "\n", ## __VA_ARGS__)

/* Debug messages, gated by the 'debug' module parameter.
   Fix: the old definitions expanded to a bare 'if (debug) printk(...)',
   which silently captures a following 'else' and misbehaves as the body
   of an unbraced if/else. Wrap in do { } while (0) so each macro is a
   single statement (standard multi-statement-macro idiom). */
#define dbg(format, ...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "sm_ftl" ": "			\
				format "\n", ## __VA_ARGS__);		\
	} while (0)

#define dbg_verbose(format, ...)					\
	do {								\
		if (debug > 1)						\
			printk(KERN_DEBUG "sm_ftl" ": "			\
				format "\n", ## __VA_ARGS__);		\
	} while (0)
87 | |||
88 | |||
/* Forward declarations for helpers defined in sm_ftl.c */
static void sm_erase_callback(struct erase_info *self);
static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
							int put_free);
static void sm_mark_block_bad(struct sm_ftl *ftl, int zone_num, int block);

static int sm_recheck_media(struct sm_ftl *ftl);
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c index 3f67e00d98e0..81c4ecdc11f5 100644 --- a/drivers/mtd/ssfdc.c +++ b/drivers/mtd/ssfdc.c | |||
@@ -375,7 +375,6 @@ static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev) | |||
375 | 375 | ||
376 | del_mtd_blktrans_dev(dev); | 376 | del_mtd_blktrans_dev(dev); |
377 | kfree(ssfdc->logic_block_map); | 377 | kfree(ssfdc->logic_block_map); |
378 | kfree(ssfdc); | ||
379 | } | 378 | } |
380 | 379 | ||
381 | static int ssfdcr_readsect(struct mtd_blktrans_dev *dev, | 380 | static int ssfdcr_readsect(struct mtd_blktrans_dev *dev, |
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c index 921a85df9196..6bc1b8276c62 100644 --- a/drivers/mtd/tests/mtd_pagetest.c +++ b/drivers/mtd/tests/mtd_pagetest.c | |||
@@ -480,12 +480,11 @@ static int scan_for_bad_eraseblocks(void) | |||
480 | { | 480 | { |
481 | int i, bad = 0; | 481 | int i, bad = 0; |
482 | 482 | ||
483 | bbt = kmalloc(ebcnt, GFP_KERNEL); | 483 | bbt = kzalloc(ebcnt, GFP_KERNEL); |
484 | if (!bbt) { | 484 | if (!bbt) { |
485 | printk(PRINT_PREF "error: cannot allocate memory\n"); | 485 | printk(PRINT_PREF "error: cannot allocate memory\n"); |
486 | return -ENOMEM; | 486 | return -ENOMEM; |
487 | } | 487 | } |
488 | memset(bbt, 0 , ebcnt); | ||
489 | 488 | ||
490 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); | 489 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); |
491 | for (i = 0; i < ebcnt; ++i) { | 490 | for (i = 0; i < ebcnt; ++i) { |
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c index 7107fccbc7de..afe71aa15c4b 100644 --- a/drivers/mtd/tests/mtd_readtest.c +++ b/drivers/mtd/tests/mtd_readtest.c | |||
@@ -141,12 +141,11 @@ static int scan_for_bad_eraseblocks(void) | |||
141 | { | 141 | { |
142 | int i, bad = 0; | 142 | int i, bad = 0; |
143 | 143 | ||
144 | bbt = kmalloc(ebcnt, GFP_KERNEL); | 144 | bbt = kzalloc(ebcnt, GFP_KERNEL); |
145 | if (!bbt) { | 145 | if (!bbt) { |
146 | printk(PRINT_PREF "error: cannot allocate memory\n"); | 146 | printk(PRINT_PREF "error: cannot allocate memory\n"); |
147 | return -ENOMEM; | 147 | return -ENOMEM; |
148 | } | 148 | } |
149 | memset(bbt, 0 , ebcnt); | ||
150 | 149 | ||
151 | /* NOR flash does not implement block_isbad */ | 150 | /* NOR flash does not implement block_isbad */ |
152 | if (mtd->block_isbad == NULL) | 151 | if (mtd->block_isbad == NULL) |
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c index 56ca62bb96bf..161feeb7b8b9 100644 --- a/drivers/mtd/tests/mtd_speedtest.c +++ b/drivers/mtd/tests/mtd_speedtest.c | |||
@@ -295,12 +295,11 @@ static int scan_for_bad_eraseblocks(void) | |||
295 | { | 295 | { |
296 | int i, bad = 0; | 296 | int i, bad = 0; |
297 | 297 | ||
298 | bbt = kmalloc(ebcnt, GFP_KERNEL); | 298 | bbt = kzalloc(ebcnt, GFP_KERNEL); |
299 | if (!bbt) { | 299 | if (!bbt) { |
300 | printk(PRINT_PREF "error: cannot allocate memory\n"); | 300 | printk(PRINT_PREF "error: cannot allocate memory\n"); |
301 | return -ENOMEM; | 301 | return -ENOMEM; |
302 | } | 302 | } |
303 | memset(bbt, 0 , ebcnt); | ||
304 | 303 | ||
305 | /* NOR flash does not implement block_isbad */ | 304 | /* NOR flash does not implement block_isbad */ |
306 | if (mtd->block_isbad == NULL) | 305 | if (mtd->block_isbad == NULL) |
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c index 3854afec56d0..531625fc9259 100644 --- a/drivers/mtd/tests/mtd_stresstest.c +++ b/drivers/mtd/tests/mtd_stresstest.c | |||
@@ -221,12 +221,11 @@ static int scan_for_bad_eraseblocks(void) | |||
221 | { | 221 | { |
222 | int i, bad = 0; | 222 | int i, bad = 0; |
223 | 223 | ||
224 | bbt = kmalloc(ebcnt, GFP_KERNEL); | 224 | bbt = kzalloc(ebcnt, GFP_KERNEL); |
225 | if (!bbt) { | 225 | if (!bbt) { |
226 | printk(PRINT_PREF "error: cannot allocate memory\n"); | 226 | printk(PRINT_PREF "error: cannot allocate memory\n"); |
227 | return -ENOMEM; | 227 | return -ENOMEM; |
228 | } | 228 | } |
229 | memset(bbt, 0 , ebcnt); | ||
230 | 229 | ||
231 | /* NOR flash does not implement block_isbad */ | 230 | /* NOR flash does not implement block_isbad */ |
232 | if (mtd->block_isbad == NULL) | 231 | if (mtd->block_isbad == NULL) |
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c index 700237a3d120..11204e8aab5f 100644 --- a/drivers/mtd/tests/mtd_subpagetest.c +++ b/drivers/mtd/tests/mtd_subpagetest.c | |||
@@ -354,12 +354,11 @@ static int scan_for_bad_eraseblocks(void) | |||
354 | { | 354 | { |
355 | int i, bad = 0; | 355 | int i, bad = 0; |
356 | 356 | ||
357 | bbt = kmalloc(ebcnt, GFP_KERNEL); | 357 | bbt = kzalloc(ebcnt, GFP_KERNEL); |
358 | if (!bbt) { | 358 | if (!bbt) { |
359 | printk(PRINT_PREF "error: cannot allocate memory\n"); | 359 | printk(PRINT_PREF "error: cannot allocate memory\n"); |
360 | return -ENOMEM; | 360 | return -ENOMEM; |
361 | } | 361 | } |
362 | memset(bbt, 0 , ebcnt); | ||
363 | 362 | ||
364 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); | 363 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); |
365 | for (i = 0; i < ebcnt; ++i) { | 364 | for (i = 0; i < ebcnt; ++i) { |
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig index 0a8c7ea764ae..f702a163d8df 100644 --- a/drivers/mtd/ubi/Kconfig +++ b/drivers/mtd/ubi/Kconfig | |||
@@ -27,7 +27,7 @@ config MTD_UBI_WL_THRESHOLD | |||
27 | The default value should be OK for SLC NAND flashes, NOR flashes and | 27 | The default value should be OK for SLC NAND flashes, NOR flashes and |
28 | other flashes which have eraseblock life-cycle 100000 or more. | 28 | other flashes which have eraseblock life-cycle 100000 or more. |
29 | However, in case of MLC NAND flashes which typically have eraseblock | 29 | However, in case of MLC NAND flashes which typically have eraseblock |
30 | life-cycle less then 10000, the threshold should be lessened (e.g., | 30 | life-cycle less than 10000, the threshold should be lessened (e.g., |
31 | to 128 or 256, although it does not have to be power of 2). | 31 | to 128 or 256, although it does not have to be power of 2). |
32 | 32 | ||
33 | config MTD_UBI_BEB_RESERVE | 33 | config MTD_UBI_BEB_RESERVE |
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 55c726dde942..13b05cb33b08 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include <linux/miscdevice.h> | 42 | #include <linux/miscdevice.h> |
43 | #include <linux/log2.h> | 43 | #include <linux/log2.h> |
44 | #include <linux/kthread.h> | 44 | #include <linux/kthread.h> |
45 | #include <linux/reboot.h> | ||
46 | #include <linux/kernel.h> | 45 | #include <linux/kernel.h> |
47 | #include <linux/slab.h> | 46 | #include <linux/slab.h> |
48 | #include "ubi.h" | 47 | #include "ubi.h" |
@@ -50,6 +49,12 @@ | |||
50 | /* Maximum length of the 'mtd=' parameter */ | 49 | /* Maximum length of the 'mtd=' parameter */ |
51 | #define MTD_PARAM_LEN_MAX 64 | 50 | #define MTD_PARAM_LEN_MAX 64 |
52 | 51 | ||
52 | #ifdef CONFIG_MTD_UBI_MODULE | ||
53 | #define ubi_is_module() 1 | ||
54 | #else | ||
55 | #define ubi_is_module() 0 | ||
56 | #endif | ||
57 | |||
53 | /** | 58 | /** |
54 | * struct mtd_dev_param - MTD device parameter description data structure. | 59 | * struct mtd_dev_param - MTD device parameter description data structure. |
55 | * @name: MTD character device node path, MTD device name, or MTD device number | 60 | * @name: MTD character device node path, MTD device name, or MTD device number |
@@ -832,34 +837,6 @@ static int autoresize(struct ubi_device *ubi, int vol_id) | |||
832 | } | 837 | } |
833 | 838 | ||
834 | /** | 839 | /** |
835 | * ubi_reboot_notifier - halt UBI transactions immediately prior to a reboot. | ||
836 | * @n: reboot notifier object | ||
837 | * @state: SYS_RESTART, SYS_HALT, or SYS_POWER_OFF | ||
838 | * @cmd: pointer to command string for RESTART2 | ||
839 | * | ||
840 | * This function stops the UBI background thread so that the flash device | ||
841 | * remains quiescent when Linux restarts the system. Any queued work will be | ||
842 | * discarded, but this function will block until do_work() finishes if an | ||
843 | * operation is already in progress. | ||
844 | * | ||
845 | * This function solves a real-life problem observed on NOR flashes when an | ||
846 | * PEB erase operation starts, then the system is rebooted before the erase is | ||
847 | * finishes, and the boot loader gets confused and dies. So we prefer to finish | ||
848 | * the ongoing operation before rebooting. | ||
849 | */ | ||
850 | static int ubi_reboot_notifier(struct notifier_block *n, unsigned long state, | ||
851 | void *cmd) | ||
852 | { | ||
853 | struct ubi_device *ubi; | ||
854 | |||
855 | ubi = container_of(n, struct ubi_device, reboot_notifier); | ||
856 | if (ubi->bgt_thread) | ||
857 | kthread_stop(ubi->bgt_thread); | ||
858 | ubi_sync(ubi->ubi_num); | ||
859 | return NOTIFY_DONE; | ||
860 | } | ||
861 | |||
862 | /** | ||
863 | * ubi_attach_mtd_dev - attach an MTD device. | 840 | * ubi_attach_mtd_dev - attach an MTD device. |
864 | * @mtd: MTD device description object | 841 | * @mtd: MTD device description object |
865 | * @ubi_num: number to assign to the new UBI device | 842 | * @ubi_num: number to assign to the new UBI device |
@@ -1016,11 +993,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) | |||
1016 | wake_up_process(ubi->bgt_thread); | 993 | wake_up_process(ubi->bgt_thread); |
1017 | spin_unlock(&ubi->wl_lock); | 994 | spin_unlock(&ubi->wl_lock); |
1018 | 995 | ||
1019 | /* Flash device priority is 0 - UBI needs to shut down first */ | ||
1020 | ubi->reboot_notifier.priority = 1; | ||
1021 | ubi->reboot_notifier.notifier_call = ubi_reboot_notifier; | ||
1022 | register_reboot_notifier(&ubi->reboot_notifier); | ||
1023 | |||
1024 | ubi_devices[ubi_num] = ubi; | 996 | ubi_devices[ubi_num] = ubi; |
1025 | ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL); | 997 | ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL); |
1026 | return ubi_num; | 998 | return ubi_num; |
@@ -1091,7 +1063,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway) | |||
1091 | * Before freeing anything, we have to stop the background thread to | 1063 | * Before freeing anything, we have to stop the background thread to |
1092 | * prevent it from doing anything on this device while we are freeing. | 1064 | * prevent it from doing anything on this device while we are freeing. |
1093 | */ | 1065 | */ |
1094 | unregister_reboot_notifier(&ubi->reboot_notifier); | ||
1095 | if (ubi->bgt_thread) | 1066 | if (ubi->bgt_thread) |
1096 | kthread_stop(ubi->bgt_thread); | 1067 | kthread_stop(ubi->bgt_thread); |
1097 | 1068 | ||
@@ -1241,9 +1212,24 @@ static int __init ubi_init(void) | |||
1241 | p->vid_hdr_offs); | 1212 | p->vid_hdr_offs); |
1242 | mutex_unlock(&ubi_devices_mutex); | 1213 | mutex_unlock(&ubi_devices_mutex); |
1243 | if (err < 0) { | 1214 | if (err < 0) { |
1244 | put_mtd_device(mtd); | ||
1245 | ubi_err("cannot attach mtd%d", mtd->index); | 1215 | ubi_err("cannot attach mtd%d", mtd->index); |
1246 | goto out_detach; | 1216 | put_mtd_device(mtd); |
1217 | |||
1218 | /* | ||
1219 | * Originally UBI stopped initializing on any error. | ||
1220 | * However, later on it was found out that this | ||
1221 | * behavior is not very good when UBI is compiled into | ||
1222 | * the kernel and the MTD devices to attach are passed | ||
1223 | * through the command line. Indeed, UBI failure | ||
1224 | * stopped whole boot sequence. | ||
1225 | * | ||
1226 | * To fix this, we changed the behavior for the | ||
1227 | * non-module case, but preserved the old behavior for | ||
1228 | * the module case, just for compatibility. This is a | ||
1229 | * little inconsistent, though. | ||
1230 | */ | ||
1231 | if (ubi_is_module()) | ||
1232 | goto out_detach; | ||
1247 | } | 1233 | } |
1248 | } | 1234 | } |
1249 | 1235 | ||
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index 533b1a4b9af1..4b979e34b159 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c | |||
@@ -64,9 +64,9 @@ | |||
64 | * device, e.g., make @ubi->min_io_size = 512 in the example above? | 64 | * device, e.g., make @ubi->min_io_size = 512 in the example above? |
65 | * | 65 | * |
66 | * A: because when writing a sub-page, MTD still writes a full 2K page but the | 66 | * A: because when writing a sub-page, MTD still writes a full 2K page but the |
67 | * bytes which are no relevant to the sub-page are 0xFF. So, basically, writing | 67 | * bytes which are not relevant to the sub-page are 0xFF. So, basically, |
68 | * 4x512 sub-pages is 4 times slower then writing one 2KiB NAND page. Thus, we | 68 | * writing 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page. |
69 | * prefer to use sub-pages only for EV and VID headers. | 69 | * Thus, we prefer to use sub-pages only for EC and VID headers. |
70 | * | 70 | * |
71 | * As it was noted above, the VID header may start at a non-aligned offset. | 71 | * As it was noted above, the VID header may start at a non-aligned offset. |
72 | * For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page, | 72 | * For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page, |
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index 17f287decc36..69fa4ef03c53 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c | |||
@@ -488,7 +488,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_write); | |||
488 | * | 488 | * |
489 | * This function changes the contents of a logical eraseblock atomically. @buf | 489 | * This function changes the contents of a logical eraseblock atomically. @buf |
490 | * has to contain new logical eraseblock data, and @len - the length of the | 490 | * has to contain new logical eraseblock data, and @len - the length of the |
491 | * data, which has to be aligned. The length may be shorter then the logical | 491 | * data, which has to be aligned. The length may be shorter than the logical |
492 | * eraseblock size, ant the logical eraseblock may be appended to more times | 492 | * eraseblock size, ant the logical eraseblock may be appended to more times |
493 | * later on. This function guarantees that in case of an unclean reboot the old | 493 | * later on. This function guarantees that in case of an unclean reboot the old |
494 | * contents is preserved. Returns zero in case of success and a negative error | 494 | * contents is preserved. Returns zero in case of success and a negative error |
@@ -571,7 +571,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_erase); | |||
571 | * | 571 | * |
572 | * This function un-maps logical eraseblock @lnum and schedules the | 572 | * This function un-maps logical eraseblock @lnum and schedules the |
573 | * corresponding physical eraseblock for erasure, so that it will eventually be | 573 | * corresponding physical eraseblock for erasure, so that it will eventually be |
574 | * physically erased in background. This operation is much faster then the | 574 | * physically erased in background. This operation is much faster than the |
575 | * erase operation. | 575 | * erase operation. |
576 | * | 576 | * |
577 | * Unlike erase, the un-map operation does not guarantee that the logical | 577 | * Unlike erase, the un-map operation does not guarantee that the logical |
@@ -590,7 +590,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_erase); | |||
590 | * | 590 | * |
591 | * The main and obvious use-case of this function is when the contents of a | 591 | * The main and obvious use-case of this function is when the contents of a |
592 | * logical eraseblock has to be re-written. Then it is much more efficient to | 592 | * logical eraseblock has to be re-written. Then it is much more efficient to |
593 | * first un-map it, then write new data, rather then first erase it, then write | 593 | * first un-map it, then write new data, rather than first erase it, then write |
594 | * new data. Note, once new data has been written to the logical eraseblock, | 594 | * new data. Note, once new data has been written to the logical eraseblock, |
595 | * UBI guarantees that the old contents has gone forever. In other words, if an | 595 | * UBI guarantees that the old contents has gone forever. In other words, if an |
596 | * unclean reboot happens after the logical eraseblock has been un-mapped and | 596 | * unclean reboot happens after the logical eraseblock has been un-mapped and |
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c index dc5f688699da..aed19f33b8f3 100644 --- a/drivers/mtd/ubi/scan.c +++ b/drivers/mtd/ubi/scan.c | |||
@@ -231,7 +231,7 @@ static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id, | |||
231 | * case of success this function returns a positive value, in case of failure, a | 231 | * case of success this function returns a positive value, in case of failure, a |
232 | * negative error code is returned. The success return codes use the following | 232 | * negative error code is returned. The success return codes use the following |
233 | * bits: | 233 | * bits: |
234 | * o bit 0 is cleared: the first PEB (described by @seb) is newer then the | 234 | * o bit 0 is cleared: the first PEB (described by @seb) is newer than the |
235 | * second PEB (described by @pnum and @vid_hdr); | 235 | * second PEB (described by @pnum and @vid_hdr); |
236 | * o bit 0 is set: the second PEB is newer; | 236 | * o bit 0 is set: the second PEB is newer; |
237 | * o bit 1 is cleared: no bit-flips were detected in the newer LEB; | 237 | * o bit 1 is cleared: no bit-flips were detected in the newer LEB; |
@@ -452,7 +452,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si, | |||
452 | 452 | ||
453 | if (cmp_res & 1) { | 453 | if (cmp_res & 1) { |
454 | /* | 454 | /* |
455 | * This logical eraseblock is newer then the one | 455 | * This logical eraseblock is newer than the one |
456 | * found earlier. | 456 | * found earlier. |
457 | */ | 457 | */ |
458 | err = validate_vid_hdr(vid_hdr, sv, pnum); | 458 | err = validate_vid_hdr(vid_hdr, sv, pnum); |
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 5176d4886518..a637f0283add 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h | |||
@@ -350,7 +350,6 @@ struct ubi_wl_entry; | |||
350 | * @bgt_thread: background thread description object | 350 | * @bgt_thread: background thread description object |
351 | * @thread_enabled: if the background thread is enabled | 351 | * @thread_enabled: if the background thread is enabled |
352 | * @bgt_name: background thread name | 352 | * @bgt_name: background thread name |
353 | * @reboot_notifier: notifier to terminate background thread before rebooting | ||
354 | * | 353 | * |
355 | * @flash_size: underlying MTD device size (in bytes) | 354 | * @flash_size: underlying MTD device size (in bytes) |
356 | * @peb_count: count of physical eraseblocks on the MTD device | 355 | * @peb_count: count of physical eraseblocks on the MTD device |
@@ -436,7 +435,6 @@ struct ubi_device { | |||
436 | struct task_struct *bgt_thread; | 435 | struct task_struct *bgt_thread; |
437 | int thread_enabled; | 436 | int thread_enabled; |
438 | char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2]; | 437 | char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2]; |
439 | struct notifier_block reboot_notifier; | ||
440 | 438 | ||
441 | /* I/O sub-system's stuff */ | 439 | /* I/O sub-system's stuff */ |
442 | long long flash_size; | 440 | long long flash_size; |
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index cd90ff3b76b1..14c10bed94ee 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c | |||
@@ -414,7 +414,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi, | |||
414 | * 0 contains more recent information. | 414 | * 0 contains more recent information. |
415 | * | 415 | * |
416 | * So the plan is to first check LEB 0. Then | 416 | * So the plan is to first check LEB 0. Then |
417 | * a. if LEB 0 is OK, it must be containing the most resent data; then | 417 | * a. if LEB 0 is OK, it must be containing the most recent data; then |
418 | * we compare it with LEB 1, and if they are different, we copy LEB | 418 | * we compare it with LEB 1, and if they are different, we copy LEB |
419 | * 0 to LEB 1; | 419 | * 0 to LEB 1; |
420 | * b. if LEB 0 is corrupted, but LEB 1 has to be OK, and we copy LEB 1 | 420 | * b. if LEB 0 is corrupted, but LEB 1 has to be OK, and we copy LEB 1 |
@@ -848,7 +848,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si) | |||
848 | goto out_free; | 848 | goto out_free; |
849 | 849 | ||
850 | /* | 850 | /* |
851 | * Get sure that the scanning information is consistent to the | 851 | * Make sure that the scanning information is consistent to the |
852 | * information stored in the volume table. | 852 | * information stored in the volume table. |
853 | */ | 853 | */ |
854 | err = check_scanning_info(ubi, si); | 854 | err = check_scanning_info(ubi, si); |
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index f64ddabd4ac8..ee7b1d8fbb92 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c | |||
@@ -350,7 +350,7 @@ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e) | |||
350 | * @max: highest possible erase counter | 350 | * @max: highest possible erase counter |
351 | * | 351 | * |
352 | * This function looks for a wear leveling entry with erase counter closest to | 352 | * This function looks for a wear leveling entry with erase counter closest to |
353 | * @max and less then @max. | 353 | * @max and less than @max. |
354 | */ | 354 | */ |
355 | static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max) | 355 | static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max) |
356 | { | 356 | { |