aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mtd
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2008-10-20 12:03:12 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-10-20 12:03:12 -0400
commit2be508d847392e431759e370d21cea9412848758 (patch)
treebe5e00a4d7be4ef353ffe4d550fb80a251d321c3 /drivers/mtd
parent01e8ef11bc1a74e65678ed55795f59266d4add01 (diff)
parent8a1a6272057e2ad90ab531a70330165888866e60 (diff)
Merge git://git.infradead.org/mtd-2.6
* git://git.infradead.org/mtd-2.6: (69 commits) Revert "[MTD] m25p80.c code cleanup" [MTD] [NAND] GPIO driver depends on ARM... for now. [MTD] [NAND] sh_flctl: fix compile error [MTD] [NOR] AT49BV6416 has swapped erase regions [MTD] [NAND] GPIO NAND flash driver [MTD] cmdlineparts documentation change - explain where mtd-id comes from [MTD] cfi_cmdset_0002.c: Add Macronix CFI V1.0 TopBottom detection [MTD] [NAND] Fix compilation warnings in drivers/mtd/nand/cs553x_nand.c [JFFS2] Write buffer offset adjustment for NOR-ECC (Sibley) flash [MTD] mtdoops: Fix a bug where block may not be erased [MTD] mtdoops: Add a magic number to logged kernel oops [MTD] mtdoops: Fix an off by one error [JFFS2] Correct parameter names of jffs2_compress() in comments [MTD] [NAND] sh_flctl: add support for Renesas SuperH FLCTL [MTD] [NAND] Bug on atmel_nand HW ECC : OOB info not correctly written [MTD] [MAPS] Remove unused variable after ROM API cleanup. [MTD] m25p80.c extended jedec support (v2) [MTD] remove unused mtd parameter in of_mtd_parse_partitions() [MTD] [NAND] remove dead Kconfig associated with !CONFIG_PPC_MERGE [MTD] [NAND] driver extension to support NAND on TQM85xx modules ...
Diffstat (limited to 'drivers/mtd')
-rw-r--r--drivers/mtd/Kconfig5
-rw-r--r--drivers/mtd/chips/Kconfig4
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c71
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c52
-rw-r--r--drivers/mtd/chips/cfi_probe.c58
-rw-r--r--drivers/mtd/chips/cfi_util.c66
-rw-r--r--drivers/mtd/chips/gen_probe.c2
-rw-r--r--drivers/mtd/cmdlinepart.c1
-rw-r--r--drivers/mtd/devices/Kconfig21
-rw-r--r--drivers/mtd/devices/m25p80.c138
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c214
-rw-r--r--drivers/mtd/inftlcore.c5
-rw-r--r--drivers/mtd/maps/Kconfig33
-rw-r--r--drivers/mtd/maps/Makefile4
-rw-r--r--drivers/mtd/maps/ebony.c163
-rw-r--r--drivers/mtd/maps/ocotea.c154
-rw-r--r--drivers/mtd/maps/omap-toto-flash.c133
-rw-r--r--drivers/mtd/maps/pci.c18
-rw-r--r--drivers/mtd/maps/physmap_of.c3
-rw-r--r--drivers/mtd/maps/walnut.c122
-rw-r--r--drivers/mtd/mtdchar.c4
-rw-r--r--drivers/mtd/mtdconcat.c4
-rw-r--r--drivers/mtd/mtdoops.c42
-rw-r--r--drivers/mtd/mtdpart.c4
-rw-r--r--drivers/mtd/nand/Kconfig42
-rw-r--r--drivers/mtd/nand/Makefile4
-rw-r--r--drivers/mtd/nand/atmel_nand.c58
-rw-r--r--drivers/mtd/nand/cs553x_nand.c2
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c3
-rw-r--r--drivers/mtd/nand/fsl_upm.c68
-rw-r--r--drivers/mtd/nand/gpio.c375
-rw-r--r--drivers/mtd/nand/mxc_nand.c1077
-rw-r--r--drivers/mtd/nand/nand_base.c16
-rw-r--r--drivers/mtd/nand/nand_ecc.c554
-rw-r--r--drivers/mtd/nand/nandsim.c1
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c147
-rw-r--r--drivers/mtd/nand/sh_flctl.c878
-rw-r--r--drivers/mtd/nand/toto.c206
-rw-r--r--drivers/mtd/ofpart.c1
-rw-r--r--drivers/mtd/onenand/Kconfig8
-rw-r--r--drivers/mtd/onenand/Makefile1
-rw-r--r--drivers/mtd/onenand/omap2.c802
-rw-r--r--drivers/mtd/onenand/onenand_base.c2
-rw-r--r--drivers/mtd/ssfdc.c3
-rw-r--r--drivers/mtd/ubi/cdev.c6
-rw-r--r--drivers/mtd/ubi/scan.c2
-rw-r--r--drivers/mtd/ubi/vtbl.c4
47 files changed, 4282 insertions, 1299 deletions
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 14f11f8b9e5f..a90d50c2c3e5 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -172,6 +172,11 @@ config MTD_CHAR
172 memory chips, and also use ioctl() to obtain information about 172 memory chips, and also use ioctl() to obtain information about
173 the device, or to erase parts of it. 173 the device, or to erase parts of it.
174 174
175config HAVE_MTD_OTP
176 bool
177 help
178 Enable access to OTP regions using MTD_CHAR.
179
175config MTD_BLKDEVS 180config MTD_BLKDEVS
176 tristate "Common interface to block layer for MTD 'translation layers'" 181 tristate "Common interface to block layer for MTD 'translation layers'"
177 depends on BLOCK 182 depends on BLOCK
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index 479d32b57a1e..9408099eec48 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -6,6 +6,7 @@ menu "RAM/ROM/Flash chip drivers"
6config MTD_CFI 6config MTD_CFI
7 tristate "Detect flash chips by Common Flash Interface (CFI) probe" 7 tristate "Detect flash chips by Common Flash Interface (CFI) probe"
8 select MTD_GEN_PROBE 8 select MTD_GEN_PROBE
9 select MTD_CFI_UTIL
9 help 10 help
10 The Common Flash Interface specification was developed by Intel, 11 The Common Flash Interface specification was developed by Intel,
11 AMD and other flash manufactures that provides a universal method 12 AMD and other flash manufactures that provides a universal method
@@ -154,6 +155,7 @@ config MTD_CFI_I8
154config MTD_OTP 155config MTD_OTP
155 bool "Protection Registers aka one-time programmable (OTP) bits" 156 bool "Protection Registers aka one-time programmable (OTP) bits"
156 depends on MTD_CFI_ADV_OPTIONS 157 depends on MTD_CFI_ADV_OPTIONS
158 select HAVE_MTD_OTP
157 default n 159 default n
158 help 160 help
159 This enables support for reading, writing and locking so called 161 This enables support for reading, writing and locking so called
@@ -187,7 +189,7 @@ config MTD_CFI_INTELEXT
187 StrataFlash and other parts. 189 StrataFlash and other parts.
188 190
189config MTD_CFI_AMDSTD 191config MTD_CFI_AMDSTD
190 tristate "Support for AMD/Fujitsu flash chips" 192 tristate "Support for AMD/Fujitsu/Spansion flash chips"
191 depends on MTD_GEN_PROBE 193 depends on MTD_GEN_PROBE
192 select MTD_CFI_UTIL 194 select MTD_CFI_UTIL
193 help 195 help
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 5f1b472137a0..c93a8be5d5f1 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -478,6 +478,28 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
478 else 478 else
479 cfi->chips[i].erase_time = 2000000; 479 cfi->chips[i].erase_time = 2000000;
480 480
481 if (cfi->cfiq->WordWriteTimeoutTyp &&
482 cfi->cfiq->WordWriteTimeoutMax)
483 cfi->chips[i].word_write_time_max =
484 1<<(cfi->cfiq->WordWriteTimeoutTyp +
485 cfi->cfiq->WordWriteTimeoutMax);
486 else
487 cfi->chips[i].word_write_time_max = 50000 * 8;
488
489 if (cfi->cfiq->BufWriteTimeoutTyp &&
490 cfi->cfiq->BufWriteTimeoutMax)
491 cfi->chips[i].buffer_write_time_max =
492 1<<(cfi->cfiq->BufWriteTimeoutTyp +
493 cfi->cfiq->BufWriteTimeoutMax);
494
495 if (cfi->cfiq->BlockEraseTimeoutTyp &&
496 cfi->cfiq->BlockEraseTimeoutMax)
497 cfi->chips[i].erase_time_max =
498 1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
499 cfi->cfiq->BlockEraseTimeoutMax);
500 else
501 cfi->chips[i].erase_time_max = 2000000 * 8;
502
481 cfi->chips[i].ref_point_counter = 0; 503 cfi->chips[i].ref_point_counter = 0;
482 init_waitqueue_head(&(cfi->chips[i].wq)); 504 init_waitqueue_head(&(cfi->chips[i].wq));
483 } 505 }
@@ -703,6 +725,10 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
703 struct cfi_pri_intelext *cfip = cfi->cmdset_priv; 725 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
704 unsigned long timeo = jiffies + HZ; 726 unsigned long timeo = jiffies + HZ;
705 727
728 /* Prevent setting state FL_SYNCING for chip in suspended state. */
729 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
730 goto sleep;
731
706 switch (chip->state) { 732 switch (chip->state) {
707 733
708 case FL_STATUS: 734 case FL_STATUS:
@@ -808,8 +834,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
808 DECLARE_WAITQUEUE(wait, current); 834 DECLARE_WAITQUEUE(wait, current);
809 835
810 retry: 836 retry:
811 if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING 837 if (chip->priv &&
812 || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) { 838 (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
839 || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
813 /* 840 /*
814 * OK. We have possibility for contention on the write/erase 841 * OK. We have possibility for contention on the write/erase
815 * operations which are global to the real chip and not per 842 * operations which are global to the real chip and not per
@@ -859,6 +886,14 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
859 return ret; 886 return ret;
860 } 887 }
861 spin_lock(&shared->lock); 888 spin_lock(&shared->lock);
889
890 /* We should not own chip if it is already
891 * in FL_SYNCING state. Put contender and retry. */
892 if (chip->state == FL_SYNCING) {
893 put_chip(map, contender, contender->start);
894 spin_unlock(contender->mutex);
895 goto retry;
896 }
862 spin_unlock(contender->mutex); 897 spin_unlock(contender->mutex);
863 } 898 }
864 899
@@ -1012,7 +1047,7 @@ static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
1012 1047
1013static int __xipram xip_wait_for_operation( 1048static int __xipram xip_wait_for_operation(
1014 struct map_info *map, struct flchip *chip, 1049 struct map_info *map, struct flchip *chip,
1015 unsigned long adr, unsigned int chip_op_time ) 1050 unsigned long adr, unsigned int chip_op_time_max)
1016{ 1051{
1017 struct cfi_private *cfi = map->fldrv_priv; 1052 struct cfi_private *cfi = map->fldrv_priv;
1018 struct cfi_pri_intelext *cfip = cfi->cmdset_priv; 1053 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
@@ -1021,7 +1056,7 @@ static int __xipram xip_wait_for_operation(
1021 flstate_t oldstate, newstate; 1056 flstate_t oldstate, newstate;
1022 1057
1023 start = xip_currtime(); 1058 start = xip_currtime();
1024 usec = chip_op_time * 8; 1059 usec = chip_op_time_max;
1025 if (usec == 0) 1060 if (usec == 0)
1026 usec = 500000; 1061 usec = 500000;
1027 done = 0; 1062 done = 0;
@@ -1131,8 +1166,8 @@ static int __xipram xip_wait_for_operation(
1131#define XIP_INVAL_CACHED_RANGE(map, from, size) \ 1166#define XIP_INVAL_CACHED_RANGE(map, from, size) \
1132 INVALIDATE_CACHED_RANGE(map, from, size) 1167 INVALIDATE_CACHED_RANGE(map, from, size)
1133 1168
1134#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \ 1169#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1135 xip_wait_for_operation(map, chip, cmd_adr, usec) 1170 xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1136 1171
1137#else 1172#else
1138 1173
@@ -1144,7 +1179,7 @@ static int __xipram xip_wait_for_operation(
1144static int inval_cache_and_wait_for_operation( 1179static int inval_cache_and_wait_for_operation(
1145 struct map_info *map, struct flchip *chip, 1180 struct map_info *map, struct flchip *chip,
1146 unsigned long cmd_adr, unsigned long inval_adr, int inval_len, 1181 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1147 unsigned int chip_op_time) 1182 unsigned int chip_op_time, unsigned int chip_op_time_max)
1148{ 1183{
1149 struct cfi_private *cfi = map->fldrv_priv; 1184 struct cfi_private *cfi = map->fldrv_priv;
1150 map_word status, status_OK = CMD(0x80); 1185 map_word status, status_OK = CMD(0x80);
@@ -1156,8 +1191,7 @@ static int inval_cache_and_wait_for_operation(
1156 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len); 1191 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1157 spin_lock(chip->mutex); 1192 spin_lock(chip->mutex);
1158 1193
1159 /* set our timeout to 8 times the expected delay */ 1194 timeo = chip_op_time_max;
1160 timeo = chip_op_time * 8;
1161 if (!timeo) 1195 if (!timeo)
1162 timeo = 500000; 1196 timeo = 500000;
1163 reset_timeo = timeo; 1197 reset_timeo = timeo;
@@ -1217,8 +1251,8 @@ static int inval_cache_and_wait_for_operation(
1217 1251
1218#endif 1252#endif
1219 1253
1220#define WAIT_TIMEOUT(map, chip, adr, udelay) \ 1254#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1221 INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay); 1255 INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
1222 1256
1223 1257
1224static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len) 1258static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
@@ -1452,7 +1486,8 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1452 1486
1453 ret = INVAL_CACHE_AND_WAIT(map, chip, adr, 1487 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1454 adr, map_bankwidth(map), 1488 adr, map_bankwidth(map),
1455 chip->word_write_time); 1489 chip->word_write_time,
1490 chip->word_write_time_max);
1456 if (ret) { 1491 if (ret) {
1457 xip_enable(map, chip, adr); 1492 xip_enable(map, chip, adr);
1458 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name); 1493 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
@@ -1623,7 +1658,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1623 1658
1624 chip->state = FL_WRITING_TO_BUFFER; 1659 chip->state = FL_WRITING_TO_BUFFER;
1625 map_write(map, write_cmd, cmd_adr); 1660 map_write(map, write_cmd, cmd_adr);
1626 ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0); 1661 ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1627 if (ret) { 1662 if (ret) {
1628 /* Argh. Not ready for write to buffer */ 1663 /* Argh. Not ready for write to buffer */
1629 map_word Xstatus = map_read(map, cmd_adr); 1664 map_word Xstatus = map_read(map, cmd_adr);
@@ -1640,7 +1675,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1640 1675
1641 /* Figure out the number of words to write */ 1676 /* Figure out the number of words to write */
1642 word_gap = (-adr & (map_bankwidth(map)-1)); 1677 word_gap = (-adr & (map_bankwidth(map)-1));
1643 words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map); 1678 words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1644 if (!word_gap) { 1679 if (!word_gap) {
1645 words--; 1680 words--;
1646 } else { 1681 } else {
@@ -1692,7 +1727,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1692 1727
1693 ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, 1728 ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1694 initial_adr, initial_len, 1729 initial_adr, initial_len,
1695 chip->buffer_write_time); 1730 chip->buffer_write_time,
1731 chip->buffer_write_time_max);
1696 if (ret) { 1732 if (ret) {
1697 map_write(map, CMD(0x70), cmd_adr); 1733 map_write(map, CMD(0x70), cmd_adr);
1698 chip->state = FL_STATUS; 1734 chip->state = FL_STATUS;
@@ -1827,7 +1863,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1827 1863
1828 ret = INVAL_CACHE_AND_WAIT(map, chip, adr, 1864 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1829 adr, len, 1865 adr, len,
1830 chip->erase_time); 1866 chip->erase_time,
1867 chip->erase_time_max);
1831 if (ret) { 1868 if (ret) {
1832 map_write(map, CMD(0x70), adr); 1869 map_write(map, CMD(0x70), adr);
1833 chip->state = FL_STATUS; 1870 chip->state = FL_STATUS;
@@ -2006,7 +2043,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
2006 */ 2043 */
2007 udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0; 2044 udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
2008 2045
2009 ret = WAIT_TIMEOUT(map, chip, adr, udelay); 2046 ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
2010 if (ret) { 2047 if (ret) {
2011 map_write(map, CMD(0x70), adr); 2048 map_write(map, CMD(0x70), adr);
2012 chip->state = FL_STATUS; 2049 chip->state = FL_STATUS;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index a972cc6be436..3e6f5d8609e8 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -13,6 +13,8 @@
13 * XIP support hooks by Vitaly Wool (based on code for Intel flash 13 * XIP support hooks by Vitaly Wool (based on code for Intel flash
14 * by Nicolas Pitre) 14 * by Nicolas Pitre)
15 * 15 *
16 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
17 *
16 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com 18 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
17 * 19 *
18 * This code is GPL 20 * This code is GPL
@@ -43,6 +45,7 @@
43 45
44#define MANUFACTURER_AMD 0x0001 46#define MANUFACTURER_AMD 0x0001
45#define MANUFACTURER_ATMEL 0x001F 47#define MANUFACTURER_ATMEL 0x001F
48#define MANUFACTURER_MACRONIX 0x00C2
46#define MANUFACTURER_SST 0x00BF 49#define MANUFACTURER_SST 0x00BF
47#define SST49LF004B 0x0060 50#define SST49LF004B 0x0060
48#define SST49LF040B 0x0050 51#define SST49LF040B 0x0050
@@ -144,12 +147,44 @@ static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
144 147
145 if (((major << 8) | minor) < 0x3131) { 148 if (((major << 8) | minor) < 0x3131) {
146 /* CFI version 1.0 => don't trust bootloc */ 149 /* CFI version 1.0 => don't trust bootloc */
150
151 DEBUG(MTD_DEBUG_LEVEL1,
152 "%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
153 map->name, cfi->mfr, cfi->id);
154
155 /* AFAICS all 29LV400 with a bottom boot block have a device ID
156 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
157 * These were badly detected as they have the 0x80 bit set
158 * so treat them as a special case.
159 */
160 if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&
161
162 /* Macronix added CFI to their 2nd generation
163 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
164 * Fujitsu, Spansion, EON, ESI and older Macronix)
165 * has CFI.
166 *
167 * Therefore also check the manufacturer.
168 * This reduces the risk of false detection due to
169 * the 8-bit device ID.
170 */
171 (cfi->mfr == MANUFACTURER_MACRONIX)) {
172 DEBUG(MTD_DEBUG_LEVEL1,
173 "%s: Macronix MX29LV400C with bottom boot block"
174 " detected\n", map->name);
175 extp->TopBottom = 2; /* bottom boot */
176 } else
147 if (cfi->id & 0x80) { 177 if (cfi->id & 0x80) {
148 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id); 178 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
149 extp->TopBottom = 3; /* top boot */ 179 extp->TopBottom = 3; /* top boot */
150 } else { 180 } else {
151 extp->TopBottom = 2; /* bottom boot */ 181 extp->TopBottom = 2; /* bottom boot */
152 } 182 }
183
184 DEBUG(MTD_DEBUG_LEVEL1,
185 "%s: AMD CFI PRI V%c.%c has no boot block field;"
186 " deduced %s from Device ID\n", map->name, major, minor,
187 extp->TopBottom == 2 ? "bottom" : "top");
153 } 188 }
154} 189}
155#endif 190#endif
@@ -178,10 +213,18 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
178 if (atmel_pri.Features & 0x02) 213 if (atmel_pri.Features & 0x02)
179 extp->EraseSuspend = 2; 214 extp->EraseSuspend = 2;
180 215
181 if (atmel_pri.BottomBoot) 216 /* Some chips got it backwards... */
182 extp->TopBottom = 2; 217 if (cfi->id == AT49BV6416) {
183 else 218 if (atmel_pri.BottomBoot)
184 extp->TopBottom = 3; 219 extp->TopBottom = 3;
220 else
221 extp->TopBottom = 2;
222 } else {
223 if (atmel_pri.BottomBoot)
224 extp->TopBottom = 2;
225 else
226 extp->TopBottom = 3;
227 }
185 228
186 /* burst write mode not supported */ 229 /* burst write mode not supported */
187 cfi->cfiq->BufWriteTimeoutTyp = 0; 230 cfi->cfiq->BufWriteTimeoutTyp = 0;
@@ -243,6 +286,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
243 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL }, 286 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
244#ifdef AMD_BOOTLOC_BUG 287#ifdef AMD_BOOTLOC_BUG
245 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL }, 288 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
289 { MANUFACTURER_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
246#endif 290#endif
247 { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, }, 291 { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
248 { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, }, 292 { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index c418e92e1d92..e63e6749429a 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -44,17 +44,14 @@ do { \
44 44
45#define xip_enable(base, map, cfi) \ 45#define xip_enable(base, map, cfi) \
46do { \ 46do { \
47 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); \ 47 cfi_qry_mode_off(base, map, cfi); \
48 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); \
49 xip_allowed(base, map); \ 48 xip_allowed(base, map); \
50} while (0) 49} while (0)
51 50
52#define xip_disable_qry(base, map, cfi) \ 51#define xip_disable_qry(base, map, cfi) \
53do { \ 52do { \
54 xip_disable(); \ 53 xip_disable(); \
55 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); \ 54 cfi_qry_mode_on(base, map, cfi); \
56 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); \
57 cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); \
58} while (0) 55} while (0)
59 56
60#else 57#else
@@ -70,32 +67,6 @@ do { \
70 in: interleave,type,mode 67 in: interleave,type,mode
71 ret: table index, <0 for error 68 ret: table index, <0 for error
72 */ 69 */
73static int __xipram qry_present(struct map_info *map, __u32 base,
74 struct cfi_private *cfi)
75{
76 int osf = cfi->interleave * cfi->device_type; // scale factor
77 map_word val[3];
78 map_word qry[3];
79
80 qry[0] = cfi_build_cmd('Q', map, cfi);
81 qry[1] = cfi_build_cmd('R', map, cfi);
82 qry[2] = cfi_build_cmd('Y', map, cfi);
83
84 val[0] = map_read(map, base + osf*0x10);
85 val[1] = map_read(map, base + osf*0x11);
86 val[2] = map_read(map, base + osf*0x12);
87
88 if (!map_word_equal(map, qry[0], val[0]))
89 return 0;
90
91 if (!map_word_equal(map, qry[1], val[1]))
92 return 0;
93
94 if (!map_word_equal(map, qry[2], val[2]))
95 return 0;
96
97 return 1; // "QRY" found
98}
99 70
100static int __xipram cfi_probe_chip(struct map_info *map, __u32 base, 71static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
101 unsigned long *chip_map, struct cfi_private *cfi) 72 unsigned long *chip_map, struct cfi_private *cfi)
@@ -116,11 +87,7 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
116 } 87 }
117 88
118 xip_disable(); 89 xip_disable();
119 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); 90 if (!cfi_qry_mode_on(base, map, cfi)) {
120 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
121 cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
122
123 if (!qry_present(map,base,cfi)) {
124 xip_enable(base, map, cfi); 91 xip_enable(base, map, cfi);
125 return 0; 92 return 0;
126 } 93 }
@@ -141,14 +108,13 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
141 start = i << cfi->chipshift; 108 start = i << cfi->chipshift;
142 /* This chip should be in read mode if it's one 109 /* This chip should be in read mode if it's one
143 we've already touched. */ 110 we've already touched. */
144 if (qry_present(map, start, cfi)) { 111 if (cfi_qry_present(map, start, cfi)) {
145 /* Eep. This chip also had the QRY marker. 112 /* Eep. This chip also had the QRY marker.
146 * Is it an alias for the new one? */ 113 * Is it an alias for the new one? */
147 cfi_send_gen_cmd(0xF0, 0, start, map, cfi, cfi->device_type, NULL); 114 cfi_qry_mode_off(start, map, cfi);
148 cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL);
149 115
150 /* If the QRY marker goes away, it's an alias */ 116 /* If the QRY marker goes away, it's an alias */
151 if (!qry_present(map, start, cfi)) { 117 if (!cfi_qry_present(map, start, cfi)) {
152 xip_allowed(base, map); 118 xip_allowed(base, map);
153 printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n", 119 printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
154 map->name, base, start); 120 map->name, base, start);
@@ -158,10 +124,9 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
158 * unfortunate. Stick the new chip in read mode 124 * unfortunate. Stick the new chip in read mode
159 * too and if it's the same, assume it's an alias. */ 125 * too and if it's the same, assume it's an alias. */
160 /* FIXME: Use other modes to do a proper check */ 126 /* FIXME: Use other modes to do a proper check */
161 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); 127 cfi_qry_mode_off(base, map, cfi);
162 cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL);
163 128
164 if (qry_present(map, base, cfi)) { 129 if (cfi_qry_present(map, base, cfi)) {
165 xip_allowed(base, map); 130 xip_allowed(base, map);
166 printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n", 131 printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
167 map->name, base, start); 132 map->name, base, start);
@@ -176,8 +141,7 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
176 cfi->numchips++; 141 cfi->numchips++;
177 142
178 /* Put it back into Read Mode */ 143 /* Put it back into Read Mode */
179 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); 144 cfi_qry_mode_off(base, map, cfi);
180 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
181 xip_allowed(base, map); 145 xip_allowed(base, map);
182 146
183 printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n", 147 printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
@@ -237,9 +201,7 @@ static int __xipram cfi_chip_setup(struct map_info *map,
237 cfi_read_query(map, base + 0xf * ofs_factor); 201 cfi_read_query(map, base + 0xf * ofs_factor);
238 202
239 /* Put it back into Read Mode */ 203 /* Put it back into Read Mode */
240 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); 204 cfi_qry_mode_off(base, map, cfi);
241 /* ... even if it's an Intel chip */
242 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
243 xip_allowed(base, map); 205 xip_allowed(base, map);
244 206
245 /* Do any necessary byteswapping */ 207 /* Do any necessary byteswapping */
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 0ee457018016..34d40e25d312 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -24,6 +24,66 @@
24#include <linux/mtd/cfi.h> 24#include <linux/mtd/cfi.h>
25#include <linux/mtd/compatmac.h> 25#include <linux/mtd/compatmac.h>
26 26
27int __xipram cfi_qry_present(struct map_info *map, __u32 base,
28 struct cfi_private *cfi)
29{
30 int osf = cfi->interleave * cfi->device_type; /* scale factor */
31 map_word val[3];
32 map_word qry[3];
33
34 qry[0] = cfi_build_cmd('Q', map, cfi);
35 qry[1] = cfi_build_cmd('R', map, cfi);
36 qry[2] = cfi_build_cmd('Y', map, cfi);
37
38 val[0] = map_read(map, base + osf*0x10);
39 val[1] = map_read(map, base + osf*0x11);
40 val[2] = map_read(map, base + osf*0x12);
41
42 if (!map_word_equal(map, qry[0], val[0]))
43 return 0;
44
45 if (!map_word_equal(map, qry[1], val[1]))
46 return 0;
47
48 if (!map_word_equal(map, qry[2], val[2]))
49 return 0;
50
51 return 1; /* "QRY" found */
52}
53EXPORT_SYMBOL_GPL(cfi_qry_present);
54
55int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
56 struct cfi_private *cfi)
57{
58 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
59 cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
60 if (cfi_qry_present(map, base, cfi))
61 return 1;
62 /* QRY not found probably we deal with some odd CFI chips */
63 /* Some revisions of some old Intel chips? */
64 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
65 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
66 cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
67 if (cfi_qry_present(map, base, cfi))
68 return 1;
69 /* ST M29DW chips */
70 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
71 cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
72 if (cfi_qry_present(map, base, cfi))
73 return 1;
74 /* QRY not found */
75 return 0;
76}
77EXPORT_SYMBOL_GPL(cfi_qry_mode_on);
78
79void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
80 struct cfi_private *cfi)
81{
82 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
83 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
84}
85EXPORT_SYMBOL_GPL(cfi_qry_mode_off);
86
27struct cfi_extquery * 87struct cfi_extquery *
28__xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name) 88__xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name)
29{ 89{
@@ -48,8 +108,7 @@ __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* n
48#endif 108#endif
49 109
50 /* Switch it into Query Mode */ 110 /* Switch it into Query Mode */
51 cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); 111 cfi_qry_mode_on(base, map, cfi);
52
53 /* Read in the Extended Query Table */ 112 /* Read in the Extended Query Table */
54 for (i=0; i<size; i++) { 113 for (i=0; i<size; i++) {
55 ((unsigned char *)extp)[i] = 114 ((unsigned char *)extp)[i] =
@@ -57,8 +116,7 @@ __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* n
57 } 116 }
58 117
59 /* Make sure it returns to read mode */ 118 /* Make sure it returns to read mode */
60 cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL); 119 cfi_qry_mode_off(base, map, cfi);
61 cfi_send_gen_cmd(0xff, 0, base, map, cfi, cfi->device_type, NULL);
62 120
63#ifdef CONFIG_MTD_XIP 121#ifdef CONFIG_MTD_XIP
64 (void) map_read(map, base); 122 (void) map_read(map, base);
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index f061885b2812..e2dc96441e05 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -111,7 +111,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
111 max_chips = 1; 111 max_chips = 1;
112 } 112 }
113 113
114 mapsize = sizeof(long) * ( (max_chips + BITS_PER_LONG-1) / BITS_PER_LONG ); 114 mapsize = sizeof(long) * DIV_ROUND_UP(max_chips, BITS_PER_LONG);
115 chip_map = kzalloc(mapsize, GFP_KERNEL); 115 chip_map = kzalloc(mapsize, GFP_KERNEL);
116 if (!chip_map) { 116 if (!chip_map) {
117 printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name); 117 printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name);
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 71bc07f149b7..50a340388e74 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -7,6 +7,7 @@
7 * 7 *
8 * mtdparts=<mtddef>[;<mtddef] 8 * mtdparts=<mtddef>[;<mtddef]
9 * <mtddef> := <mtd-id>:<partdef>[,<partdef>] 9 * <mtddef> := <mtd-id>:<partdef>[,<partdef>]
10 * where <mtd-id> is the name from the "cat /proc/mtd" command
10 * <partdef> := <size>[@offset][<name>][ro][lk] 11 * <partdef> := <size>[@offset][<name>][ro][lk]
11 * <mtd-id> := unique name used in mapping driver/device (mtd->name) 12 * <mtd-id> := unique name used in mapping driver/device (mtd->name)
12 * <size> := standard linux memsize OR "-" to denote all remaining space 13 * <size> := standard linux memsize OR "-" to denote all remaining space
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 9c613f06623c..6fde0a2e3567 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -59,6 +59,27 @@ config MTD_DATAFLASH
59 Sometimes DataFlash chips are packaged inside MMC-format 59 Sometimes DataFlash chips are packaged inside MMC-format
60 cards; at this writing, the MMC stack won't handle those. 60 cards; at this writing, the MMC stack won't handle those.
61 61
62config MTD_DATAFLASH_WRITE_VERIFY
63 bool "Verify DataFlash page writes"
64 depends on MTD_DATAFLASH
65 help
66 This adds an extra check when data is written to the flash.
67 It may help if you are verifying chip setup (timings etc) on
68 your board. There is a rare possibility that even though the
69 device thinks the write was successful, a bit could have been
70 flipped accidentally due to device wear or something else.
71
72config MTD_DATAFLASH_OTP
73 bool "DataFlash OTP support (Security Register)"
74 depends on MTD_DATAFLASH
75 select HAVE_MTD_OTP
76 help
77 Newer DataFlash chips (revisions C and D) support 128 bytes of
78 one-time-programmable (OTP) data. The first half may be written
79 (once) with up to 64 bytes of data, such as a serial number or
80 other key product data. The second half is programmed with a
81 unique-to-each-chip bit pattern at the factory.
82
62config MTD_M25P80 83config MTD_M25P80
63 tristate "Support most SPI Flash chips (AT26DF, M25P, W25X, ...)" 84 tristate "Support most SPI Flash chips (AT26DF, M25P, W25X, ...)"
64 depends on SPI_MASTER && EXPERIMENTAL 85 depends on SPI_MASTER && EXPERIMENTAL
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index b35c3333e210..76a76751da36 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -39,6 +39,7 @@
39#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */ 39#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */
40#define OPCODE_BE_4K 0x20 /* Erase 4KiB block */ 40#define OPCODE_BE_4K 0x20 /* Erase 4KiB block */
41#define OPCODE_BE_32K 0x52 /* Erase 32KiB block */ 41#define OPCODE_BE_32K 0x52 /* Erase 32KiB block */
42#define OPCODE_BE 0xc7 /* Erase whole flash block */
42#define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */ 43#define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */
43#define OPCODE_RDID 0x9f /* Read JEDEC ID */ 44#define OPCODE_RDID 0x9f /* Read JEDEC ID */
44 45
@@ -161,6 +162,31 @@ static int wait_till_ready(struct m25p *flash)
161 return 1; 162 return 1;
162} 163}
163 164
165/*
166 * Erase the whole flash memory
167 *
168 * Returns 0 if successful, non-zero otherwise.
169 */
170static int erase_block(struct m25p *flash)
171{
172 DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB\n",
173 flash->spi->dev.bus_id, __func__,
174 flash->mtd.size / 1024);
175
176 /* Wait until finished previous write command. */
177 if (wait_till_ready(flash))
178 return 1;
179
180 /* Send write enable, then erase commands. */
181 write_enable(flash);
182
183 /* Set up command buffer. */
184 flash->command[0] = OPCODE_BE;
185
186 spi_write(flash->spi, flash->command, 1);
187
188 return 0;
189}
164 190
165/* 191/*
166 * Erase one sector of flash memory at offset ``offset'' which is any 192 * Erase one sector of flash memory at offset ``offset'' which is any
@@ -229,15 +255,21 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
229 */ 255 */
230 256
231 /* now erase those sectors */ 257 /* now erase those sectors */
232 while (len) { 258 if (len == flash->mtd.size && erase_block(flash)) {
233 if (erase_sector(flash, addr)) { 259 instr->state = MTD_ERASE_FAILED;
234 instr->state = MTD_ERASE_FAILED; 260 mutex_unlock(&flash->lock);
235 mutex_unlock(&flash->lock); 261 return -EIO;
236 return -EIO; 262 } else {
237 } 263 while (len) {
264 if (erase_sector(flash, addr)) {
265 instr->state = MTD_ERASE_FAILED;
266 mutex_unlock(&flash->lock);
267 return -EIO;
268 }
238 269
239 addr += mtd->erasesize; 270 addr += mtd->erasesize;
240 len -= mtd->erasesize; 271 len -= mtd->erasesize;
272 }
241 } 273 }
242 274
243 mutex_unlock(&flash->lock); 275 mutex_unlock(&flash->lock);
@@ -437,6 +469,7 @@ struct flash_info {
437 * then a two byte device id. 469 * then a two byte device id.
438 */ 470 */
439 u32 jedec_id; 471 u32 jedec_id;
472 u16 ext_id;
440 473
441 /* The size listed here is what works with OPCODE_SE, which isn't 474 /* The size listed here is what works with OPCODE_SE, which isn't
442 * necessarily called a "sector" by the vendor. 475 * necessarily called a "sector" by the vendor.
@@ -456,72 +489,75 @@ struct flash_info {
456static struct flash_info __devinitdata m25p_data [] = { 489static struct flash_info __devinitdata m25p_data [] = {
457 490
458 /* Atmel -- some are (confusingly) marketed as "DataFlash" */ 491 /* Atmel -- some are (confusingly) marketed as "DataFlash" */
459 { "at25fs010", 0x1f6601, 32 * 1024, 4, SECT_4K, }, 492 { "at25fs010", 0x1f6601, 0, 32 * 1024, 4, SECT_4K, },
460 { "at25fs040", 0x1f6604, 64 * 1024, 8, SECT_4K, }, 493 { "at25fs040", 0x1f6604, 0, 64 * 1024, 8, SECT_4K, },
461 494
462 { "at25df041a", 0x1f4401, 64 * 1024, 8, SECT_4K, }, 495 { "at25df041a", 0x1f4401, 0, 64 * 1024, 8, SECT_4K, },
463 { "at25df641", 0x1f4800, 64 * 1024, 128, SECT_4K, }, 496 { "at25df641", 0x1f4800, 0, 64 * 1024, 128, SECT_4K, },
464 497
465 { "at26f004", 0x1f0400, 64 * 1024, 8, SECT_4K, }, 498 { "at26f004", 0x1f0400, 0, 64 * 1024, 8, SECT_4K, },
466 { "at26df081a", 0x1f4501, 64 * 1024, 16, SECT_4K, }, 499 { "at26df081a", 0x1f4501, 0, 64 * 1024, 16, SECT_4K, },
467 { "at26df161a", 0x1f4601, 64 * 1024, 32, SECT_4K, }, 500 { "at26df161a", 0x1f4601, 0, 64 * 1024, 32, SECT_4K, },
468 { "at26df321", 0x1f4701, 64 * 1024, 64, SECT_4K, }, 501 { "at26df321", 0x1f4701, 0, 64 * 1024, 64, SECT_4K, },
469 502
470 /* Spansion -- single (large) sector size only, at least 503 /* Spansion -- single (large) sector size only, at least
471 * for the chips listed here (without boot sectors). 504 * for the chips listed here (without boot sectors).
472 */ 505 */
473 { "s25sl004a", 0x010212, 64 * 1024, 8, }, 506 { "s25sl004a", 0x010212, 0, 64 * 1024, 8, },
474 { "s25sl008a", 0x010213, 64 * 1024, 16, }, 507 { "s25sl008a", 0x010213, 0, 64 * 1024, 16, },
475 { "s25sl016a", 0x010214, 64 * 1024, 32, }, 508 { "s25sl016a", 0x010214, 0, 64 * 1024, 32, },
476 { "s25sl032a", 0x010215, 64 * 1024, 64, }, 509 { "s25sl032a", 0x010215, 0, 64 * 1024, 64, },
477 { "s25sl064a", 0x010216, 64 * 1024, 128, }, 510 { "s25sl064a", 0x010216, 0, 64 * 1024, 128, },
511 { "s25sl12800", 0x012018, 0x0300, 256 * 1024, 64, },
512 { "s25sl12801", 0x012018, 0x0301, 64 * 1024, 256, },
478 513
479 /* SST -- large erase sizes are "overlays", "sectors" are 4K */ 514 /* SST -- large erase sizes are "overlays", "sectors" are 4K */
480 { "sst25vf040b", 0xbf258d, 64 * 1024, 8, SECT_4K, }, 515 { "sst25vf040b", 0xbf258d, 0, 64 * 1024, 8, SECT_4K, },
481 { "sst25vf080b", 0xbf258e, 64 * 1024, 16, SECT_4K, }, 516 { "sst25vf080b", 0xbf258e, 0, 64 * 1024, 16, SECT_4K, },
482 { "sst25vf016b", 0xbf2541, 64 * 1024, 32, SECT_4K, }, 517 { "sst25vf016b", 0xbf2541, 0, 64 * 1024, 32, SECT_4K, },
483 { "sst25vf032b", 0xbf254a, 64 * 1024, 64, SECT_4K, }, 518 { "sst25vf032b", 0xbf254a, 0, 64 * 1024, 64, SECT_4K, },
484 519
485 /* ST Microelectronics -- newer production may have feature updates */ 520 /* ST Microelectronics -- newer production may have feature updates */
486 { "m25p05", 0x202010, 32 * 1024, 2, }, 521 { "m25p05", 0x202010, 0, 32 * 1024, 2, },
487 { "m25p10", 0x202011, 32 * 1024, 4, }, 522 { "m25p10", 0x202011, 0, 32 * 1024, 4, },
488 { "m25p20", 0x202012, 64 * 1024, 4, }, 523 { "m25p20", 0x202012, 0, 64 * 1024, 4, },
489 { "m25p40", 0x202013, 64 * 1024, 8, }, 524 { "m25p40", 0x202013, 0, 64 * 1024, 8, },
490 { "m25p80", 0, 64 * 1024, 16, }, 525 { "m25p80", 0, 0, 64 * 1024, 16, },
491 { "m25p16", 0x202015, 64 * 1024, 32, }, 526 { "m25p16", 0x202015, 0, 64 * 1024, 32, },
492 { "m25p32", 0x202016, 64 * 1024, 64, }, 527 { "m25p32", 0x202016, 0, 64 * 1024, 64, },
493 { "m25p64", 0x202017, 64 * 1024, 128, }, 528 { "m25p64", 0x202017, 0, 64 * 1024, 128, },
494 { "m25p128", 0x202018, 256 * 1024, 64, }, 529 { "m25p128", 0x202018, 0, 256 * 1024, 64, },
495 530
496 { "m45pe80", 0x204014, 64 * 1024, 16, }, 531 { "m45pe80", 0x204014, 0, 64 * 1024, 16, },
497 { "m45pe16", 0x204015, 64 * 1024, 32, }, 532 { "m45pe16", 0x204015, 0, 64 * 1024, 32, },
498 533
499 { "m25pe80", 0x208014, 64 * 1024, 16, }, 534 { "m25pe80", 0x208014, 0, 64 * 1024, 16, },
500 { "m25pe16", 0x208015, 64 * 1024, 32, SECT_4K, }, 535 { "m25pe16", 0x208015, 0, 64 * 1024, 32, SECT_4K, },
501 536
502 /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ 537 /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
503 { "w25x10", 0xef3011, 64 * 1024, 2, SECT_4K, }, 538 { "w25x10", 0xef3011, 0, 64 * 1024, 2, SECT_4K, },
504 { "w25x20", 0xef3012, 64 * 1024, 4, SECT_4K, }, 539 { "w25x20", 0xef3012, 0, 64 * 1024, 4, SECT_4K, },
505 { "w25x40", 0xef3013, 64 * 1024, 8, SECT_4K, }, 540 { "w25x40", 0xef3013, 0, 64 * 1024, 8, SECT_4K, },
506 { "w25x80", 0xef3014, 64 * 1024, 16, SECT_4K, }, 541 { "w25x80", 0xef3014, 0, 64 * 1024, 16, SECT_4K, },
507 { "w25x16", 0xef3015, 64 * 1024, 32, SECT_4K, }, 542 { "w25x16", 0xef3015, 0, 64 * 1024, 32, SECT_4K, },
508 { "w25x32", 0xef3016, 64 * 1024, 64, SECT_4K, }, 543 { "w25x32", 0xef3016, 0, 64 * 1024, 64, SECT_4K, },
509 { "w25x64", 0xef3017, 64 * 1024, 128, SECT_4K, }, 544 { "w25x64", 0xef3017, 0, 64 * 1024, 128, SECT_4K, },
510}; 545};
511 546
512static struct flash_info *__devinit jedec_probe(struct spi_device *spi) 547static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
513{ 548{
514 int tmp; 549 int tmp;
515 u8 code = OPCODE_RDID; 550 u8 code = OPCODE_RDID;
516 u8 id[3]; 551 u8 id[5];
517 u32 jedec; 552 u32 jedec;
553 u16 ext_jedec;
518 struct flash_info *info; 554 struct flash_info *info;
519 555
520 /* JEDEC also defines an optional "extended device information" 556 /* JEDEC also defines an optional "extended device information"
521 * string for after vendor-specific data, after the three bytes 557 * string for after vendor-specific data, after the three bytes
522 * we use here. Supporting some chips might require using it. 558 * we use here. Supporting some chips might require using it.
523 */ 559 */
524 tmp = spi_write_then_read(spi, &code, 1, id, 3); 560 tmp = spi_write_then_read(spi, &code, 1, id, 5);
525 if (tmp < 0) { 561 if (tmp < 0) {
526 DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n", 562 DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
527 spi->dev.bus_id, tmp); 563 spi->dev.bus_id, tmp);
@@ -533,10 +569,14 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
533 jedec = jedec << 8; 569 jedec = jedec << 8;
534 jedec |= id[2]; 570 jedec |= id[2];
535 571
572 ext_jedec = id[3] << 8 | id[4];
573
536 for (tmp = 0, info = m25p_data; 574 for (tmp = 0, info = m25p_data;
537 tmp < ARRAY_SIZE(m25p_data); 575 tmp < ARRAY_SIZE(m25p_data);
538 tmp++, info++) { 576 tmp++, info++) {
539 if (info->jedec_id == jedec) 577 if (info->jedec_id == jedec)
578 if (ext_jedec != 0 && info->ext_id != ext_jedec)
579 continue;
540 return info; 580 return info;
541 } 581 }
542 dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec); 582 dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 8bd0dea6885f..6dd9aff8bb2d 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -30,12 +30,10 @@
30 * doesn't (yet) use these for any kind of i/o overlap or prefetching. 30 * doesn't (yet) use these for any kind of i/o overlap or prefetching.
31 * 31 *
32 * Sometimes DataFlash is packaged in MMC-format cards, although the 32 * Sometimes DataFlash is packaged in MMC-format cards, although the
33 * MMC stack can't use SPI (yet), or distinguish between MMC and DataFlash 33 * MMC stack can't (yet?) distinguish between MMC and DataFlash
34 * protocols during enumeration. 34 * protocols during enumeration.
35 */ 35 */
36 36
37#define CONFIG_DATAFLASH_WRITE_VERIFY
38
39/* reads can bypass the buffers */ 37/* reads can bypass the buffers */
40#define OP_READ_CONTINUOUS 0xE8 38#define OP_READ_CONTINUOUS 0xE8
41#define OP_READ_PAGE 0xD2 39#define OP_READ_PAGE 0xD2
@@ -80,7 +78,8 @@
80 */ 78 */
81#define OP_READ_ID 0x9F 79#define OP_READ_ID 0x9F
82#define OP_READ_SECURITY 0x77 80#define OP_READ_SECURITY 0x77
83#define OP_WRITE_SECURITY 0x9A /* OTP bits */ 81#define OP_WRITE_SECURITY_REVC 0x9A
82#define OP_WRITE_SECURITY 0x9B /* revision D */
84 83
85 84
86struct dataflash { 85struct dataflash {
@@ -402,7 +401,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
402 (void) dataflash_waitready(priv->spi); 401 (void) dataflash_waitready(priv->spi);
403 402
404 403
405#ifdef CONFIG_DATAFLASH_WRITE_VERIFY 404#ifdef CONFIG_MTD_DATAFLASH_VERIFY_WRITE
406 405
407 /* (3) Compare to Buffer1 */ 406 /* (3) Compare to Buffer1 */
408 addr = pageaddr << priv->page_offset; 407 addr = pageaddr << priv->page_offset;
@@ -431,7 +430,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
431 } else 430 } else
432 status = 0; 431 status = 0;
433 432
434#endif /* CONFIG_DATAFLASH_WRITE_VERIFY */ 433#endif /* CONFIG_MTD_DATAFLASH_VERIFY_WRITE */
435 434
436 remaining = remaining - writelen; 435 remaining = remaining - writelen;
437 pageaddr++; 436 pageaddr++;
@@ -451,16 +450,192 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
451 450
452/* ......................................................................... */ 451/* ......................................................................... */
453 452
453#ifdef CONFIG_MTD_DATAFLASH_OTP
454
455static int dataflash_get_otp_info(struct mtd_info *mtd,
456 struct otp_info *info, size_t len)
457{
458 /* Report both blocks as identical: bytes 0..64, locked.
459 * Unless the user block changed from all-ones, we can't
460 * tell whether it's still writable; so we assume it isn't.
461 */
462 info->start = 0;
463 info->length = 64;
464 info->locked = 1;
465 return sizeof(*info);
466}
467
468static ssize_t otp_read(struct spi_device *spi, unsigned base,
469 uint8_t *buf, loff_t off, size_t len)
470{
471 struct spi_message m;
472 size_t l;
473 uint8_t *scratch;
474 struct spi_transfer t;
475 int status;
476
477 if (off > 64)
478 return -EINVAL;
479
480 if ((off + len) > 64)
481 len = 64 - off;
482 if (len == 0)
483 return len;
484
485 spi_message_init(&m);
486
487 l = 4 + base + off + len;
488 scratch = kzalloc(l, GFP_KERNEL);
489 if (!scratch)
490 return -ENOMEM;
491
492 /* OUT: OP_READ_SECURITY, 3 don't-care bytes, zeroes
493 * IN: ignore 4 bytes, data bytes 0..N (max 127)
494 */
495 scratch[0] = OP_READ_SECURITY;
496
497 memset(&t, 0, sizeof t);
498 t.tx_buf = scratch;
499 t.rx_buf = scratch;
500 t.len = l;
501 spi_message_add_tail(&t, &m);
502
503 dataflash_waitready(spi);
504
505 status = spi_sync(spi, &m);
506 if (status >= 0) {
507 memcpy(buf, scratch + 4 + base + off, len);
508 status = len;
509 }
510
511 kfree(scratch);
512 return status;
513}
514
515static int dataflash_read_fact_otp(struct mtd_info *mtd,
516 loff_t from, size_t len, size_t *retlen, u_char *buf)
517{
518 struct dataflash *priv = (struct dataflash *)mtd->priv;
519 int status;
520
521 /* 64 bytes, from 0..63 ... start at 64 on-chip */
522 mutex_lock(&priv->lock);
523 status = otp_read(priv->spi, 64, buf, from, len);
524 mutex_unlock(&priv->lock);
525
526 if (status < 0)
527 return status;
528 *retlen = status;
529 return 0;
530}
531
532static int dataflash_read_user_otp(struct mtd_info *mtd,
533 loff_t from, size_t len, size_t *retlen, u_char *buf)
534{
535 struct dataflash *priv = (struct dataflash *)mtd->priv;
536 int status;
537
538 /* 64 bytes, from 0..63 ... start at 0 on-chip */
539 mutex_lock(&priv->lock);
540 status = otp_read(priv->spi, 0, buf, from, len);
541 mutex_unlock(&priv->lock);
542
543 if (status < 0)
544 return status;
545 *retlen = status;
546 return 0;
547}
548
549static int dataflash_write_user_otp(struct mtd_info *mtd,
550 loff_t from, size_t len, size_t *retlen, u_char *buf)
551{
552 struct spi_message m;
553 const size_t l = 4 + 64;
554 uint8_t *scratch;
555 struct spi_transfer t;
556 struct dataflash *priv = (struct dataflash *)mtd->priv;
557 int status;
558
559 if (len > 64)
560 return -EINVAL;
561
562 /* Strictly speaking, we *could* truncate the write ... but
563 * let's not do that for the only write that's ever possible.
564 */
565 if ((from + len) > 64)
566 return -EINVAL;
567
568 /* OUT: OP_WRITE_SECURITY, 3 zeroes, 64 data-or-zero bytes
569 * IN: ignore all
570 */
571 scratch = kzalloc(l, GFP_KERNEL);
572 if (!scratch)
573 return -ENOMEM;
574 scratch[0] = OP_WRITE_SECURITY;
575 memcpy(scratch + 4 + from, buf, len);
576
577 spi_message_init(&m);
578
579 memset(&t, 0, sizeof t);
580 t.tx_buf = scratch;
581 t.len = l;
582 spi_message_add_tail(&t, &m);
583
584 /* Write the OTP bits, if they've not yet been written.
585 * This modifies SRAM buffer1.
586 */
587 mutex_lock(&priv->lock);
588 dataflash_waitready(priv->spi);
589 status = spi_sync(priv->spi, &m);
590 mutex_unlock(&priv->lock);
591
592 kfree(scratch);
593
594 if (status >= 0) {
595 status = 0;
596 *retlen = len;
597 }
598 return status;
599}
600
601static char *otp_setup(struct mtd_info *device, char revision)
602{
603 device->get_fact_prot_info = dataflash_get_otp_info;
604 device->read_fact_prot_reg = dataflash_read_fact_otp;
605 device->get_user_prot_info = dataflash_get_otp_info;
606 device->read_user_prot_reg = dataflash_read_user_otp;
607
608 /* rev c parts (at45db321c and at45db1281 only!) use a
609 * different write procedure; not (yet?) implemented.
610 */
611 if (revision > 'c')
612 device->write_user_prot_reg = dataflash_write_user_otp;
613
614 return ", OTP";
615}
616
617#else
618
619static char *otp_setup(struct mtd_info *device, char revision)
620{
621 return " (OTP)";
622}
623
624#endif
625
626/* ......................................................................... */
627
454/* 628/*
455 * Register DataFlash device with MTD subsystem. 629 * Register DataFlash device with MTD subsystem.
456 */ 630 */
457static int __devinit 631static int __devinit
458add_dataflash(struct spi_device *spi, char *name, 632add_dataflash_otp(struct spi_device *spi, char *name,
459 int nr_pages, int pagesize, int pageoffset) 633 int nr_pages, int pagesize, int pageoffset, char revision)
460{ 634{
461 struct dataflash *priv; 635 struct dataflash *priv;
462 struct mtd_info *device; 636 struct mtd_info *device;
463 struct flash_platform_data *pdata = spi->dev.platform_data; 637 struct flash_platform_data *pdata = spi->dev.platform_data;
638 char *otp_tag = "";
464 639
465 priv = kzalloc(sizeof *priv, GFP_KERNEL); 640 priv = kzalloc(sizeof *priv, GFP_KERNEL);
466 if (!priv) 641 if (!priv)
@@ -489,8 +664,12 @@ add_dataflash(struct spi_device *spi, char *name,
489 device->write = dataflash_write; 664 device->write = dataflash_write;
490 device->priv = priv; 665 device->priv = priv;
491 666
492 dev_info(&spi->dev, "%s (%d KBytes) pagesize %d bytes\n", 667 if (revision >= 'c')
493 name, DIV_ROUND_UP(device->size, 1024), pagesize); 668 otp_tag = otp_setup(device, revision);
669
670 dev_info(&spi->dev, "%s (%d KBytes) pagesize %d bytes%s\n",
671 name, DIV_ROUND_UP(device->size, 1024),
672 pagesize, otp_tag);
494 dev_set_drvdata(&spi->dev, priv); 673 dev_set_drvdata(&spi->dev, priv);
495 674
496 if (mtd_has_partitions()) { 675 if (mtd_has_partitions()) {
@@ -519,6 +698,14 @@ add_dataflash(struct spi_device *spi, char *name,
519 return add_mtd_device(device) == 1 ? -ENODEV : 0; 698 return add_mtd_device(device) == 1 ? -ENODEV : 0;
520} 699}
521 700
701static inline int __devinit
702add_dataflash(struct spi_device *spi, char *name,
703 int nr_pages, int pagesize, int pageoffset)
704{
705 return add_dataflash_otp(spi, name, nr_pages, pagesize,
706 pageoffset, 0);
707}
708
522struct flash_info { 709struct flash_info {
523 char *name; 710 char *name;
524 711
@@ -664,13 +851,16 @@ static int __devinit dataflash_probe(struct spi_device *spi)
664 * Try to detect dataflash by JEDEC ID. 851 * Try to detect dataflash by JEDEC ID.
665 * If it succeeds we know we have either a C or D part. 852 * If it succeeds we know we have either a C or D part.
666 * D will support power of 2 pagesize option. 853 * D will support power of 2 pagesize option.
854 * Both support the security register, though with different
855 * write procedures.
667 */ 856 */
668 info = jedec_probe(spi); 857 info = jedec_probe(spi);
669 if (IS_ERR(info)) 858 if (IS_ERR(info))
670 return PTR_ERR(info); 859 return PTR_ERR(info);
671 if (info != NULL) 860 if (info != NULL)
672 return add_dataflash(spi, info->name, info->nr_pages, 861 return add_dataflash_otp(spi, info->name, info->nr_pages,
673 info->pagesize, info->pageoffset); 862 info->pagesize, info->pageoffset,
863 (info->flags & SUP_POW2PS) ? 'd' : 'c');
674 864
675 /* 865 /*
676 * Older chips support only legacy commands, identifing 866 * Older chips support only legacy commands, identifing
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index c4f9d3378b24..50ce13887f63 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -388,6 +388,10 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
388 if (thisEUN == targetEUN) 388 if (thisEUN == targetEUN)
389 break; 389 break;
390 390
391 /* Unlink the last block from the chain. */
392 inftl->PUtable[prevEUN] = BLOCK_NIL;
393
394 /* Now try to erase it. */
391 if (INFTL_formatblock(inftl, thisEUN) < 0) { 395 if (INFTL_formatblock(inftl, thisEUN) < 0) {
392 /* 396 /*
393 * Could not erase : mark block as reserved. 397 * Could not erase : mark block as reserved.
@@ -396,7 +400,6 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
396 } else { 400 } else {
397 /* Correctly erased : mark it as free */ 401 /* Correctly erased : mark it as free */
398 inftl->PUtable[thisEUN] = BLOCK_FREE; 402 inftl->PUtable[thisEUN] = BLOCK_FREE;
399 inftl->PUtable[prevEUN] = BLOCK_NIL;
400 inftl->numfreeEUNs++; 403 inftl->numfreeEUNs++;
401 } 404 }
402 } 405 }
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index df8e00bba07b..5ea169362164 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -332,30 +332,6 @@ config MTD_CFI_FLAGADM
332 Mapping for the Flaga digital module. If you don't have one, ignore 332 Mapping for the Flaga digital module. If you don't have one, ignore
333 this setting. 333 this setting.
334 334
335config MTD_WALNUT
336 tristate "Flash device mapped on IBM 405GP Walnut"
337 depends on MTD_JEDECPROBE && WALNUT && !PPC_MERGE
338 help
339 This enables access routines for the flash chips on the IBM 405GP
340 Walnut board. If you have one of these boards and would like to
341 use the flash chips on it, say 'Y'.
342
343config MTD_EBONY
344 tristate "Flash devices mapped on IBM 440GP Ebony"
345 depends on MTD_JEDECPROBE && EBONY && !PPC_MERGE
346 help
347 This enables access routines for the flash chips on the IBM 440GP
348 Ebony board. If you have one of these boards and would like to
349 use the flash chips on it, say 'Y'.
350
351config MTD_OCOTEA
352 tristate "Flash devices mapped on IBM 440GX Ocotea"
353 depends on MTD_CFI && OCOTEA && !PPC_MERGE
354 help
355 This enables access routines for the flash chips on the IBM 440GX
356 Ocotea board. If you have one of these boards and would like to
357 use the flash chips on it, say 'Y'.
358
359config MTD_REDWOOD 335config MTD_REDWOOD
360 tristate "CFI Flash devices mapped on IBM Redwood" 336 tristate "CFI Flash devices mapped on IBM Redwood"
361 depends on MTD_CFI && ( REDWOOD_4 || REDWOOD_5 || REDWOOD_6 ) 337 depends on MTD_CFI && ( REDWOOD_4 || REDWOOD_5 || REDWOOD_6 )
@@ -458,13 +434,6 @@ config MTD_CEIVA
458 PhotoMax Digital Picture Frame. 434 PhotoMax Digital Picture Frame.
459 If you have such a device, say 'Y'. 435 If you have such a device, say 'Y'.
460 436
461config MTD_NOR_TOTO
462 tristate "NOR Flash device on TOTO board"
463 depends on ARCH_OMAP && OMAP_TOTO
464 help
465 This enables access to the NOR flash on the Texas Instruments
466 TOTO board.
467
468config MTD_H720X 437config MTD_H720X
469 tristate "Hynix evaluation board mappings" 438 tristate "Hynix evaluation board mappings"
470 depends on MTD_CFI && ( ARCH_H7201 || ARCH_H7202 ) 439 depends on MTD_CFI && ( ARCH_H7201 || ARCH_H7202 )
@@ -522,7 +491,7 @@ config MTD_BFIN_ASYNC
522 491
523config MTD_UCLINUX 492config MTD_UCLINUX
524 tristate "Generic uClinux RAM/ROM filesystem support" 493 tristate "Generic uClinux RAM/ROM filesystem support"
525 depends on MTD_PARTITIONS && !MMU 494 depends on MTD_PARTITIONS && MTD_RAM && !MMU
526 help 495 help
527 Map driver to support image based filesystems for uClinux. 496 Map driver to support image based filesystems for uClinux.
528 497
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 6cda6df973e5..6d9ba35caf11 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -50,12 +50,8 @@ obj-$(CONFIG_MTD_REDWOOD) += redwood.o
50obj-$(CONFIG_MTD_UCLINUX) += uclinux.o 50obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
51obj-$(CONFIG_MTD_NETtel) += nettel.o 51obj-$(CONFIG_MTD_NETtel) += nettel.o
52obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o 52obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
53obj-$(CONFIG_MTD_EBONY) += ebony.o
54obj-$(CONFIG_MTD_OCOTEA) += ocotea.o
55obj-$(CONFIG_MTD_WALNUT) += walnut.o
56obj-$(CONFIG_MTD_H720X) += h720x-flash.o 53obj-$(CONFIG_MTD_H720X) += h720x-flash.o
57obj-$(CONFIG_MTD_SBC8240) += sbc8240.o 54obj-$(CONFIG_MTD_SBC8240) += sbc8240.o
58obj-$(CONFIG_MTD_NOR_TOTO) += omap-toto-flash.o
59obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o 55obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o
60obj-$(CONFIG_MTD_IXP2000) += ixp2000.o 56obj-$(CONFIG_MTD_IXP2000) += ixp2000.o
61obj-$(CONFIG_MTD_WRSBC8260) += wr_sbc82xx_flash.o 57obj-$(CONFIG_MTD_WRSBC8260) += wr_sbc82xx_flash.o
diff --git a/drivers/mtd/maps/ebony.c b/drivers/mtd/maps/ebony.c
deleted file mode 100644
index d92b7c70d3ed..000000000000
--- a/drivers/mtd/maps/ebony.c
+++ /dev/null
@@ -1,163 +0,0 @@
1/*
2 * Mapping for Ebony user flash
3 *
4 * Matt Porter <mporter@kernel.crashing.org>
5 *
6 * Copyright 2002-2004 MontaVista Software Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/map.h>
20#include <linux/mtd/partitions.h>
21#include <asm/io.h>
22#include <asm/ibm44x.h>
23#include <platforms/4xx/ebony.h>
24
25static struct mtd_info *flash;
26
27static struct map_info ebony_small_map = {
28 .name = "Ebony small flash",
29 .size = EBONY_SMALL_FLASH_SIZE,
30 .bankwidth = 1,
31};
32
33static struct map_info ebony_large_map = {
34 .name = "Ebony large flash",
35 .size = EBONY_LARGE_FLASH_SIZE,
36 .bankwidth = 1,
37};
38
39static struct mtd_partition ebony_small_partitions[] = {
40 {
41 .name = "OpenBIOS",
42 .offset = 0x0,
43 .size = 0x80000,
44 }
45};
46
47static struct mtd_partition ebony_large_partitions[] = {
48 {
49 .name = "fs",
50 .offset = 0,
51 .size = 0x380000,
52 },
53 {
54 .name = "firmware",
55 .offset = 0x380000,
56 .size = 0x80000,
57 }
58};
59
60int __init init_ebony(void)
61{
62 u8 fpga0_reg;
63 u8 __iomem *fpga0_adr;
64 unsigned long long small_flash_base, large_flash_base;
65
66 fpga0_adr = ioremap64(EBONY_FPGA_ADDR, 16);
67 if (!fpga0_adr)
68 return -ENOMEM;
69
70 fpga0_reg = readb(fpga0_adr);
71 iounmap(fpga0_adr);
72
73 if (EBONY_BOOT_SMALL_FLASH(fpga0_reg) &&
74 !EBONY_FLASH_SEL(fpga0_reg))
75 small_flash_base = EBONY_SMALL_FLASH_HIGH2;
76 else if (EBONY_BOOT_SMALL_FLASH(fpga0_reg) &&
77 EBONY_FLASH_SEL(fpga0_reg))
78 small_flash_base = EBONY_SMALL_FLASH_HIGH1;
79 else if (!EBONY_BOOT_SMALL_FLASH(fpga0_reg) &&
80 !EBONY_FLASH_SEL(fpga0_reg))
81 small_flash_base = EBONY_SMALL_FLASH_LOW2;
82 else
83 small_flash_base = EBONY_SMALL_FLASH_LOW1;
84
85 if (EBONY_BOOT_SMALL_FLASH(fpga0_reg) &&
86 !EBONY_ONBRD_FLASH_EN(fpga0_reg))
87 large_flash_base = EBONY_LARGE_FLASH_LOW;
88 else
89 large_flash_base = EBONY_LARGE_FLASH_HIGH;
90
91 ebony_small_map.phys = small_flash_base;
92 ebony_small_map.virt = ioremap64(small_flash_base,
93 ebony_small_map.size);
94
95 if (!ebony_small_map.virt) {
96 printk("Failed to ioremap flash\n");
97 return -EIO;
98 }
99
100 simple_map_init(&ebony_small_map);
101
102 flash = do_map_probe("jedec_probe", &ebony_small_map);
103 if (flash) {
104 flash->owner = THIS_MODULE;
105 add_mtd_partitions(flash, ebony_small_partitions,
106 ARRAY_SIZE(ebony_small_partitions));
107 } else {
108 printk("map probe failed for flash\n");
109 iounmap(ebony_small_map.virt);
110 return -ENXIO;
111 }
112
113 ebony_large_map.phys = large_flash_base;
114 ebony_large_map.virt = ioremap64(large_flash_base,
115 ebony_large_map.size);
116
117 if (!ebony_large_map.virt) {
118 printk("Failed to ioremap flash\n");
119 iounmap(ebony_small_map.virt);
120 return -EIO;
121 }
122
123 simple_map_init(&ebony_large_map);
124
125 flash = do_map_probe("jedec_probe", &ebony_large_map);
126 if (flash) {
127 flash->owner = THIS_MODULE;
128 add_mtd_partitions(flash, ebony_large_partitions,
129 ARRAY_SIZE(ebony_large_partitions));
130 } else {
131 printk("map probe failed for flash\n");
132 iounmap(ebony_small_map.virt);
133 iounmap(ebony_large_map.virt);
134 return -ENXIO;
135 }
136
137 return 0;
138}
139
140static void __exit cleanup_ebony(void)
141{
142 if (flash) {
143 del_mtd_partitions(flash);
144 map_destroy(flash);
145 }
146
147 if (ebony_small_map.virt) {
148 iounmap(ebony_small_map.virt);
149 ebony_small_map.virt = NULL;
150 }
151
152 if (ebony_large_map.virt) {
153 iounmap(ebony_large_map.virt);
154 ebony_large_map.virt = NULL;
155 }
156}
157
158module_init(init_ebony);
159module_exit(cleanup_ebony);
160
161MODULE_LICENSE("GPL");
162MODULE_AUTHOR("Matt Porter <mporter@kernel.crashing.org>");
163MODULE_DESCRIPTION("MTD map and partitions for IBM 440GP Ebony boards");
diff --git a/drivers/mtd/maps/ocotea.c b/drivers/mtd/maps/ocotea.c
deleted file mode 100644
index 5522eac8c980..000000000000
--- a/drivers/mtd/maps/ocotea.c
+++ /dev/null
@@ -1,154 +0,0 @@
1/*
2 * Mapping for Ocotea user flash
3 *
4 * Matt Porter <mporter@kernel.crashing.org>
5 *
6 * Copyright 2002-2004 MontaVista Software Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/map.h>
20#include <linux/mtd/partitions.h>
21#include <asm/io.h>
22#include <asm/ibm44x.h>
23#include <platforms/4xx/ocotea.h>
24
25static struct mtd_info *flash;
26
27static struct map_info ocotea_small_map = {
28 .name = "Ocotea small flash",
29 .size = OCOTEA_SMALL_FLASH_SIZE,
30 .buswidth = 1,
31};
32
33static struct map_info ocotea_large_map = {
34 .name = "Ocotea large flash",
35 .size = OCOTEA_LARGE_FLASH_SIZE,
36 .buswidth = 1,
37};
38
39static struct mtd_partition ocotea_small_partitions[] = {
40 {
41 .name = "pibs",
42 .offset = 0x0,
43 .size = 0x100000,
44 }
45};
46
47static struct mtd_partition ocotea_large_partitions[] = {
48 {
49 .name = "fs",
50 .offset = 0,
51 .size = 0x300000,
52 },
53 {
54 .name = "firmware",
55 .offset = 0x300000,
56 .size = 0x100000,
57 }
58};
59
60int __init init_ocotea(void)
61{
62 u8 fpga0_reg;
63 u8 *fpga0_adr;
64 unsigned long long small_flash_base, large_flash_base;
65
66 fpga0_adr = ioremap64(OCOTEA_FPGA_ADDR, 16);
67 if (!fpga0_adr)
68 return -ENOMEM;
69
70 fpga0_reg = readb((unsigned long)fpga0_adr);
71 iounmap(fpga0_adr);
72
73 if (OCOTEA_BOOT_LARGE_FLASH(fpga0_reg)) {
74 small_flash_base = OCOTEA_SMALL_FLASH_HIGH;
75 large_flash_base = OCOTEA_LARGE_FLASH_LOW;
76 }
77 else {
78 small_flash_base = OCOTEA_SMALL_FLASH_LOW;
79 large_flash_base = OCOTEA_LARGE_FLASH_HIGH;
80 }
81
82 ocotea_small_map.phys = small_flash_base;
83 ocotea_small_map.virt = ioremap64(small_flash_base,
84 ocotea_small_map.size);
85
86 if (!ocotea_small_map.virt) {
87 printk("Failed to ioremap flash\n");
88 return -EIO;
89 }
90
91 simple_map_init(&ocotea_small_map);
92
93 flash = do_map_probe("map_rom", &ocotea_small_map);
94 if (flash) {
95 flash->owner = THIS_MODULE;
96 add_mtd_partitions(flash, ocotea_small_partitions,
97 ARRAY_SIZE(ocotea_small_partitions));
98 } else {
99 printk("map probe failed for flash\n");
100 iounmap(ocotea_small_map.virt);
101 return -ENXIO;
102 }
103
104 ocotea_large_map.phys = large_flash_base;
105 ocotea_large_map.virt = ioremap64(large_flash_base,
106 ocotea_large_map.size);
107
108 if (!ocotea_large_map.virt) {
109 printk("Failed to ioremap flash\n");
110 iounmap(ocotea_small_map.virt);
111 return -EIO;
112 }
113
114 simple_map_init(&ocotea_large_map);
115
116 flash = do_map_probe("cfi_probe", &ocotea_large_map);
117 if (flash) {
118 flash->owner = THIS_MODULE;
119 add_mtd_partitions(flash, ocotea_large_partitions,
120 ARRAY_SIZE(ocotea_large_partitions));
121 } else {
122 printk("map probe failed for flash\n");
123 iounmap(ocotea_small_map.virt);
124 iounmap(ocotea_large_map.virt);
125 return -ENXIO;
126 }
127
128 return 0;
129}
130
131static void __exit cleanup_ocotea(void)
132{
133 if (flash) {
134 del_mtd_partitions(flash);
135 map_destroy(flash);
136 }
137
138 if (ocotea_small_map.virt) {
139 iounmap((void *)ocotea_small_map.virt);
140 ocotea_small_map.virt = 0;
141 }
142
143 if (ocotea_large_map.virt) {
144 iounmap((void *)ocotea_large_map.virt);
145 ocotea_large_map.virt = 0;
146 }
147}
148
149module_init(init_ocotea);
150module_exit(cleanup_ocotea);
151
152MODULE_LICENSE("GPL");
153MODULE_AUTHOR("Matt Porter <mporter@kernel.crashing.org>");
154MODULE_DESCRIPTION("MTD map and partitions for IBM 440GX Ocotea boards");
diff --git a/drivers/mtd/maps/omap-toto-flash.c b/drivers/mtd/maps/omap-toto-flash.c
deleted file mode 100644
index 0a60ebbc2175..000000000000
--- a/drivers/mtd/maps/omap-toto-flash.c
+++ /dev/null
@@ -1,133 +0,0 @@
1/*
2 * NOR Flash memory access on TI Toto board
3 *
4 * jzhang@ti.com (C) 2003 Texas Instruments.
5 *
6 * (C) 2002 MontVista Software, Inc.
7 */
8
9#include <linux/module.h>
10#include <linux/types.h>
11#include <linux/kernel.h>
12#include <linux/errno.h>
13#include <linux/init.h>
14#include <linux/slab.h>
15
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/map.h>
18#include <linux/mtd/partitions.h>
19
20#include <asm/hardware.h>
21#include <asm/io.h>
22
23
24#ifndef CONFIG_ARCH_OMAP
25#error This is for OMAP architecture only
26#endif
27
28//these lines need be moved to a hardware header file
29#define OMAP_TOTO_FLASH_BASE 0xd8000000
30#define OMAP_TOTO_FLASH_SIZE 0x80000
31
32static struct map_info omap_toto_map_flash = {
33 .name = "OMAP Toto flash",
34 .bankwidth = 2,
35 .virt = (void __iomem *)OMAP_TOTO_FLASH_BASE,
36};
37
38
39static struct mtd_partition toto_flash_partitions[] = {
40 {
41 .name = "BootLoader",
42 .size = 0x00040000, /* hopefully u-boot will stay 128k + 128*/
43 .offset = 0,
44 .mask_flags = MTD_WRITEABLE, /* force read-only */
45 }, {
46 .name = "ReservedSpace",
47 .size = 0x00030000,
48 .offset = MTDPART_OFS_APPEND,
49 //mask_flags: MTD_WRITEABLE, /* force read-only */
50 }, {
51 .name = "EnvArea", /* bottom 64KiB for env vars */
52 .size = MTDPART_SIZ_FULL,
53 .offset = MTDPART_OFS_APPEND,
54 }
55};
56
57static struct mtd_partition *parsed_parts;
58
59static struct mtd_info *flash_mtd;
60
61static int __init init_flash (void)
62{
63
64 struct mtd_partition *parts;
65 int nb_parts = 0;
66 int parsed_nr_parts = 0;
67 const char *part_type;
68
69 /*
70 * Static partition definition selection
71 */
72 part_type = "static";
73
74 parts = toto_flash_partitions;
75 nb_parts = ARRAY_SIZE(toto_flash_partitions);
76 omap_toto_map_flash.size = OMAP_TOTO_FLASH_SIZE;
77 omap_toto_map_flash.phys = virt_to_phys(OMAP_TOTO_FLASH_BASE);
78
79 simple_map_init(&omap_toto_map_flash);
80 /*
81 * Now let's probe for the actual flash. Do it here since
82 * specific machine settings might have been set above.
83 */
84 printk(KERN_NOTICE "OMAP toto flash: probing %d-bit flash bus\n",
85 omap_toto_map_flash.bankwidth*8);
86 flash_mtd = do_map_probe("jedec_probe", &omap_toto_map_flash);
87 if (!flash_mtd)
88 return -ENXIO;
89
90 if (parsed_nr_parts > 0) {
91 parts = parsed_parts;
92 nb_parts = parsed_nr_parts;
93 }
94
95 if (nb_parts == 0) {
96 printk(KERN_NOTICE "OMAP toto flash: no partition info available,"
97 "registering whole flash at once\n");
98 if (add_mtd_device(flash_mtd)){
99 return -ENXIO;
100 }
101 } else {
102 printk(KERN_NOTICE "Using %s partition definition\n",
103 part_type);
104 return add_mtd_partitions(flash_mtd, parts, nb_parts);
105 }
106 return 0;
107}
108
109int __init omap_toto_mtd_init(void)
110{
111 int status;
112
113 if (status = init_flash()) {
114 printk(KERN_ERR "OMAP Toto Flash: unable to init map for toto flash\n");
115 }
116 return status;
117}
118
119static void __exit omap_toto_mtd_cleanup(void)
120{
121 if (flash_mtd) {
122 del_mtd_partitions(flash_mtd);
123 map_destroy(flash_mtd);
124 kfree(parsed_parts);
125 }
126}
127
128module_init(omap_toto_mtd_init);
129module_exit(omap_toto_mtd_cleanup);
130
131MODULE_AUTHOR("Jian Zhang");
132MODULE_DESCRIPTION("OMAP Toto board map driver");
133MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 5c6a25c90380..48f4cf5cb9d1 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -203,15 +203,8 @@ intel_dc21285_init(struct pci_dev *dev, struct map_pci_info *map)
203 * not enabled, should we be allocating a new resource for it 203 * not enabled, should we be allocating a new resource for it
204 * or simply enabling it? 204 * or simply enabling it?
205 */ 205 */
206 if (!(pci_resource_flags(dev, PCI_ROM_RESOURCE) & 206 pci_enable_rom(dev);
207 IORESOURCE_ROM_ENABLE)) { 207 printk("%s: enabling expansion ROM\n", pci_name(dev));
208 u32 val;
209 pci_resource_flags(dev, PCI_ROM_RESOURCE) |= IORESOURCE_ROM_ENABLE;
210 pci_read_config_dword(dev, PCI_ROM_ADDRESS, &val);
211 val |= PCI_ROM_ADDRESS_ENABLE;
212 pci_write_config_dword(dev, PCI_ROM_ADDRESS, val);
213 printk("%s: enabling expansion ROM\n", pci_name(dev));
214 }
215 } 208 }
216 209
217 if (!len || !base) 210 if (!len || !base)
@@ -232,18 +225,13 @@ intel_dc21285_init(struct pci_dev *dev, struct map_pci_info *map)
232static void 225static void
233intel_dc21285_exit(struct pci_dev *dev, struct map_pci_info *map) 226intel_dc21285_exit(struct pci_dev *dev, struct map_pci_info *map)
234{ 227{
235 u32 val;
236
237 if (map->base) 228 if (map->base)
238 iounmap(map->base); 229 iounmap(map->base);
239 230
240 /* 231 /*
241 * We need to undo the PCI BAR2/PCI ROM BAR address alteration. 232 * We need to undo the PCI BAR2/PCI ROM BAR address alteration.
242 */ 233 */
243 pci_resource_flags(dev, PCI_ROM_RESOURCE) &= ~IORESOURCE_ROM_ENABLE; 234 pci_disable_rom(dev);
244 pci_read_config_dword(dev, PCI_ROM_ADDRESS, &val);
245 val &= ~PCI_ROM_ADDRESS_ENABLE;
246 pci_write_config_dword(dev, PCI_ROM_ADDRESS, val);
247} 235}
248 236
249static unsigned long 237static unsigned long
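For context, the hunks above replace open-coded PCI_ROM_ADDRESS manipulation with the PCI core's ROM helpers. A minimal sketch of the resulting pattern, illustrative only and not part of the patch (real callers would also map the ROM BAR and check the region length):

#include <linux/pci.h>

/* Illustrative helper: the core helpers now toggle PCI_ROM_ADDRESS_ENABLE
 * on our behalf instead of the driver poking the config word directly. */
static void example_rom_window(struct pci_dev *dev)
{
	if (pci_enable_rom(dev))		/* sets the ROM enable bit */
		printk(KERN_WARNING "%s: could not enable expansion ROM\n",
		       pci_name(dev));

	/* ... access the expansion ROM through its BAR window ... */

	pci_disable_rom(dev);			/* clears the enable bit again */
}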
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 49acd4171893..5fcfec034a94 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -230,8 +230,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
230 230
231#ifdef CONFIG_MTD_OF_PARTS 231#ifdef CONFIG_MTD_OF_PARTS
232 if (err == 0) { 232 if (err == 0) {
233 err = of_mtd_parse_partitions(&dev->dev, info->mtd, 233 err = of_mtd_parse_partitions(&dev->dev, dp, &info->parts);
234 dp, &info->parts);
235 if (err < 0) 234 if (err < 0)
236 return err; 235 return err;
237 } 236 }
diff --git a/drivers/mtd/maps/walnut.c b/drivers/mtd/maps/walnut.c
deleted file mode 100644
index e243476c8171..000000000000
--- a/drivers/mtd/maps/walnut.c
+++ /dev/null
@@ -1,122 +0,0 @@
1/*
2 * Mapping for Walnut flash
3 * (used ebony.c as a "framework")
4 *
5 * Heikki Lindholm <holindho@infradead.org>
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/map.h>
20#include <linux/mtd/partitions.h>
21#include <asm/io.h>
22#include <asm/ibm4xx.h>
23#include <platforms/4xx/walnut.h>
24
25/* these should be in platforms/4xx/walnut.h ? */
26#define WALNUT_FLASH_ONBD_N(x) (x & 0x02)
27#define WALNUT_FLASH_SRAM_SEL(x) (x & 0x01)
28#define WALNUT_FLASH_LOW 0xFFF00000
29#define WALNUT_FLASH_HIGH 0xFFF80000
30#define WALNUT_FLASH_SIZE 0x80000
31
32static struct mtd_info *flash;
33
34static struct map_info walnut_map = {
35 .name = "Walnut flash",
36 .size = WALNUT_FLASH_SIZE,
37 .bankwidth = 1,
38};
39
40/* Actually, OpenBIOS is the last 128 KiB of the flash - better
41 * partitioning could be made */
42static struct mtd_partition walnut_partitions[] = {
43 {
44 .name = "OpenBIOS",
45 .offset = 0x0,
46 .size = WALNUT_FLASH_SIZE,
47 /*.mask_flags = MTD_WRITEABLE, */ /* force read-only */
48 }
49};
50
51int __init init_walnut(void)
52{
53 u8 fpga_brds1;
54 void *fpga_brds1_adr;
55 void *fpga_status_adr;
56 unsigned long flash_base;
57
58 /* this should already be mapped (platform/4xx/walnut.c) */
59 fpga_status_adr = ioremap(WALNUT_FPGA_BASE, 8);
60 if (!fpga_status_adr)
61 return -ENOMEM;
62
63 fpga_brds1_adr = fpga_status_adr+5;
64 fpga_brds1 = readb(fpga_brds1_adr);
65 /* iounmap(fpga_status_adr); */
66
67 if (WALNUT_FLASH_ONBD_N(fpga_brds1)) {
68 printk("The on-board flash is disabled (U79 sw 5)!");
69 iounmap(fpga_status_adr);
70 return -EIO;
71 }
72 if (WALNUT_FLASH_SRAM_SEL(fpga_brds1))
73 flash_base = WALNUT_FLASH_LOW;
74 else
75 flash_base = WALNUT_FLASH_HIGH;
76
77 walnut_map.phys = flash_base;
78 walnut_map.virt =
79 (void __iomem *)ioremap(flash_base, walnut_map.size);
80
81 if (!walnut_map.virt) {
82 printk("Failed to ioremap flash.\n");
83 iounmap(fpga_status_adr);
84 return -EIO;
85 }
86
87 simple_map_init(&walnut_map);
88
89 flash = do_map_probe("jedec_probe", &walnut_map);
90 if (flash) {
91 flash->owner = THIS_MODULE;
92 add_mtd_partitions(flash, walnut_partitions,
93 ARRAY_SIZE(walnut_partitions));
94 } else {
95 printk("map probe failed for flash\n");
96 iounmap(fpga_status_adr);
97 return -ENXIO;
98 }
99
100 iounmap(fpga_status_adr);
101 return 0;
102}
103
104static void __exit cleanup_walnut(void)
105{
106 if (flash) {
107 del_mtd_partitions(flash);
108 map_destroy(flash);
109 }
110
111 if (walnut_map.virt) {
112 iounmap((void *)walnut_map.virt);
113 walnut_map.virt = 0;
114 }
115}
116
117module_init(init_walnut);
118module_exit(cleanup_walnut);
119
120MODULE_LICENSE("GPL");
121MODULE_AUTHOR("Heikki Lindholm <holindho@infradead.org>");
122MODULE_DESCRIPTION("MTD map and partitions for IBM 405GP Walnut boards");
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 1c74762dec89..963840e9b5bf 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -348,7 +348,7 @@ static void mtdchar_erase_callback (struct erase_info *instr)
348 wake_up((wait_queue_head_t *)instr->priv); 348 wake_up((wait_queue_head_t *)instr->priv);
349} 349}
350 350
351#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP) 351#ifdef CONFIG_HAVE_MTD_OTP
352static int otp_select_filemode(struct mtd_file_info *mfi, int mode) 352static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
353{ 353{
354 struct mtd_info *mtd = mfi->mtd; 354 struct mtd_info *mtd = mfi->mtd;
@@ -665,7 +665,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
665 break; 665 break;
666 } 666 }
667 667
668#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP) 668#ifdef CONFIG_HAVE_MTD_OTP
669 case OTPSELECT: 669 case OTPSELECT:
670 { 670 {
671 int mode; 671 int mode;
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 2972a5edb73d..789842d0e6f2 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -444,7 +444,7 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
444 return -EINVAL; 444 return -EINVAL;
445 } 445 }
446 446
447 instr->fail_addr = 0xffffffff; 447 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
448 448
449 /* make a local copy of instr to avoid modifying the caller's struct */ 449 /* make a local copy of instr to avoid modifying the caller's struct */
450 erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL); 450 erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
@@ -493,7 +493,7 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
493 /* sanity check: should never happen since 493 /* sanity check: should never happen since
494 * block alignment has been checked above */ 494 * block alignment has been checked above */
495 BUG_ON(err == -EINVAL); 495 BUG_ON(err == -EINVAL);
496 if (erase->fail_addr != 0xffffffff) 496 if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
497 instr->fail_addr = erase->fail_addr + offset; 497 instr->fail_addr = erase->fail_addr + offset;
498 break; 498 break;
499 } 499 }
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 5a680e1e61f1..aebb3b27edbd 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -33,6 +33,7 @@
33#include <linux/interrupt.h> 33#include <linux/interrupt.h>
34#include <linux/mtd/mtd.h> 34#include <linux/mtd/mtd.h>
35 35
36#define MTDOOPS_KERNMSG_MAGIC 0x5d005d00
36#define OOPS_PAGE_SIZE 4096 37#define OOPS_PAGE_SIZE 4096
37 38
38static struct mtdoops_context { 39static struct mtdoops_context {
@@ -99,7 +100,7 @@ static void mtdoops_inc_counter(struct mtdoops_context *cxt)
99 int ret; 100 int ret;
100 101
101 cxt->nextpage++; 102 cxt->nextpage++;
102 if (cxt->nextpage > cxt->oops_pages) 103 if (cxt->nextpage >= cxt->oops_pages)
103 cxt->nextpage = 0; 104 cxt->nextpage = 0;
104 cxt->nextcount++; 105 cxt->nextcount++;
105 if (cxt->nextcount == 0xffffffff) 106 if (cxt->nextcount == 0xffffffff)
@@ -141,7 +142,7 @@ static void mtdoops_workfunc_erase(struct work_struct *work)
141 mod = (cxt->nextpage * OOPS_PAGE_SIZE) % mtd->erasesize; 142 mod = (cxt->nextpage * OOPS_PAGE_SIZE) % mtd->erasesize;
142 if (mod != 0) { 143 if (mod != 0) {
143 cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / OOPS_PAGE_SIZE); 144 cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / OOPS_PAGE_SIZE);
144 if (cxt->nextpage > cxt->oops_pages) 145 if (cxt->nextpage >= cxt->oops_pages)
145 cxt->nextpage = 0; 146 cxt->nextpage = 0;
146 } 147 }
147 148
@@ -158,7 +159,7 @@ badblock:
158 cxt->nextpage * OOPS_PAGE_SIZE); 159 cxt->nextpage * OOPS_PAGE_SIZE);
159 i++; 160 i++;
160 cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE); 161 cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE);
161 if (cxt->nextpage > cxt->oops_pages) 162 if (cxt->nextpage >= cxt->oops_pages)
162 cxt->nextpage = 0; 163 cxt->nextpage = 0;
163 if (i == (cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE))) { 164 if (i == (cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE))) {
164 printk(KERN_ERR "mtdoops: All blocks bad!\n"); 165 printk(KERN_ERR "mtdoops: All blocks bad!\n");
@@ -224,40 +225,40 @@ static void find_next_position(struct mtdoops_context *cxt)
224{ 225{
225 struct mtd_info *mtd = cxt->mtd; 226 struct mtd_info *mtd = cxt->mtd;
226 int ret, page, maxpos = 0; 227 int ret, page, maxpos = 0;
227 u32 count, maxcount = 0xffffffff; 228 u32 count[2], maxcount = 0xffffffff;
228 size_t retlen; 229 size_t retlen;
229 230
230 for (page = 0; page < cxt->oops_pages; page++) { 231 for (page = 0; page < cxt->oops_pages; page++) {
231 ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count); 232 ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 8, &retlen, (u_char *) &count[0]);
232 if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) { 233 if ((retlen != 8) || ((ret < 0) && (ret != -EUCLEAN))) {
233 printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)" 234 printk(KERN_ERR "mtdoops: Read failure at %d (%td of 8 read)"
234 ", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret); 235 ", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret);
235 continue; 236 continue;
236 } 237 }
237 238
238 if (count == 0xffffffff) 239 if (count[1] != MTDOOPS_KERNMSG_MAGIC)
240 continue;
241 if (count[0] == 0xffffffff)
239 continue; 242 continue;
240 if (maxcount == 0xffffffff) { 243 if (maxcount == 0xffffffff) {
241 maxcount = count; 244 maxcount = count[0];
242 maxpos = page; 245 maxpos = page;
243 } else if ((count < 0x40000000) && (maxcount > 0xc0000000)) { 246 } else if ((count[0] < 0x40000000) && (maxcount > 0xc0000000)) {
244 maxcount = count; 247 maxcount = count[0];
245 maxpos = page; 248 maxpos = page;
246 } else if ((count > maxcount) && (count < 0xc0000000)) { 249 } else if ((count[0] > maxcount) && (count[0] < 0xc0000000)) {
247 maxcount = count; 250 maxcount = count[0];
248 maxpos = page; 251 maxpos = page;
249 } else if ((count > maxcount) && (count > 0xc0000000) 252 } else if ((count[0] > maxcount) && (count[0] > 0xc0000000)
250 && (maxcount > 0x80000000)) { 253 && (maxcount > 0x80000000)) {
251 maxcount = count; 254 maxcount = count[0];
252 maxpos = page; 255 maxpos = page;
253 } 256 }
254 } 257 }
255 if (maxcount == 0xffffffff) { 258 if (maxcount == 0xffffffff) {
256 cxt->nextpage = 0; 259 cxt->nextpage = 0;
257 cxt->nextcount = 1; 260 cxt->nextcount = 1;
258 cxt->ready = 1; 261 schedule_work(&cxt->work_erase);
259 printk(KERN_DEBUG "mtdoops: Ready %d, %d (first init)\n",
260 cxt->nextpage, cxt->nextcount);
261 return; 262 return;
262 } 263 }
263 264
@@ -358,8 +359,9 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
358 359
359 if (cxt->writecount == 0) { 360 if (cxt->writecount == 0) {
360 u32 *stamp = cxt->oops_buf; 361 u32 *stamp = cxt->oops_buf;
361 *stamp = cxt->nextcount; 362 *stamp++ = cxt->nextcount;
362 cxt->writecount = 4; 363 *stamp = MTDOOPS_KERNMSG_MAGIC;
364 cxt->writecount = 8;
363 } 365 }
364 366
365 if ((count + cxt->writecount) > OOPS_PAGE_SIZE) 367 if ((count + cxt->writecount) > OOPS_PAGE_SIZE)
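Taken together, the mtdoops hunks above grow the on-flash record header from a bare 4-byte counter to an 8-byte header, and find_next_position() now also requires the magic word before trusting a page. A minimal sketch of that layout and check, in plain C with names invented for illustration:

#include <stdbool.h>
#include <stdint.h>

#define MTDOOPS_KERNMSG_MAGIC	0x5d005d00	/* value introduced by the patch */

/* Hypothetical view of the first 8 bytes of each OOPS_PAGE_SIZE record. */
struct mtdoops_record_hdr {
	uint32_t count;		/* sequence counter; 0xffffffff means erased page */
	uint32_t magic;		/* must be MTDOOPS_KERNMSG_MAGIC to be valid */
};

/* Mirrors the test find_next_position() now applies to each page. */
bool mtdoops_record_valid(const struct mtdoops_record_hdr *hdr)
{
	return hdr->magic == MTDOOPS_KERNMSG_MAGIC && hdr->count != 0xffffffff;
}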
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 9a06dc93ee0d..3728913fa5fa 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -214,7 +214,7 @@ static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
214 instr->addr += part->offset; 214 instr->addr += part->offset;
215 ret = part->master->erase(part->master, instr); 215 ret = part->master->erase(part->master, instr);
216 if (ret) { 216 if (ret) {
217 if (instr->fail_addr != 0xffffffff) 217 if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
218 instr->fail_addr -= part->offset; 218 instr->fail_addr -= part->offset;
219 instr->addr -= part->offset; 219 instr->addr -= part->offset;
220 } 220 }
@@ -226,7 +226,7 @@ void mtd_erase_callback(struct erase_info *instr)
226 if (instr->mtd->erase == part_erase) { 226 if (instr->mtd->erase == part_erase) {
227 struct mtd_part *part = PART(instr->mtd); 227 struct mtd_part *part = PART(instr->mtd);
228 228
229 if (instr->fail_addr != 0xffffffff) 229 if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
230 instr->fail_addr -= part->offset; 230 instr->fail_addr -= part->offset;
231 instr->addr -= part->offset; 231 instr->addr -= part->offset;
232 } 232 }
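Both mtdconcat and mtdpart now compare against the named sentinel instead of a literal 0xffffffff. A small sketch of how a caller might interpret fail_addr after a failed erase; this is a hypothetical helper assuming only what the hunks above show:

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>

/* Hypothetical caller-side check; not part of the patch. */
static void report_erase_failure(const struct erase_info *instr)
{
	if (instr->fail_addr == MTD_FAIL_ADDR_UNKNOWN)
		printk(KERN_ERR "erase failed, failing address unknown\n");
	else
		printk(KERN_ERR "erase failed at 0x%llx\n",
		       (unsigned long long)instr->fail_addr);
}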
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 41f361c49b32..1c2e9450d663 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -56,6 +56,12 @@ config MTD_NAND_H1900
56 help 56 help
57 This enables the driver for the iPAQ h1900 flash. 57 This enables the driver for the iPAQ h1900 flash.
58 58
59config MTD_NAND_GPIO
60 tristate "GPIO NAND Flash driver"
61 depends on GENERIC_GPIO && ARM
62 help
63 This enables a GPIO based NAND flash driver.
64
59config MTD_NAND_SPIA 65config MTD_NAND_SPIA
60 tristate "NAND Flash device on SPIA board" 66 tristate "NAND Flash device on SPIA board"
61 depends on ARCH_P720T 67 depends on ARCH_P720T
@@ -68,12 +74,6 @@ config MTD_NAND_AMS_DELTA
68 help 74 help
69 Support for NAND flash on Amstrad E3 (Delta). 75 Support for NAND flash on Amstrad E3 (Delta).
70 76
71config MTD_NAND_TOTO
72 tristate "NAND Flash device on TOTO board"
73 depends on ARCH_OMAP && BROKEN
74 help
75 Support for NAND flash on Texas Instruments Toto platform.
76
77config MTD_NAND_TS7250 77config MTD_NAND_TS7250
78 tristate "NAND Flash device on TS-7250 board" 78 tristate "NAND Flash device on TS-7250 board"
79 depends on MACH_TS72XX 79 depends on MACH_TS72XX
@@ -163,13 +163,6 @@ config MTD_NAND_S3C2410_HWECC
163 incorrect ECC generation, and if using these, the default of 163 incorrect ECC generation, and if using these, the default of
164 software ECC is preferable. 164 software ECC is preferable.
165 165
166config MTD_NAND_NDFC
167 tristate "NDFC NanD Flash Controller"
168 depends on 4xx && !PPC_MERGE
169 select MTD_NAND_ECC_SMC
170 help
171 NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
172
173config MTD_NAND_S3C2410_CLKSTOP 166config MTD_NAND_S3C2410_CLKSTOP
174 bool "S3C2410 NAND IDLE clock stop" 167 bool "S3C2410 NAND IDLE clock stop"
175 depends on MTD_NAND_S3C2410 168 depends on MTD_NAND_S3C2410
@@ -340,6 +333,13 @@ config MTD_NAND_PXA3xx
340 This enables the driver for the NAND flash device found on 333 This enables the driver for the NAND flash device found on
341 PXA3xx processors 334 PXA3xx processors
342 335
336config MTD_NAND_PXA3xx_BUILTIN
337 bool "Use builtin definitions for some NAND chips (deprecated)"
338 depends on MTD_NAND_PXA3xx
339 help
340 This enables builtin definitions for some NAND chips. This
341 is deprecated in favor of platform specific data.
342
343config MTD_NAND_CM_X270 343config MTD_NAND_CM_X270
344 tristate "Support for NAND Flash on CM-X270 modules" 344 tristate "Support for NAND Flash on CM-X270 modules"
345 depends on MTD_NAND && MACH_ARMCORE 345 depends on MTD_NAND && MACH_ARMCORE
@@ -400,10 +400,24 @@ config MTD_NAND_FSL_ELBC
400 400
401config MTD_NAND_FSL_UPM 401config MTD_NAND_FSL_UPM
402 tristate "Support for NAND on Freescale UPM" 402 tristate "Support for NAND on Freescale UPM"
403 depends on MTD_NAND && OF_GPIO && (PPC_83xx || PPC_85xx) 403 depends on MTD_NAND && (PPC_83xx || PPC_85xx)
404 select FSL_LBC 404 select FSL_LBC
405 help 405 help
406 Enables support for NAND Flash chips wired onto Freescale PowerPC 406 Enables support for NAND Flash chips wired onto Freescale PowerPC
407 processor localbus with User-Programmable Machine support. 407 processor localbus with User-Programmable Machine support.
408 408
409config MTD_NAND_MXC
410 tristate "MXC NAND support"
411 depends on ARCH_MX2
412 help
413 This enables the driver for the NAND flash controller on the
414 MXC processors.
415
416config MTD_NAND_SH_FLCTL
417 tristate "Support for NAND on Renesas SuperH FLCTL"
418 depends on MTD_NAND && SUPERH && CPU_SUBTYPE_SH7723
419 help
 420	  Several Renesas SuperH CPUs have an FLCTL. This option enables support
 421	  for NAND Flash using the FLCTL. This driver supports the SH7723.
422
409endif # MTD_NAND 423endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index b786c5da82da..b661586afbfc 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
8obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o 8obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
9obj-$(CONFIG_MTD_NAND_SPIA) += spia.o 9obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
10obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o 10obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
11obj-$(CONFIG_MTD_NAND_TOTO) += toto.o
12obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o 11obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o
13obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o 12obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o
14obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o 13obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
@@ -24,6 +23,7 @@ obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
24obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o 23obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
25obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o 24obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
26obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o 25obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o
26obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o
27obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o 27obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
28obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o 28obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
29obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o 29obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
@@ -34,5 +34,7 @@ obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
34obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o 34obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
35obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o 35obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
36obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o 36obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o
37obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
38obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
37 39
38nand-objs := nand_base.o nand_bbt.o 40nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 3387e0d5076b..c98c1570a40b 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -174,48 +174,6 @@ static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len)
174} 174}
175 175
176/* 176/*
177 * write oob for small pages
178 */
179static int atmel_nand_write_oob_512(struct mtd_info *mtd,
180 struct nand_chip *chip, int page)
181{
182 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
183 int eccsize = chip->ecc.size, length = mtd->oobsize;
184 int len, pos, status = 0;
185 const uint8_t *bufpoi = chip->oob_poi;
186
187 pos = eccsize + chunk;
188
189 chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
190 len = min_t(int, length, chunk);
191 chip->write_buf(mtd, bufpoi, len);
192 bufpoi += len;
193 length -= len;
194 if (length > 0)
195 chip->write_buf(mtd, bufpoi, length);
196
197 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
198 status = chip->waitfunc(mtd, chip);
199
200 return status & NAND_STATUS_FAIL ? -EIO : 0;
201
202}
203
204/*
205 * read oob for small pages
206 */
207static int atmel_nand_read_oob_512(struct mtd_info *mtd,
208 struct nand_chip *chip, int page, int sndcmd)
209{
210 if (sndcmd) {
211 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
212 sndcmd = 0;
213 }
214 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
215 return sndcmd;
216}
217
218/*
219 * Calculate HW ECC 177 * Calculate HW ECC
220 * 178 *
221 * function called after a write 179 * function called after a write
@@ -235,14 +193,14 @@ static int atmel_nand_calculate(struct mtd_info *mtd,
235 /* get the first 2 ECC bytes */ 193 /* get the first 2 ECC bytes */
236 ecc_value = ecc_readl(host->ecc, PR); 194 ecc_value = ecc_readl(host->ecc, PR);
237 195
238 ecc_code[eccpos[0]] = ecc_value & 0xFF; 196 ecc_code[0] = ecc_value & 0xFF;
239 ecc_code[eccpos[1]] = (ecc_value >> 8) & 0xFF; 197 ecc_code[1] = (ecc_value >> 8) & 0xFF;
240 198
241 /* get the last 2 ECC bytes */ 199 /* get the last 2 ECC bytes */
242 ecc_value = ecc_readl(host->ecc, NPR) & ATMEL_ECC_NPARITY; 200 ecc_value = ecc_readl(host->ecc, NPR) & ATMEL_ECC_NPARITY;
243 201
244 ecc_code[eccpos[2]] = ecc_value & 0xFF; 202 ecc_code[2] = ecc_value & 0xFF;
245 ecc_code[eccpos[3]] = (ecc_value >> 8) & 0xFF; 203 ecc_code[3] = (ecc_value >> 8) & 0xFF;
246 204
247 return 0; 205 return 0;
248} 206}
@@ -476,14 +434,12 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
476 res = -EIO; 434 res = -EIO;
477 goto err_ecc_ioremap; 435 goto err_ecc_ioremap;
478 } 436 }
479 nand_chip->ecc.mode = NAND_ECC_HW_SYNDROME; 437 nand_chip->ecc.mode = NAND_ECC_HW;
480 nand_chip->ecc.calculate = atmel_nand_calculate; 438 nand_chip->ecc.calculate = atmel_nand_calculate;
481 nand_chip->ecc.correct = atmel_nand_correct; 439 nand_chip->ecc.correct = atmel_nand_correct;
482 nand_chip->ecc.hwctl = atmel_nand_hwctl; 440 nand_chip->ecc.hwctl = atmel_nand_hwctl;
483 nand_chip->ecc.read_page = atmel_nand_read_page; 441 nand_chip->ecc.read_page = atmel_nand_read_page;
484 nand_chip->ecc.bytes = 4; 442 nand_chip->ecc.bytes = 4;
485 nand_chip->ecc.prepad = 0;
486 nand_chip->ecc.postpad = 0;
487 } 443 }
488 444
489 nand_chip->chip_delay = 20; /* 20us command delay time */ 445 nand_chip->chip_delay = 20; /* 20us command delay time */
@@ -514,7 +470,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
514 goto err_scan_ident; 470 goto err_scan_ident;
515 } 471 }
516 472
517 if (nand_chip->ecc.mode == NAND_ECC_HW_SYNDROME) { 473 if (nand_chip->ecc.mode == NAND_ECC_HW) {
518 /* ECC is calculated for the whole page (1 step) */ 474 /* ECC is calculated for the whole page (1 step) */
519 nand_chip->ecc.size = mtd->writesize; 475 nand_chip->ecc.size = mtd->writesize;
520 476
@@ -522,8 +478,6 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
522 switch (mtd->writesize) { 478 switch (mtd->writesize) {
523 case 512: 479 case 512:
524 nand_chip->ecc.layout = &atmel_oobinfo_small; 480 nand_chip->ecc.layout = &atmel_oobinfo_small;
525 nand_chip->ecc.read_oob = atmel_nand_read_oob_512;
526 nand_chip->ecc.write_oob = atmel_nand_write_oob_512;
527 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528); 481 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
528 break; 482 break;
529 case 1024: 483 case 1024:
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index 3370a800fd36..9f1b451005ca 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -289,8 +289,10 @@ static int __init cs553x_init(void)
289 int i; 289 int i;
290 uint64_t val; 290 uint64_t val;
291 291
292#ifdef CONFIG_MTD_PARTITIONS
292 int mtd_parts_nb = 0; 293 int mtd_parts_nb = 0;
293 struct mtd_partition *mtd_parts = NULL; 294 struct mtd_partition *mtd_parts = NULL;
295#endif
294 296
295 /* If the CPU isn't a Geode GX or LX, abort */ 297 /* If the CPU isn't a Geode GX or LX, abort */
296 if (!is_geode()) 298 if (!is_geode())
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 98ad3cefcaf4..4aa5bd6158da 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -918,8 +918,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
918 918
919#ifdef CONFIG_MTD_OF_PARTS 919#ifdef CONFIG_MTD_OF_PARTS
920 if (ret == 0) { 920 if (ret == 0) {
921 ret = of_mtd_parse_partitions(priv->dev, &priv->mtd, 921 ret = of_mtd_parse_partitions(priv->dev, node, &parts);
922 node, &parts);
923 if (ret < 0) 922 if (ret < 0)
924 goto err; 923 goto err;
925 } 924 }
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 1ebfd87f00b4..024e3fffd4bb 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/delay.h>
16#include <linux/mtd/nand.h> 17#include <linux/mtd/nand.h>
17#include <linux/mtd/nand_ecc.h> 18#include <linux/mtd/nand_ecc.h>
18#include <linux/mtd/partitions.h> 19#include <linux/mtd/partitions.h>
@@ -36,8 +37,6 @@ struct fsl_upm_nand {
36 uint8_t upm_cmd_offset; 37 uint8_t upm_cmd_offset;
37 void __iomem *io_base; 38 void __iomem *io_base;
38 int rnb_gpio; 39 int rnb_gpio;
39 const uint32_t *wait_pattern;
40 const uint32_t *wait_write;
41 int chip_delay; 40 int chip_delay;
42}; 41};
43 42
@@ -61,10 +60,11 @@ static void fun_wait_rnb(struct fsl_upm_nand *fun)
61 if (fun->rnb_gpio >= 0) { 60 if (fun->rnb_gpio >= 0) {
62 while (--cnt && !fun_chip_ready(&fun->mtd)) 61 while (--cnt && !fun_chip_ready(&fun->mtd))
63 cpu_relax(); 62 cpu_relax();
63 if (!cnt)
64 dev_err(fun->dev, "tired waiting for RNB\n");
65 } else {
66 ndelay(100);
64 } 67 }
65
66 if (!cnt)
67 dev_err(fun->dev, "tired waiting for RNB\n");
68} 68}
69 69
70static void fun_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) 70static void fun_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
@@ -89,8 +89,7 @@ static void fun_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
89 89
90 fsl_upm_run_pattern(&fun->upm, fun->io_base, cmd); 90 fsl_upm_run_pattern(&fun->upm, fun->io_base, cmd);
91 91
92 if (fun->wait_pattern) 92 fun_wait_rnb(fun);
93 fun_wait_rnb(fun);
94} 93}
95 94
96static uint8_t fun_read_byte(struct mtd_info *mtd) 95static uint8_t fun_read_byte(struct mtd_info *mtd)
@@ -116,14 +115,16 @@ static void fun_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
116 115
117 for (i = 0; i < len; i++) { 116 for (i = 0; i < len; i++) {
118 out_8(fun->chip.IO_ADDR_W, buf[i]); 117 out_8(fun->chip.IO_ADDR_W, buf[i]);
119 if (fun->wait_write) 118 fun_wait_rnb(fun);
120 fun_wait_rnb(fun);
121 } 119 }
122} 120}
123 121
124static int __devinit fun_chip_init(struct fsl_upm_nand *fun) 122static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
123 const struct device_node *upm_np,
124 const struct resource *io_res)
125{ 125{
126 int ret; 126 int ret;
127 struct device_node *flash_np;
127#ifdef CONFIG_MTD_PARTITIONS 128#ifdef CONFIG_MTD_PARTITIONS
128 static const char *part_types[] = { "cmdlinepart", NULL, }; 129 static const char *part_types[] = { "cmdlinepart", NULL, };
129#endif 130#endif
@@ -143,18 +144,37 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun)
143 fun->mtd.priv = &fun->chip; 144 fun->mtd.priv = &fun->chip;
144 fun->mtd.owner = THIS_MODULE; 145 fun->mtd.owner = THIS_MODULE;
145 146
147 flash_np = of_get_next_child(upm_np, NULL);
148 if (!flash_np)
149 return -ENODEV;
150
151 fun->mtd.name = kasprintf(GFP_KERNEL, "%x.%s", io_res->start,
152 flash_np->name);
153 if (!fun->mtd.name) {
154 ret = -ENOMEM;
155 goto err;
156 }
157
146 ret = nand_scan(&fun->mtd, 1); 158 ret = nand_scan(&fun->mtd, 1);
147 if (ret) 159 if (ret)
148 return ret; 160 goto err;
149
150 fun->mtd.name = fun->dev->bus_id;
151 161
152#ifdef CONFIG_MTD_PARTITIONS 162#ifdef CONFIG_MTD_PARTITIONS
153 ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0); 163 ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0);
164
165#ifdef CONFIG_MTD_OF_PARTS
166 if (ret == 0)
167 ret = of_mtd_parse_partitions(fun->dev, &fun->mtd,
168 flash_np, &fun->parts);
169#endif
154 if (ret > 0) 170 if (ret > 0)
155 return add_mtd_partitions(&fun->mtd, fun->parts, ret); 171 ret = add_mtd_partitions(&fun->mtd, fun->parts, ret);
172 else
156#endif 173#endif
157 return add_mtd_device(&fun->mtd); 174 ret = add_mtd_device(&fun->mtd);
175err:
176 of_node_put(flash_np);
177 return ret;
158} 178}
159 179
160static int __devinit fun_probe(struct of_device *ofdev, 180static int __devinit fun_probe(struct of_device *ofdev,
@@ -211,6 +231,12 @@ static int __devinit fun_probe(struct of_device *ofdev,
211 goto err2; 231 goto err2;
212 } 232 }
213 233
234 prop = of_get_property(ofdev->node, "chip-delay", NULL);
235 if (prop)
236 fun->chip_delay = *prop;
237 else
238 fun->chip_delay = 50;
239
214 fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start, 240 fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
215 io_res.end - io_res.start + 1); 241 io_res.end - io_res.start + 1);
216 if (!fun->io_base) { 242 if (!fun->io_base) {
@@ -220,17 +246,8 @@ static int __devinit fun_probe(struct of_device *ofdev,
220 246
221 fun->dev = &ofdev->dev; 247 fun->dev = &ofdev->dev;
222 fun->last_ctrl = NAND_CLE; 248 fun->last_ctrl = NAND_CLE;
223 fun->wait_pattern = of_get_property(ofdev->node, "fsl,wait-pattern",
224 NULL);
225 fun->wait_write = of_get_property(ofdev->node, "fsl,wait-write", NULL);
226
227 prop = of_get_property(ofdev->node, "chip-delay", NULL);
228 if (prop)
229 fun->chip_delay = *prop;
230 else
231 fun->chip_delay = 50;
232 249
233 ret = fun_chip_init(fun); 250 ret = fun_chip_init(fun, ofdev->node, &io_res);
234 if (ret) 251 if (ret)
235 goto err2; 252 goto err2;
236 253
@@ -251,6 +268,7 @@ static int __devexit fun_remove(struct of_device *ofdev)
251 struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev); 268 struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
252 269
253 nand_release(&fun->mtd); 270 nand_release(&fun->mtd);
271 kfree(fun->mtd.name);
254 272
255 if (fun->rnb_gpio >= 0) 273 if (fun->rnb_gpio >= 0)
256 gpio_free(fun->rnb_gpio); 274 gpio_free(fun->rnb_gpio);
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
new file mode 100644
index 000000000000..8f902e75aa85
--- /dev/null
+++ b/drivers/mtd/nand/gpio.c
@@ -0,0 +1,375 @@
1/*
2 * drivers/mtd/nand/gpio.c
3 *
4 * Updated, and converted to generic GPIO based driver by Russell King.
5 *
6 * Written by Ben Dooks <ben@simtec.co.uk>
7 * Based on 2.4 version by Mark Whittaker
8 *
9 * © 2004 Simtec Electronics
10 *
11 * Device driver for NAND connected via GPIO
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 *
17 */
18
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/slab.h>
22#include <linux/module.h>
23#include <linux/platform_device.h>
24#include <linux/gpio.h>
25#include <linux/io.h>
26#include <linux/mtd/mtd.h>
27#include <linux/mtd/nand.h>
28#include <linux/mtd/partitions.h>
29#include <linux/mtd/nand-gpio.h>
30
31struct gpiomtd {
32 void __iomem *io_sync;
33 struct mtd_info mtd_info;
34 struct nand_chip nand_chip;
35 struct gpio_nand_platdata plat;
36};
37
38#define gpio_nand_getpriv(x) container_of(x, struct gpiomtd, mtd_info)
39
40
41#ifdef CONFIG_ARM
42/* gpio_nand_dosync()
43 *
44 * Make sure the GPIO state changes occur in-order with writes to NAND
45 * memory region.
 46 * Needed on PXA due to bus-reordering within the SoC itself (see the section
 47 * on I/O ordering in the PXA manual, section 2.3, p. 35).
48 */
49static void gpio_nand_dosync(struct gpiomtd *gpiomtd)
50{
51 unsigned long tmp;
52
53 if (gpiomtd->io_sync) {
54 /*
55 * Linux memory barriers don't cater for what's required here.
56 * What's required is what's here - a read from a separate
57 * region with a dependency on that read.
58 */
59 tmp = readl(gpiomtd->io_sync);
60 asm volatile("mov %1, %0\n" : "=r" (tmp) : "r" (tmp));
61 }
62}
63#else
64static inline void gpio_nand_dosync(struct gpiomtd *gpiomtd) {}
65#endif
66
67static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
68{
69 struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
70
71 gpio_nand_dosync(gpiomtd);
72
73 if (ctrl & NAND_CTRL_CHANGE) {
74 gpio_set_value(gpiomtd->plat.gpio_nce, !(ctrl & NAND_NCE));
75 gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE));
76 gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE));
77 gpio_nand_dosync(gpiomtd);
78 }
79 if (cmd == NAND_CMD_NONE)
80 return;
81
82 writeb(cmd, gpiomtd->nand_chip.IO_ADDR_W);
83 gpio_nand_dosync(gpiomtd);
84}
85
86static void gpio_nand_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
87{
88 struct nand_chip *this = mtd->priv;
89
90 writesb(this->IO_ADDR_W, buf, len);
91}
92
93static void gpio_nand_readbuf(struct mtd_info *mtd, u_char *buf, int len)
94{
95 struct nand_chip *this = mtd->priv;
96
97 readsb(this->IO_ADDR_R, buf, len);
98}
99
100static int gpio_nand_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
101{
102 struct nand_chip *this = mtd->priv;
103 unsigned char read, *p = (unsigned char *) buf;
104 int i, err = 0;
105
106 for (i = 0; i < len; i++) {
107 read = readb(this->IO_ADDR_R);
108 if (read != p[i]) {
109 pr_debug("%s: err at %d (read %04x vs %04x)\n",
110 __func__, i, read, p[i]);
111 err = -EFAULT;
112 }
113 }
114 return err;
115}
116
117static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf,
118 int len)
119{
120 struct nand_chip *this = mtd->priv;
121
122 if (IS_ALIGNED((unsigned long)buf, 2)) {
123 writesw(this->IO_ADDR_W, buf, len>>1);
124 } else {
125 int i;
126 unsigned short *ptr = (unsigned short *)buf;
127
128 for (i = 0; i < len; i += 2, ptr++)
129 writew(*ptr, this->IO_ADDR_W);
130 }
131}
132
133static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len)
134{
135 struct nand_chip *this = mtd->priv;
136
137 if (IS_ALIGNED((unsigned long)buf, 2)) {
138 readsw(this->IO_ADDR_R, buf, len>>1);
139 } else {
140 int i;
141 unsigned short *ptr = (unsigned short *)buf;
142
143 for (i = 0; i < len; i += 2, ptr++)
144 *ptr = readw(this->IO_ADDR_R);
145 }
146}
147
148static int gpio_nand_verifybuf16(struct mtd_info *mtd, const u_char *buf,
149 int len)
150{
151 struct nand_chip *this = mtd->priv;
152 unsigned short read, *p = (unsigned short *) buf;
153 int i, err = 0;
154 len >>= 1;
155
156 for (i = 0; i < len; i++) {
157 read = readw(this->IO_ADDR_R);
158 if (read != p[i]) {
159 pr_debug("%s: err at %d (read %04x vs %04x)\n",
160 __func__, i, read, p[i]);
161 err = -EFAULT;
162 }
163 }
164 return err;
165}
166
167
168static int gpio_nand_devready(struct mtd_info *mtd)
169{
170 struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
171 return gpio_get_value(gpiomtd->plat.gpio_rdy);
172}
173
174static int __devexit gpio_nand_remove(struct platform_device *dev)
175{
176 struct gpiomtd *gpiomtd = platform_get_drvdata(dev);
177 struct resource *res;
178
179 nand_release(&gpiomtd->mtd_info);
180
181 res = platform_get_resource(dev, IORESOURCE_MEM, 1);
182 iounmap(gpiomtd->io_sync);
183 if (res)
184 release_mem_region(res->start, res->end - res->start + 1);
185
186 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
187 iounmap(gpiomtd->nand_chip.IO_ADDR_R);
188 release_mem_region(res->start, res->end - res->start + 1);
189
190 if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
191 gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
192 gpio_set_value(gpiomtd->plat.gpio_nce, 1);
193
194 gpio_free(gpiomtd->plat.gpio_cle);
195 gpio_free(gpiomtd->plat.gpio_ale);
196 gpio_free(gpiomtd->plat.gpio_nce);
197 if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
198 gpio_free(gpiomtd->plat.gpio_nwp);
199 gpio_free(gpiomtd->plat.gpio_rdy);
200
201 kfree(gpiomtd);
202
203 return 0;
204}
205
206static void __iomem *request_and_remap(struct resource *res, size_t size,
207 const char *name, int *err)
208{
209 void __iomem *ptr;
210
211 if (!request_mem_region(res->start, res->end - res->start + 1, name)) {
212 *err = -EBUSY;
213 return NULL;
214 }
215
216 ptr = ioremap(res->start, size);
217 if (!ptr) {
218 release_mem_region(res->start, res->end - res->start + 1);
219 *err = -ENOMEM;
220 }
221 return ptr;
222}
223
224static int __devinit gpio_nand_probe(struct platform_device *dev)
225{
226 struct gpiomtd *gpiomtd;
227 struct nand_chip *this;
228 struct resource *res0, *res1;
229 int ret;
230
231 if (!dev->dev.platform_data)
232 return -EINVAL;
233
234 res0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
235 if (!res0)
236 return -EINVAL;
237
238 gpiomtd = kzalloc(sizeof(*gpiomtd), GFP_KERNEL);
239 if (gpiomtd == NULL) {
240 dev_err(&dev->dev, "failed to create NAND MTD\n");
241 return -ENOMEM;
242 }
243
244 this = &gpiomtd->nand_chip;
245 this->IO_ADDR_R = request_and_remap(res0, 2, "NAND", &ret);
246 if (!this->IO_ADDR_R) {
247 dev_err(&dev->dev, "unable to map NAND\n");
248 goto err_map;
249 }
250
251 res1 = platform_get_resource(dev, IORESOURCE_MEM, 1);
252 if (res1) {
253 gpiomtd->io_sync = request_and_remap(res1, 4, "NAND sync", &ret);
254 if (!gpiomtd->io_sync) {
255 dev_err(&dev->dev, "unable to map sync NAND\n");
256 goto err_sync;
257 }
258 }
259
260 memcpy(&gpiomtd->plat, dev->dev.platform_data, sizeof(gpiomtd->plat));
261
262 ret = gpio_request(gpiomtd->plat.gpio_nce, "NAND NCE");
263 if (ret)
264 goto err_nce;
265 gpio_direction_output(gpiomtd->plat.gpio_nce, 1);
266 if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) {
267 ret = gpio_request(gpiomtd->plat.gpio_nwp, "NAND NWP");
268 if (ret)
269 goto err_nwp;
270 gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
271 }
272 ret = gpio_request(gpiomtd->plat.gpio_ale, "NAND ALE");
273 if (ret)
274 goto err_ale;
275 gpio_direction_output(gpiomtd->plat.gpio_ale, 0);
276 ret = gpio_request(gpiomtd->plat.gpio_cle, "NAND CLE");
277 if (ret)
278 goto err_cle;
279 gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
280 ret = gpio_request(gpiomtd->plat.gpio_rdy, "NAND RDY");
281 if (ret)
282 goto err_rdy;
283 gpio_direction_input(gpiomtd->plat.gpio_rdy);
284
285
286 this->IO_ADDR_W = this->IO_ADDR_R;
287 this->ecc.mode = NAND_ECC_SOFT;
288 this->options = gpiomtd->plat.options;
289 this->chip_delay = gpiomtd->plat.chip_delay;
290
291 /* install our routines */
292 this->cmd_ctrl = gpio_nand_cmd_ctrl;
293 this->dev_ready = gpio_nand_devready;
294
295 if (this->options & NAND_BUSWIDTH_16) {
296 this->read_buf = gpio_nand_readbuf16;
297 this->write_buf = gpio_nand_writebuf16;
298 this->verify_buf = gpio_nand_verifybuf16;
299 } else {
300 this->read_buf = gpio_nand_readbuf;
301 this->write_buf = gpio_nand_writebuf;
302 this->verify_buf = gpio_nand_verifybuf;
303 }
304
305 /* set the mtd private data for the nand driver */
306 gpiomtd->mtd_info.priv = this;
307 gpiomtd->mtd_info.owner = THIS_MODULE;
308
309 if (nand_scan(&gpiomtd->mtd_info, 1)) {
310 dev_err(&dev->dev, "no nand chips found?\n");
311 ret = -ENXIO;
312 goto err_wp;
313 }
314
315 if (gpiomtd->plat.adjust_parts)
316 gpiomtd->plat.adjust_parts(&gpiomtd->plat,
317 gpiomtd->mtd_info.size);
318
319 add_mtd_partitions(&gpiomtd->mtd_info, gpiomtd->plat.parts,
320 gpiomtd->plat.num_parts);
321 platform_set_drvdata(dev, gpiomtd);
322
323 return 0;
324
325err_wp:
326 if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
327 gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
328 gpio_free(gpiomtd->plat.gpio_rdy);
329err_rdy:
330 gpio_free(gpiomtd->plat.gpio_cle);
331err_cle:
332 gpio_free(gpiomtd->plat.gpio_ale);
333err_ale:
334 if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
335 gpio_free(gpiomtd->plat.gpio_nwp);
336err_nwp:
337 gpio_free(gpiomtd->plat.gpio_nce);
338err_nce:
339 iounmap(gpiomtd->io_sync);
340 if (res1)
341 release_mem_region(res1->start, res1->end - res1->start + 1);
342err_sync:
343 iounmap(gpiomtd->nand_chip.IO_ADDR_R);
344 release_mem_region(res0->start, res0->end - res0->start + 1);
345err_map:
346 kfree(gpiomtd);
347 return ret;
348}
349
350static struct platform_driver gpio_nand_driver = {
351 .probe = gpio_nand_probe,
352 .remove = gpio_nand_remove,
353 .driver = {
354 .name = "gpio-nand",
355 },
356};
357
358static int __init gpio_nand_init(void)
359{
360 printk(KERN_INFO "GPIO NAND driver, © 2004 Simtec Electronics\n");
361
362 return platform_driver_register(&gpio_nand_driver);
363}
364
365static void __exit gpio_nand_exit(void)
366{
367 platform_driver_unregister(&gpio_nand_driver);
368}
369
370module_init(gpio_nand_init);
371module_exit(gpio_nand_exit);
372
373MODULE_LICENSE("GPL");
374MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
375MODULE_DESCRIPTION("GPIO NAND Driver");
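The new driver takes everything it needs from platform resources and platform data. A minimal board-file sketch follows, assuming hypothetical addresses, GPIO numbers and partition sizes; only the field names, the "gpio-nand" device name and the resource ordering visible in the driver above are taken as given:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand-gpio.h>

static struct mtd_partition example_nand_parts[] = {
	{ .name = "boot",   .offset = 0,          .size = 0x00100000 },
	{ .name = "rootfs", .offset = 0x00100000, .size = MTDPART_SIZ_FULL },
};

static struct gpio_nand_platdata example_nand_pdata = {
	.gpio_nce   = 10,		/* hypothetical GPIO numbers */
	.gpio_cle   = 11,
	.gpio_ale   = 12,
	.gpio_rdy   = 13,
	.gpio_nwp   = -1,		/* invalid GPIO: no write-protect line wired */
	.parts      = example_nand_parts,
	.num_parts  = ARRAY_SIZE(example_nand_parts),
	.chip_delay = 25,
};

static struct resource example_nand_res[] = {
	{	/* resource 0: NAND data/command window */
		.start = 0x40000000,
		.end   = 0x40000003,
		.flags = IORESOURCE_MEM,
	},
	/* an optional resource 1 would describe the I/O sync location */
};

static struct platform_device example_nand_device = {
	.name          = "gpio-nand",	/* matches gpio_nand_driver.driver.name */
	.id            = -1,
	.resource      = example_nand_res,
	.num_resources = ARRAY_SIZE(example_nand_res),
	.dev           = {
		.platform_data = &example_nand_pdata,
	},
};

Registering this with platform_device_register(&example_nand_device) from board init code would let gpio_nand_probe() pick it up.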
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
new file mode 100644
index 000000000000..21fd4f1c4806
--- /dev/null
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -0,0 +1,1077 @@
1/*
2 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
3 * Copyright 2008 Sascha Hauer, kernel@pengutronix.de
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
17 * MA 02110-1301, USA.
18 */
19
20#include <linux/delay.h>
21#include <linux/slab.h>
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/mtd/mtd.h>
25#include <linux/mtd/nand.h>
26#include <linux/mtd/partitions.h>
27#include <linux/interrupt.h>
28#include <linux/device.h>
29#include <linux/platform_device.h>
30#include <linux/clk.h>
31#include <linux/err.h>
32#include <linux/io.h>
33
34#include <asm/mach/flash.h>
35#include <mach/mxc_nand.h>
36
37#define DRIVER_NAME "mxc_nand"
38
39/* Addresses for NFC registers */
40#define NFC_BUF_SIZE 0xE00
41#define NFC_BUF_ADDR 0xE04
42#define NFC_FLASH_ADDR 0xE06
43#define NFC_FLASH_CMD 0xE08
44#define NFC_CONFIG 0xE0A
45#define NFC_ECC_STATUS_RESULT 0xE0C
46#define NFC_RSLTMAIN_AREA 0xE0E
47#define NFC_RSLTSPARE_AREA 0xE10
48#define NFC_WRPROT 0xE12
49#define NFC_UNLOCKSTART_BLKADDR 0xE14
50#define NFC_UNLOCKEND_BLKADDR 0xE16
51#define NFC_NF_WRPRST 0xE18
52#define NFC_CONFIG1 0xE1A
53#define NFC_CONFIG2 0xE1C
54
55/* Addresses for NFC RAM BUFFER Main area 0 */
56#define MAIN_AREA0 0x000
57#define MAIN_AREA1 0x200
58#define MAIN_AREA2 0x400
59#define MAIN_AREA3 0x600
60
61/* Addresses for NFC SPARE BUFFER Spare area 0 */
62#define SPARE_AREA0 0x800
63#define SPARE_AREA1 0x810
64#define SPARE_AREA2 0x820
65#define SPARE_AREA3 0x830
66
67/* Set INT to 0, FCMD to 1, rest to 0 in NFC_CONFIG2 Register
68 * for Command operation */
69#define NFC_CMD 0x1
70
71/* Set INT to 0, FADD to 1, rest to 0 in NFC_CONFIG2 Register
72 * for Address operation */
73#define NFC_ADDR 0x2
74
75/* Set INT to 0, FDI to 1, rest to 0 in NFC_CONFIG2 Register
76 * for Input operation */
77#define NFC_INPUT 0x4
78
79/* Set INT to 0, FDO to 001, rest to 0 in NFC_CONFIG2 Register
80 * for Data Output operation */
81#define NFC_OUTPUT 0x8
82
83/* Set INT to 0, FDO to 010, rest to 0 in NFC_CONFIG2 Register
84 * for Read ID operation */
85#define NFC_ID 0x10
86
87/* Set INT to 0, FDO to 100, rest to 0 in NFC_CONFIG2 Register
88 * for Read Status operation */
89#define NFC_STATUS 0x20
90
91/* Set INT to 1, rest to 0 in NFC_CONFIG2 Register for Read
92 * Status operation */
93#define NFC_INT 0x8000
94
95#define NFC_SP_EN (1 << 2)
96#define NFC_ECC_EN (1 << 3)
97#define NFC_INT_MSK (1 << 4)
98#define NFC_BIG (1 << 5)
99#define NFC_RST (1 << 6)
100#define NFC_CE (1 << 7)
101#define NFC_ONE_CYCLE (1 << 8)
102
103struct mxc_nand_host {
104 struct mtd_info mtd;
105 struct nand_chip nand;
106 struct mtd_partition *parts;
107 struct device *dev;
108
109 void __iomem *regs;
110 int spare_only;
111 int status_request;
112 int pagesize_2k;
113 uint16_t col_addr;
114 struct clk *clk;
115 int clk_act;
116 int irq;
117
118 wait_queue_head_t irq_waitq;
119};
120
121/* Define delays in microsec for NAND device operations */
122#define TROP_US_DELAY 2000
123/* Macros to get byte and bit positions of ECC */
124#define COLPOS(x) ((x) >> 3)
125#define BITPOS(x) ((x) & 0xf)
126
127/* Define single bit Error positions in Main & Spare area */
128#define MAIN_SINGLEBIT_ERROR 0x4
129#define SPARE_SINGLEBIT_ERROR 0x1
130
131/* OOB placement block for use with hardware ecc generation */
132static struct nand_ecclayout nand_hw_eccoob_8 = {
133 .eccbytes = 5,
134 .eccpos = {6, 7, 8, 9, 10},
135 .oobfree = {{0, 5}, {11, 5}, }
136};
137
138static struct nand_ecclayout nand_hw_eccoob_16 = {
139 .eccbytes = 5,
140 .eccpos = {6, 7, 8, 9, 10},
141 .oobfree = {{0, 6}, {12, 4}, }
142};
143
144#ifdef CONFIG_MTD_PARTITIONS
145static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL };
146#endif
147
148static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
149{
150 struct mxc_nand_host *host = dev_id;
151
152 uint16_t tmp;
153
154 tmp = readw(host->regs + NFC_CONFIG1);
155 tmp |= NFC_INT_MSK; /* Disable interrupt */
156 writew(tmp, host->regs + NFC_CONFIG1);
157
158 wake_up(&host->irq_waitq);
159
160 return IRQ_HANDLED;
161}
162
163/* This function polls the NANDFC to wait for the basic operation to
164 * complete by checking the INT bit of config2 register.
165 */
166static void wait_op_done(struct mxc_nand_host *host, int max_retries,
167 uint16_t param, int useirq)
168{
169 uint32_t tmp;
170
171 if (useirq) {
172 if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) {
173
174 tmp = readw(host->regs + NFC_CONFIG1);
175 tmp &= ~NFC_INT_MSK; /* Enable interrupt */
176 writew(tmp, host->regs + NFC_CONFIG1);
177
178 wait_event(host->irq_waitq,
179 readw(host->regs + NFC_CONFIG2) & NFC_INT);
180
181 tmp = readw(host->regs + NFC_CONFIG2);
182 tmp &= ~NFC_INT;
183 writew(tmp, host->regs + NFC_CONFIG2);
184 }
185 } else {
186 while (max_retries-- > 0) {
187 if (readw(host->regs + NFC_CONFIG2) & NFC_INT) {
188 tmp = readw(host->regs + NFC_CONFIG2);
189 tmp &= ~NFC_INT;
190 writew(tmp, host->regs + NFC_CONFIG2);
191 break;
192 }
193 udelay(1);
194 }
195 if (max_retries <= 0)
196 DEBUG(MTD_DEBUG_LEVEL0, "%s(%d): INT not set\n",
197 __func__, param);
198 }
199}
200
201/* This function issues the specified command to the NAND device and
202 * waits for completion. */
203static void send_cmd(struct mxc_nand_host *host, uint16_t cmd, int useirq)
204{
205 DEBUG(MTD_DEBUG_LEVEL3, "send_cmd(host, 0x%x, %d)\n", cmd, useirq);
206
207 writew(cmd, host->regs + NFC_FLASH_CMD);
208 writew(NFC_CMD, host->regs + NFC_CONFIG2);
209
210 /* Wait for operation to complete */
211 wait_op_done(host, TROP_US_DELAY, cmd, useirq);
212}
213
214/* This function sends an address (or partial address) to the
215 * NAND device. The address is used to select the source/destination for
216 * a NAND command. */
217static void send_addr(struct mxc_nand_host *host, uint16_t addr, int islast)
218{
219 DEBUG(MTD_DEBUG_LEVEL3, "send_addr(host, 0x%x %d)\n", addr, islast);
220
221 writew(addr, host->regs + NFC_FLASH_ADDR);
222 writew(NFC_ADDR, host->regs + NFC_CONFIG2);
223
224 /* Wait for operation to complete */
225 wait_op_done(host, TROP_US_DELAY, addr, islast);
226}
227
228/* This function requests the NANDFC to initiate the transfer
229 * of data currently in the NANDFC RAM buffer to the NAND device. */
230static void send_prog_page(struct mxc_nand_host *host, uint8_t buf_id,
231 int spare_only)
232{
233 DEBUG(MTD_DEBUG_LEVEL3, "send_prog_page (%d)\n", spare_only);
234
235 /* NANDFC buffer 0 is used for page read/write */
236 writew(buf_id, host->regs + NFC_BUF_ADDR);
237
238 /* Configure spare or page+spare access */
239 if (!host->pagesize_2k) {
240 uint16_t config1 = readw(host->regs + NFC_CONFIG1);
241 if (spare_only)
242 config1 |= NFC_SP_EN;
243 else
244 config1 &= ~(NFC_SP_EN);
245 writew(config1, host->regs + NFC_CONFIG1);
246 }
247
248 writew(NFC_INPUT, host->regs + NFC_CONFIG2);
249
250 /* Wait for operation to complete */
251 wait_op_done(host, TROP_US_DELAY, spare_only, true);
252}
253
254/* This function requests the NANDFC to initiate the transfer of data
255 * from the NAND device into the NANDFC RAM buffer. */
256static void send_read_page(struct mxc_nand_host *host, uint8_t buf_id,
257 int spare_only)
258{
259 DEBUG(MTD_DEBUG_LEVEL3, "send_read_page (%d)\n", spare_only);
260
261 /* NANDFC buffer 0 is used for page read/write */
262 writew(buf_id, host->regs + NFC_BUF_ADDR);
263
264 /* Configure spare or page+spare access */
265 if (!host->pagesize_2k) {
266 uint32_t config1 = readw(host->regs + NFC_CONFIG1);
267 if (spare_only)
268 config1 |= NFC_SP_EN;
269 else
270 config1 &= ~NFC_SP_EN;
271 writew(config1, host->regs + NFC_CONFIG1);
272 }
273
274 writew(NFC_OUTPUT, host->regs + NFC_CONFIG2);
275
276 /* Wait for operation to complete */
277 wait_op_done(host, TROP_US_DELAY, spare_only, true);
278}
279
280/* Request the NANDFC to perform a read of the NAND device ID. */
281static void send_read_id(struct mxc_nand_host *host)
282{
283 struct nand_chip *this = &host->nand;
284 uint16_t tmp;
285
286 /* NANDFC buffer 0 is used for device ID output */
287 writew(0x0, host->regs + NFC_BUF_ADDR);
288
289 /* Read ID into main buffer */
290 tmp = readw(host->regs + NFC_CONFIG1);
291 tmp &= ~NFC_SP_EN;
292 writew(tmp, host->regs + NFC_CONFIG1);
293
294 writew(NFC_ID, host->regs + NFC_CONFIG2);
295
296 /* Wait for operation to complete */
297 wait_op_done(host, TROP_US_DELAY, 0, true);
298
299 if (this->options & NAND_BUSWIDTH_16) {
300 void __iomem *main_buf = host->regs + MAIN_AREA0;
301 /* compress the ID info */
302 writeb(readb(main_buf + 2), main_buf + 1);
303 writeb(readb(main_buf + 4), main_buf + 2);
304 writeb(readb(main_buf + 6), main_buf + 3);
305 writeb(readb(main_buf + 8), main_buf + 4);
306 writeb(readb(main_buf + 10), main_buf + 5);
307 }
308}
309
310/* This function requests the NANDFC to perform a read of the
311 * NAND device status and returns the current status. */
312static uint16_t get_dev_status(struct mxc_nand_host *host)
313{
314 void __iomem *main_buf = host->regs + MAIN_AREA1;
315 uint32_t store;
316 uint16_t ret, tmp;
317 /* Issue status request to NAND device */
318
319 /* save the first word of main area 1 and restore it later */
320 store = readl(main_buf);
321 /* NANDFC buffer 1 is used for device status to prevent
322 * corruption of read/write buffer on status requests. */
323 writew(1, host->regs + NFC_BUF_ADDR);
324
325 /* Read status into main buffer */
326 tmp = readw(host->regs + NFC_CONFIG1);
327 tmp &= ~NFC_SP_EN;
328 writew(tmp, host->regs + NFC_CONFIG1);
329
330 writew(NFC_STATUS, host->regs + NFC_CONFIG2);
331
332 /* Wait for operation to complete */
333 wait_op_done(host, TROP_US_DELAY, 0, true);
334
335 /* Status is placed in first word of main buffer */
336 /* get the status, then restore the area 1 data */
337 ret = readw(main_buf);
338 writel(store, main_buf);
339
340 return ret;
341}
342
343/* This function is used by the upper layer to check if the device is ready */
344static int mxc_nand_dev_ready(struct mtd_info *mtd)
345{
346 /*
347 * NFC handles R/B internally. Therefore, this function
348 * always returns status as ready.
349 */
350 return 1;
351}
352
353static void mxc_nand_enable_hwecc(struct mtd_info *mtd, int mode)
354{
355 /*
356 * If HW ECC is enabled, we turn it on during init. There is
357 * no need to enable again here.
358 */
359}
360
361static int mxc_nand_correct_data(struct mtd_info *mtd, u_char *dat,
362 u_char *read_ecc, u_char *calc_ecc)
363{
364 struct nand_chip *nand_chip = mtd->priv;
365 struct mxc_nand_host *host = nand_chip->priv;
366
367 /*
368 * 1-Bit errors are automatically corrected in HW. No need for
369 * additional correction. 2-Bit errors cannot be corrected by
370 * HW ECC, so we need to return failure
371 */
372 uint16_t ecc_status = readw(host->regs + NFC_ECC_STATUS_RESULT);
373
374 if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
375 DEBUG(MTD_DEBUG_LEVEL0,
376 "MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
377 return -1;
378 }
379
380 return 0;
381}
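
/*
 * Worked example for the check above (the NFC_ECC_STATUS_RESULT field layout
 * is inferred from the test, not spelled out in this patch): bits [1:0]
 * report the error count for the main area and bits [3:2] the count for the
 * spare area. So 0x0 means no error, 0x1 a single-bit error that the NFC has
 * already corrected in the main area, 0x2 an uncorrectable double-bit error
 * in the main area and 0x8 an uncorrectable double-bit error in the spare
 * area; only a count of 2 in either field makes the function return -1.
 */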
382
383static int mxc_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
384 u_char *ecc_code)
385{
386 return 0;
387}
388
389static u_char mxc_nand_read_byte(struct mtd_info *mtd)
390{
391 struct nand_chip *nand_chip = mtd->priv;
392 struct mxc_nand_host *host = nand_chip->priv;
393 uint8_t ret = 0;
394 uint16_t col, rd_word;
395 uint16_t __iomem *main_buf = host->regs + MAIN_AREA0;
396 uint16_t __iomem *spare_buf = host->regs + SPARE_AREA0;
397
398 /* Check for status request */
399 if (host->status_request)
400 return get_dev_status(host) & 0xFF;
401
402 /* Get column for 16-bit access */
403 col = host->col_addr >> 1;
404
405 /* If we are accessing the spare region */
406 if (host->spare_only)
407 rd_word = readw(&spare_buf[col]);
408 else
409 rd_word = readw(&main_buf[col]);
410
411 /* Pick upper/lower byte of word from RAM buffer */
412 if (host->col_addr & 0x1)
413 ret = (rd_word >> 8) & 0xFF;
414 else
415 ret = rd_word & 0xFF;
416
417 /* Update saved column address */
418 host->col_addr++;
419
420 return ret;
421}
422
423static uint16_t mxc_nand_read_word(struct mtd_info *mtd)
424{
425 struct nand_chip *nand_chip = mtd->priv;
426 struct mxc_nand_host *host = nand_chip->priv;
427 uint16_t col, rd_word, ret;
428 uint16_t __iomem *p;
429
430 DEBUG(MTD_DEBUG_LEVEL3,
431 "mxc_nand_read_word(col = %d)\n", host->col_addr);
432
433 col = host->col_addr;
434 /* Adjust saved column address */
435 if (col < mtd->writesize && host->spare_only)
436 col += mtd->writesize;
437
438 if (col < mtd->writesize)
439 p = (host->regs + MAIN_AREA0) + (col >> 1);
440 else
441 p = (host->regs + SPARE_AREA0) + ((col - mtd->writesize) >> 1);
442
443 if (col & 1) {
444 rd_word = readw(p);
445 ret = (rd_word >> 8) & 0xff;
446 rd_word = readw(&p[1]);
447 ret |= (rd_word << 8) & 0xff00;
448
449 } else
450 ret = readw(p);
451
452 /* Update saved column address */
453 host->col_addr = col + 2;
454
455 return ret;
456}
457
458/* Write data of length len from buffer buf to the NAND Flash. The data is
459 * first copied to the NFC RAM buffer; after the Data Input operation by
460 * the NFC, it is written to the NAND Flash. */
461static void mxc_nand_write_buf(struct mtd_info *mtd,
462 const u_char *buf, int len)
463{
464 struct nand_chip *nand_chip = mtd->priv;
465 struct mxc_nand_host *host = nand_chip->priv;
466 int n, col, i = 0;
467
468 DEBUG(MTD_DEBUG_LEVEL3,
469 "mxc_nand_write_buf(col = %d, len = %d)\n", host->col_addr,
470 len);
471
472 col = host->col_addr;
473
474 /* Adjust saved column address */
475 if (col < mtd->writesize && host->spare_only)
476 col += mtd->writesize;
477
478 n = mtd->writesize + mtd->oobsize - col;
479 n = min(len, n);
480
481 DEBUG(MTD_DEBUG_LEVEL3,
482 "%s:%d: col = %d, n = %d\n", __func__, __LINE__, col, n);
483
484 while (n) {
485 void __iomem *p;
486
487 if (col < mtd->writesize)
488 p = host->regs + MAIN_AREA0 + (col & ~3);
489 else
490 p = host->regs + SPARE_AREA0 -
491 mtd->writesize + (col & ~3);
492
493 DEBUG(MTD_DEBUG_LEVEL3, "%s:%d: p = %p\n", __func__,
494 __LINE__, p);
495
496 if (((col | (int)&buf[i]) & 3) || n < 16) {
497 uint32_t data = 0;
498
499 if (col & 3 || n < 4)
500 data = readl(p);
501
502 switch (col & 3) {
503 case 0:
504 if (n) {
505 data = (data & 0xffffff00) |
506 (buf[i++] << 0);
507 n--;
508 col++;
509 }
510 case 1:
511 if (n) {
512 data = (data & 0xffff00ff) |
513 (buf[i++] << 8);
514 n--;
515 col++;
516 }
517 case 2:
518 if (n) {
519 data = (data & 0xff00ffff) |
520 (buf[i++] << 16);
521 n--;
522 col++;
523 }
524 case 3:
525 if (n) {
526 data = (data & 0x00ffffff) |
527 (buf[i++] << 24);
528 n--;
529 col++;
530 }
531 }
532
533 writel(data, p);
534 } else {
535 int m = mtd->writesize - col;
536
537 if (col >= mtd->writesize)
538 m += mtd->oobsize;
539
540 m = min(n, m) & ~3;
541
542 DEBUG(MTD_DEBUG_LEVEL3,
543 "%s:%d: n = %d, m = %d, i = %d, col = %d\n",
544 __func__, __LINE__, n, m, i, col);
545
546 memcpy(p, &buf[i], m);
547 col += m;
548 i += m;
549 n -= m;
550 }
551 }
552 /* Update saved column address */
553 host->col_addr = col;
554}
555
556/* Read the data buffer from the NAND Flash. To read the data from the NAND
557 * Flash, the data output cycle is first initiated by the NFC, which copies
558 * the data to the RAM buffer. This data of length len is then copied to buf.
559 */
560static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
561{
562 struct nand_chip *nand_chip = mtd->priv;
563 struct mxc_nand_host *host = nand_chip->priv;
564 int n, col, i = 0;
565
566 DEBUG(MTD_DEBUG_LEVEL3,
567 "mxc_nand_read_buf(col = %d, len = %d)\n", host->col_addr, len);
568
569 col = host->col_addr;
570
571 /* Adjust saved column address */
572 if (col < mtd->writesize && host->spare_only)
573 col += mtd->writesize;
574
575 n = mtd->writesize + mtd->oobsize - col;
576 n = min(len, n);
577
578 while (n) {
579 void __iomem *p;
580
581 if (col < mtd->writesize)
582 p = host->regs + MAIN_AREA0 + (col & ~3);
583 else
584 p = host->regs + SPARE_AREA0 -
585 mtd->writesize + (col & ~3);
586
587 if (((col | (int)&buf[i]) & 3) || n < 16) {
588 uint32_t data;
589
590 data = readl(p);
591 switch (col & 3) {
592 case 0:
593 if (n) {
594 buf[i++] = (uint8_t) (data);
595 n--;
596 col++;
597 }
598 case 1:
599 if (n) {
600 buf[i++] = (uint8_t) (data >> 8);
601 n--;
602 col++;
603 }
604 case 2:
605 if (n) {
606 buf[i++] = (uint8_t) (data >> 16);
607 n--;
608 col++;
609 }
610 case 3:
611 if (n) {
612 buf[i++] = (uint8_t) (data >> 24);
613 n--;
614 col++;
615 }
616 }
617 } else {
618 int m = mtd->writesize - col;
619
620 if (col >= mtd->writesize)
621 m += mtd->oobsize;
622
623 m = min(n, m) & ~3;
624 memcpy(&buf[i], p, m);
625 col += m;
626 i += m;
627 n -= m;
628 }
629 }
630 /* Update saved column address */
631 host->col_addr = col;
632
633}
634
635/* Used by the upper layer to verify the data in the NAND Flash
636 * against the data in buf. */
637static int mxc_nand_verify_buf(struct mtd_info *mtd,
638 const u_char *buf, int len)
639{
640 return -EFAULT;
641}
642
643/* This function is used by the upper layer to select and
644 * deselect the NAND chip */
645static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
646{
647 struct nand_chip *nand_chip = mtd->priv;
648 struct mxc_nand_host *host = nand_chip->priv;
649
650#ifdef CONFIG_MTD_NAND_MXC_FORCE_CE
651 if (chip > 0) {
652 DEBUG(MTD_DEBUG_LEVEL0,
653 "ERROR: Illegal chip select (chip = %d)\n", chip);
654 return;
655 }
656
657 if (chip == -1) {
658 writew(readw(host->regs + NFC_CONFIG1) & ~NFC_CE,
659 host->regs + NFC_CONFIG1);
660 return;
661 }
662
663 writew(readw(host->regs + NFC_CONFIG1) | NFC_CE,
664 host->regs + NFC_CONFIG1);
665#endif
666
667 switch (chip) {
668 case -1:
669 /* Disable the NFC clock */
670 if (host->clk_act) {
671 clk_disable(host->clk);
672 host->clk_act = 0;
673 }
674 break;
675 case 0:
676 /* Enable the NFC clock */
677 if (!host->clk_act) {
678 clk_enable(host->clk);
679 host->clk_act = 1;
680 }
681 break;
682
683 default:
684 break;
685 }
686}
687
688/* Used by the upper layer to issue a command to the NAND Flash for
689 * the different operations to be carried out on the NAND Flash */
690static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
691 int column, int page_addr)
692{
693 struct nand_chip *nand_chip = mtd->priv;
694 struct mxc_nand_host *host = nand_chip->priv;
695 int useirq = true;
696
697 DEBUG(MTD_DEBUG_LEVEL3,
698 "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
699 command, column, page_addr);
700
701 /* Reset command state information */
702 host->status_request = false;
703
704 /* Command pre-processing step */
705 switch (command) {
706
707 case NAND_CMD_STATUS:
708 host->col_addr = 0;
709 host->status_request = true;
710 break;
711
712 case NAND_CMD_READ0:
713 host->col_addr = column;
714 host->spare_only = false;
715 useirq = false;
716 break;
717
718 case NAND_CMD_READOOB:
719 host->col_addr = column;
720 host->spare_only = true;
721 useirq = false;
722 if (host->pagesize_2k)
723 command = NAND_CMD_READ0; /* only READ0 is valid */
724 break;
725
726 case NAND_CMD_SEQIN:
727 if (column >= mtd->writesize) {
728 /*
729 * FIXME: before sending the SEQIN command for an OOB write,
730 * we must read one page out.
731 * Since the K9F1GXX has no READ1 command to set the current HW
732 * pointer to the spare area, we must write the whole page
733 * including the OOB together.
734 */
735 if (host->pagesize_2k)
736 /* call ourself to read a page */
737 mxc_nand_command(mtd, NAND_CMD_READ0, 0,
738 page_addr);
739
740 host->col_addr = column - mtd->writesize;
741 host->spare_only = true;
742
743 /* Set program pointer to spare region */
744 if (!host->pagesize_2k)
745 send_cmd(host, NAND_CMD_READOOB, false);
746 } else {
747 host->spare_only = false;
748 host->col_addr = column;
749
750 /* Set program pointer to page start */
751 if (!host->pagesize_2k)
752 send_cmd(host, NAND_CMD_READ0, false);
753 }
754 useirq = false;
755 break;
756
757 case NAND_CMD_PAGEPROG:
758 send_prog_page(host, 0, host->spare_only);
759
760 if (host->pagesize_2k) {
761 /* the page data spans all 4 buffer areas */
762 send_prog_page(host, 1, host->spare_only);
763 send_prog_page(host, 2, host->spare_only);
764 send_prog_page(host, 3, host->spare_only);
765 }
766
767 break;
768
769 case NAND_CMD_ERASE1:
770 useirq = false;
771 break;
772 }
773
774 /* Write out the command to the device. */
775 send_cmd(host, command, useirq);
776
777 /* Write out column address, if necessary */
778 if (column != -1) {
779 /*
780 * MXC NANDFC can only perform full page+spare or
781 * spare-only read/write. When the upper
782 * layers perform a read/write buf operation,
783 * we will use the saved column address to index into
784 * the full page.
785 */
786 send_addr(host, 0, page_addr == -1);
787 if (host->pagesize_2k)
788 /* another col addr cycle for 2k page */
789 send_addr(host, 0, false);
790 }
791
792 /* Write out page address, if necessary */
793 if (page_addr != -1) {
794 /* paddr_0 - p_addr_7 */
795 send_addr(host, (page_addr & 0xff), false);
796
797 if (host->pagesize_2k) {
798 send_addr(host, (page_addr >> 8) & 0xFF, false);
799 if (mtd->size >= 0x40000000)
800 send_addr(host, (page_addr >> 16) & 0xff, true);
801 } else {
802 /* One more address cycle for higher density devices */
803 if (mtd->size >= 0x4000000) {
804 /* paddr_8 - paddr_15 */
805 send_addr(host, (page_addr >> 8) & 0xff, false);
806 send_addr(host, (page_addr >> 16) & 0xff, true);
807 } else
808 /* paddr_8 - paddr_15 */
809 send_addr(host, (page_addr >> 8) & 0xff, true);
810 }
811 }
812
813 /* Command post-processing step */
814 switch (command) {
815
816 case NAND_CMD_RESET:
817 break;
818
819 case NAND_CMD_READOOB:
820 case NAND_CMD_READ0:
821 if (host->pagesize_2k) {
822 /* send read confirm command */
823 send_cmd(host, NAND_CMD_READSTART, true);
824 /* read for each AREA */
825 send_read_page(host, 0, host->spare_only);
826 send_read_page(host, 1, host->spare_only);
827 send_read_page(host, 2, host->spare_only);
828 send_read_page(host, 3, host->spare_only);
829 } else
830 send_read_page(host, 0, host->spare_only);
831 break;
832
833 case NAND_CMD_READID:
834 send_read_id(host);
835 break;
836
837 case NAND_CMD_PAGEPROG:
838 break;
839
840 case NAND_CMD_STATUS:
841 break;
842
843 case NAND_CMD_ERASE2:
844 break;
845 }
846}
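
/*
 * Worked example of the command/address sequence above, for the small-page
 * path that this probe configures (host->pagesize_2k == 0) on a device of at
 * least 64 MiB (mtd->size >= 0x4000000). The page address is illustrative.
 * A NAND_CMD_READ0 at page_addr 0x12345 expands to:
 *
 *	send_cmd(host, NAND_CMD_READ0, false);
 *	send_addr(host, 0x00, false);	column cycle; always 0, columns are
 *					handled in software via host->col_addr
 *	send_addr(host, 0x45, false);	page_addr[7:0]
 *	send_addr(host, 0x23, false);	page_addr[15:8]
 *	send_addr(host, 0x01, true);	page_addr[23:16], last cycle
 *	send_read_page(host, 0, host->spare_only);
 */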
847
848static int __init mxcnd_probe(struct platform_device *pdev)
849{
850 struct nand_chip *this;
851 struct mtd_info *mtd;
852 struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
853 struct mxc_nand_host *host;
854 struct resource *res;
855 uint16_t tmp;
856 int err = 0, nr_parts = 0;
857
858 /* Allocate memory for MTD device structure and private data */
859 host = kzalloc(sizeof(struct mxc_nand_host), GFP_KERNEL);
860 if (!host)
861 return -ENOMEM;
862
863 host->dev = &pdev->dev;
864 /* structures must be linked */
865 this = &host->nand;
866 mtd = &host->mtd;
867 mtd->priv = this;
868 mtd->owner = THIS_MODULE;
869
870 /* 50 us command delay time */
871 this->chip_delay = 5;
872
873 this->priv = host;
874 this->dev_ready = mxc_nand_dev_ready;
875 this->cmdfunc = mxc_nand_command;
876 this->select_chip = mxc_nand_select_chip;
877 this->read_byte = mxc_nand_read_byte;
878 this->read_word = mxc_nand_read_word;
879 this->write_buf = mxc_nand_write_buf;
880 this->read_buf = mxc_nand_read_buf;
881 this->verify_buf = mxc_nand_verify_buf;
882
883 host->clk = clk_get(&pdev->dev, "nfc_clk");
884 if (IS_ERR(host->clk))
885 goto eclk;
886
887 clk_enable(host->clk);
888 host->clk_act = 1;
889
890 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
891 if (!res) {
892 err = -ENODEV;
893 goto eres;
894 }
895
896 host->regs = ioremap(res->start, res->end - res->start + 1);
897 if (!host->regs) {
898 err = -EIO;
899 goto eres;
900 }
901
902 tmp = readw(host->regs + NFC_CONFIG1);
903 tmp |= NFC_INT_MSK;
904 writew(tmp, host->regs + NFC_CONFIG1);
905
906 init_waitqueue_head(&host->irq_waitq);
907
908 host->irq = platform_get_irq(pdev, 0);
909
910 err = request_irq(host->irq, mxc_nfc_irq, 0, "mxc_nd", host);
911 if (err)
912 goto eirq;
913
914 if (pdata->hw_ecc) {
915 this->ecc.calculate = mxc_nand_calculate_ecc;
916 this->ecc.hwctl = mxc_nand_enable_hwecc;
917 this->ecc.correct = mxc_nand_correct_data;
918 this->ecc.mode = NAND_ECC_HW;
919 this->ecc.size = 512;
920 this->ecc.bytes = 3;
921 this->ecc.layout = &nand_hw_eccoob_8;
922 tmp = readw(host->regs + NFC_CONFIG1);
923 tmp |= NFC_ECC_EN;
924 writew(tmp, host->regs + NFC_CONFIG1);
925 } else {
926 this->ecc.size = 512;
927 this->ecc.bytes = 3;
928 this->ecc.layout = &nand_hw_eccoob_8;
929 this->ecc.mode = NAND_ECC_SOFT;
930 tmp = readw(host->regs + NFC_CONFIG1);
931 tmp &= ~NFC_ECC_EN;
932 writew(tmp, host->regs + NFC_CONFIG1);
933 }
934
935 /* Reset NAND */
936 this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
937
938 /* preset operation */
939 /* Unlock the internal RAM Buffer */
940 writew(0x2, host->regs + NFC_CONFIG);
941
942 /* Blocks to be unlocked */
943 writew(0x0, host->regs + NFC_UNLOCKSTART_BLKADDR);
944 writew(0x4000, host->regs + NFC_UNLOCKEND_BLKADDR);
945
946 /* Unlock Block Command for given address range */
947 writew(0x4, host->regs + NFC_WRPROT);
948
949 /* NAND bus width determines access functions used by the upper layer */
950 if (pdata->width == 2) {
951 this->options |= NAND_BUSWIDTH_16;
952 this->ecc.layout = &nand_hw_eccoob_16;
953 }
954
955 host->pagesize_2k = 0;
956
957 /* Scan to find existence of the device */
958 if (nand_scan(mtd, 1)) {
959 DEBUG(MTD_DEBUG_LEVEL0,
960 "MXC_ND: Unable to find any NAND device.\n");
961 err = -ENXIO;
962 goto escan;
963 }
964
965 /* Register the partitions */
966#ifdef CONFIG_MTD_PARTITIONS
967 nr_parts =
968 parse_mtd_partitions(mtd, part_probes, &host->parts, 0);
969 if (nr_parts > 0)
970 add_mtd_partitions(mtd, host->parts, nr_parts);
971 else
972#endif
973 {
974 pr_info("Registering %s as whole device\n", mtd->name);
975 add_mtd_device(mtd);
976 }
977
978 platform_set_drvdata(pdev, host);
979
980 return 0;
981
982escan:
983 free_irq(host->irq, NULL);
984eirq:
985 iounmap(host->regs);
986eres:
987 clk_put(host->clk);
988eclk:
989 kfree(host);
990
991 return err;
992}
993
994static int __devexit mxcnd_remove(struct platform_device *pdev)
995{
996 struct mxc_nand_host *host = platform_get_drvdata(pdev);
997
998 clk_put(host->clk);
999
1000 platform_set_drvdata(pdev, NULL);
1001
1002 nand_release(&host->mtd);
1003 free_irq(host->irq, NULL);
1004 iounmap(host->regs);
1005 kfree(host);
1006
1007 return 0;
1008}
1009
1010#ifdef CONFIG_PM
1011static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state)
1012{
1013 struct mtd_info *info = platform_get_drvdata(pdev);
1014 int ret = 0;
1015
1016 DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n");
1017 if (info)
1018 ret = info->suspend(info);
1019
1020 /* Disable the NFC clock */
1021 clk_disable(nfc_clk); /* FIXME */
1022
1023 return ret;
1024}
1025
1026static int mxcnd_resume(struct platform_device *pdev)
1027{
1028 struct mtd_info *info = platform_get_drvdata(pdev);
1029 int ret = 0;
1030
1031 DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n");
1032 /* Enable the NFC clock */
1033 clk_enable(nfc_clk); /* FIXME */
1034
1035 if (info)
1036 info->resume(info);
1037
1038 return ret;
1039}
1040
1041#else
1042# define mxcnd_suspend NULL
1043# define mxcnd_resume NULL
1044#endif /* CONFIG_PM */
1045
1046static struct platform_driver mxcnd_driver = {
1047 .driver = {
1048 .name = DRIVER_NAME,
1049 },
1050 .remove = __exit_p(mxcnd_remove),
1051 .suspend = mxcnd_suspend,
1052 .resume = mxcnd_resume,
1053};
1054
1055static int __init mxc_nd_init(void)
1056{
1057 /* Register the device driver structure. */
1058 pr_info("MXC MTD nand Driver\n");
1059 if (platform_driver_probe(&mxcnd_driver, mxcnd_probe) != 0) {
1060 printk(KERN_ERR "Driver register failed for mxcnd_driver\n");
1061 return -ENODEV;
1062 }
1063 return 0;
1064}
1065
1066static void __exit mxc_nd_cleanup(void)
1067{
1068 /* Unregister the device structure */
1069 platform_driver_unregister(&mxcnd_driver);
1070}
1071
1072module_init(mxc_nd_init);
1073module_exit(mxc_nd_cleanup);
1074
1075MODULE_AUTHOR("Freescale Semiconductor, Inc.");
1076MODULE_DESCRIPTION("MXC NAND MTD driver");
1077MODULE_LICENSE("GPL");
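
For context, the probe above expects one memory resource, one interrupt, an "nfc_clk" clock and a mxc_nand_platform_data structure supplying the bus width and the hardware-ECC switch. A board file would wire the device up roughly as in the sketch below; the register base, the interrupt number and everything beyond the .width and .hw_ecc fields actually read by mxcnd_probe() are placeholders, not part of this patch.

/* board file fragment; needs <linux/platform_device.h> and <mach/mxc_nand.h> */
static struct mxc_nand_platform_data mxcnd_board_pdata = {
	.width	= 1,	/* bus width in bytes; 2 selects NAND_BUSWIDTH_16 */
	.hw_ecc	= 1,	/* take the hardware ECC path in mxcnd_probe() */
};

static struct resource mxcnd_board_resources[] = {
	{
		.start	= 0xd8000000,	/* placeholder NFC register base */
		.end	= 0xd8000fff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 29,		/* placeholder NFC interrupt */
		.end	= 29,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device mxcnd_board_device = {
	.name		= "mxc_nand",	/* must match DRIVER_NAME above */
	.id		= 0,
	.resource	= mxcnd_board_resources,
	.num_resources	= ARRAY_SIZE(mxcnd_board_resources),
	.dev		= {
		.platform_data	= &mxcnd_board_pdata,
	},
};

/* called from the board's init_machine hook: */
/* platform_device_register(&mxcnd_board_device); */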
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index d1129bae6c27..0a9c9cd33f96 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -801,9 +801,9 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
801 * nand_read_subpage - [REPLACABLE] software ecc based sub-page read function 801 * nand_read_subpage - [REPLACABLE] software ecc based sub-page read function
802 * @mtd: mtd info structure 802 * @mtd: mtd info structure
803 * @chip: nand chip info structure 803 * @chip: nand chip info structure
804 * @dataofs offset of requested data within the page 804 * @data_offs: offset of requested data within the page
805 * @readlen data length 805 * @readlen: data length
806 * @buf: buffer to store read data 806 * @bufpoi: buffer to store read data
807 */ 807 */
808static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi) 808static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi)
809{ 809{
@@ -2042,7 +2042,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2042 return -EINVAL; 2042 return -EINVAL;
2043 } 2043 }
2044 2044
2045 instr->fail_addr = 0xffffffff; 2045 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
2046 2046
2047 /* Grab the lock and see if the device is available */ 2047 /* Grab the lock and see if the device is available */
2048 nand_get_device(chip, mtd, FL_ERASING); 2048 nand_get_device(chip, mtd, FL_ERASING);
@@ -2318,6 +2318,12 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2318 /* Select the device */ 2318 /* Select the device */
2319 chip->select_chip(mtd, 0); 2319 chip->select_chip(mtd, 0);
2320 2320
2321 /*
2322 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
2323 * after power-up
2324 */
2325 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
2326
2321 /* Send the command for reading device ID */ 2327 /* Send the command for reading device ID */
2322 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 2328 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
2323 2329
@@ -2488,6 +2494,8 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips)
2488 /* Check for a chip array */ 2494 /* Check for a chip array */
2489 for (i = 1; i < maxchips; i++) { 2495 for (i = 1; i < maxchips; i++) {
2490 chip->select_chip(mtd, i); 2496 chip->select_chip(mtd, i);
2497 /* See comment in nand_get_flash_type for reset */
2498 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
2491 /* Send the command for reading device ID */ 2499 /* Send the command for reading device ID */
2492 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 2500 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
2493 /* Read manufacturer and device IDs */ 2501 /* Read manufacturer and device IDs */
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 918a806a8471..868147acce2c 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -1,13 +1,18 @@
1/* 1/*
2 * This file contains an ECC algorithm from Toshiba that detects and 2 * This file contains an ECC algorithm that detects and corrects 1 bit
3 * corrects 1 bit errors in a 256 byte block of data. 3 * errors in a 256 byte block of data.
4 * 4 *
5 * drivers/mtd/nand/nand_ecc.c 5 * drivers/mtd/nand/nand_ecc.c
6 * 6 *
7 * Copyright (C) 2000-2004 Steven J. Hill (sjhill@realitydiluted.com) 7 * Copyright © 2008 Koninklijke Philips Electronics NV.
8 * Toshiba America Electronics Components, Inc. 8 * Author: Frans Meulenbroeks
9 * 9 *
10 * Copyright (C) 2006 Thomas Gleixner <tglx@linutronix.de> 10 * Completely replaces the previous ECC implementation which was written by:
11 * Steven J. Hill (sjhill@realitydiluted.com)
12 * Thomas Gleixner (tglx@linutronix.de)
13 *
14 * Information on how this algorithm works and how it was developed
15 * can be found in Documentation/mtd/nand_ecc.txt
11 * 16 *
12 * This file is free software; you can redistribute it and/or modify it 17 * This file is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the 18 * under the terms of the GNU General Public License as published by the
@@ -23,174 +28,475 @@
23 * with this file; if not, write to the Free Software Foundation, Inc., 28 * with this file; if not, write to the Free Software Foundation, Inc.,
24 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 29 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
25 * 30 *
26 * As a special exception, if other files instantiate templates or use
27 * macros or inline functions from these files, or you compile these
28 * files and link them with other works to produce a work based on these
29 * files, these files do not by themselves cause the resulting work to be
30 * covered by the GNU General Public License. However the source code for
31 * these files must still be made available in accordance with section (3)
32 * of the GNU General Public License.
33 *
34 * This exception does not invalidate any other reasons why a work based on
35 * this file might be covered by the GNU General Public License.
36 */ 31 */
37 32
33/*
34 * The STANDALONE macro is useful when running the code outside the kernel
35 * e.g. when running the code in a testbed or a benchmark program.
36 * When STANDALONE is used, the module-related macros are commented out
37 * as well as the linux include files.
38 * Instead a private definition of mtd_info is given to satisfy the compiler
39 * (the code does not use mtd_info, so the code does not care)
40 */
41#ifndef STANDALONE
38#include <linux/types.h> 42#include <linux/types.h>
39#include <linux/kernel.h> 43#include <linux/kernel.h>
40#include <linux/module.h> 44#include <linux/module.h>
45#include <linux/mtd/mtd.h>
46#include <linux/mtd/nand.h>
41#include <linux/mtd/nand_ecc.h> 47#include <linux/mtd/nand_ecc.h>
48#include <asm/byteorder.h>
49#else
50#include <stdint.h>
51struct mtd_info;
52#define EXPORT_SYMBOL(x) /* x */
53
54#define MODULE_LICENSE(x) /* x */
55#define MODULE_AUTHOR(x) /* x */
56#define MODULE_DESCRIPTION(x) /* x */
57
58#define printk printf
59#define KERN_ERR ""
60#endif
61
62/*
63 * invparity is a 256 byte table that contains the odd parity
64 * for each byte. So if the number of bits in a byte is even,
65 * the array element is 1, and when the number of bits is odd
66 * the array element is 0.
67 */
68static const char invparity[256] = {
69 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
70 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
71 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
72 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
73 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
74 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
75 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
76 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
77 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
78 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
79 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
80 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
81 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
82 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
83 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
84 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
85};
86
87/*
88 * bitsperbyte contains the number of bits per byte
89 * this is only used for testing and repairing parity
90 * (a precalculated value slightly improves performance)
91 */
92static const char bitsperbyte[256] = {
93 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
94 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
95 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
96 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
97 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
98 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
99 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
100 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
101 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
102 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
103 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
104 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
105 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
106 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
107 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
108 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
109};
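
/*
 * The two tables above are pure functions of the byte value. The sketch
 * below (illustrative only, not part of this patch) shows how they could be
 * regenerated at run time; the driver keeps them as static const data, which
 * avoids any initialisation cost.
 */
static void nand_ecc_gen_tables(char inv[256], char bits[256])
{
	int i, j, n;

	for (i = 0; i < 256; i++) {
		n = 0;
		for (j = 0; j < 8; j++)
			n += (i >> j) & 1;	/* population count of byte i */
		bits[i] = n;			/* matches bitsperbyte[i] */
		inv[i] = !(n & 1);		/* matches invparity[i]: 1 when even */
	}
}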
42 110
43/* 111/*
44 * Pre-calculated 256-way 1 byte column parity 112 * addressbits is a lookup table to filter out the bits from the xor-ed
113 * ecc data that identify the faulty location.
114 * this is only used for repairing parity
115 * see the comments in nand_correct_data for more details
45 */ 116 */
46static const u_char nand_ecc_precalc_table[] = { 117static const char addressbits[256] = {
47 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00, 118 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
48 0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65, 119 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
49 0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66, 120 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
50 0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03, 121 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
51 0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69, 122 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
52 0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c, 123 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
53 0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f, 124 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
54 0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a, 125 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
55 0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a, 126 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
56 0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f, 127 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
57 0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c, 128 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
58 0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69, 129 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
59 0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03, 130 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
60 0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66, 131 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
61 0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65, 132 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
62 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00 133 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
134 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
135 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
136 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
137 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
138 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
139 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
140 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
141 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
142 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
143 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
144 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
145 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
146 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
147 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
148 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
149 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f
63}; 150};
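
/*
 * What addressbits[] encodes (illustrative equivalence, not part of this
 * patch): for a syndrome byte in which each bit pair (2k+1, 2k) carries one
 * address bit, the table gathers the odd-numbered bits into a compact value:
 *
 *	addressbits[b] == ((b >> 1) & 1) | (((b >> 3) & 1) << 1) |
 *			  (((b >> 5) & 1) << 2) | (((b >> 7) & 1) << 3)
 *
 * See the comment in nand_correct_data() below for how this is used to
 * rebuild the faulty byte and bit address from the xor-ed ECC bytes.
 */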
64 151
65/** 152/**
66 * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256-byte block 153 * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
154 * block
67 * @mtd: MTD block structure 155 * @mtd: MTD block structure
68 * @dat: raw data 156 * @buf: input buffer with raw data
69 * @ecc_code: buffer for ECC 157 * @code: output buffer with ECC
70 */ 158 */
71int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, 159int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
72 u_char *ecc_code) 160 unsigned char *code)
73{ 161{
74 uint8_t idx, reg1, reg2, reg3, tmp1, tmp2;
75 int i; 162 int i;
163 const uint32_t *bp = (uint32_t *)buf;
164 /* 256 or 512 bytes/ecc */
165 const uint32_t eccsize_mult =
166 (((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
167 uint32_t cur; /* current value in buffer */
168 /* rp0..rp15..rp17 are the various accumulated parities (per byte) */
169 uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
170 uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16;
171 uint32_t uninitialized_var(rp17); /* to make compiler happy */
172 uint32_t par; /* the cumulative parity for all data */
173 uint32_t tmppar; /* the cumulative parity for this iteration;
174 for rp12, rp14 and rp16 at the end of the
175 loop */
176
177 par = 0;
178 rp4 = 0;
179 rp6 = 0;
180 rp8 = 0;
181 rp10 = 0;
182 rp12 = 0;
183 rp14 = 0;
184 rp16 = 0;
185
186 /*
187 * The loop is unrolled a number of times;
188 * This avoids if statements to decide on which rp value to update
189 * Also we process the data by longwords.
190 * Note: passing unaligned data might give a performance penalty.
191 * It is assumed that the buffers are aligned.
192 * tmppar is the cumulative sum of this iteration.
193 * needed for calculating rp12, rp14, rp16 and par
194 * also used as a performance improvement for rp6, rp8 and rp10
195 */
196 for (i = 0; i < eccsize_mult << 2; i++) {
197 cur = *bp++;
198 tmppar = cur;
199 rp4 ^= cur;
200 cur = *bp++;
201 tmppar ^= cur;
202 rp6 ^= tmppar;
203 cur = *bp++;
204 tmppar ^= cur;
205 rp4 ^= cur;
206 cur = *bp++;
207 tmppar ^= cur;
208 rp8 ^= tmppar;
76 209
77 /* Initialize variables */ 210 cur = *bp++;
78 reg1 = reg2 = reg3 = 0; 211 tmppar ^= cur;
212 rp4 ^= cur;
213 rp6 ^= cur;
214 cur = *bp++;
215 tmppar ^= cur;
216 rp6 ^= cur;
217 cur = *bp++;
218 tmppar ^= cur;
219 rp4 ^= cur;
220 cur = *bp++;
221 tmppar ^= cur;
222 rp10 ^= tmppar;
79 223
80 /* Build up column parity */ 224 cur = *bp++;
81 for(i = 0; i < 256; i++) { 225 tmppar ^= cur;
82 /* Get CP0 - CP5 from table */ 226 rp4 ^= cur;
83 idx = nand_ecc_precalc_table[*dat++]; 227 rp6 ^= cur;
84 reg1 ^= (idx & 0x3f); 228 rp8 ^= cur;
229 cur = *bp++;
230 tmppar ^= cur;
231 rp6 ^= cur;
232 rp8 ^= cur;
233 cur = *bp++;
234 tmppar ^= cur;
235 rp4 ^= cur;
236 rp8 ^= cur;
237 cur = *bp++;
238 tmppar ^= cur;
239 rp8 ^= cur;
85 240
86 /* All bit XOR = 1 ? */ 241 cur = *bp++;
87 if (idx & 0x40) { 242 tmppar ^= cur;
88 reg3 ^= (uint8_t) i; 243 rp4 ^= cur;
89 reg2 ^= ~((uint8_t) i); 244 rp6 ^= cur;
90 } 245 cur = *bp++;
246 tmppar ^= cur;
247 rp6 ^= cur;
248 cur = *bp++;
249 tmppar ^= cur;
250 rp4 ^= cur;
251 cur = *bp++;
252 tmppar ^= cur;
253
254 par ^= tmppar;
255 if ((i & 0x1) == 0)
256 rp12 ^= tmppar;
257 if ((i & 0x2) == 0)
258 rp14 ^= tmppar;
259 if (eccsize_mult == 2 && (i & 0x4) == 0)
260 rp16 ^= tmppar;
91 } 261 }
92 262
93 /* Create non-inverted ECC code from line parity */ 263 /*
94 tmp1 = (reg3 & 0x80) >> 0; /* B7 -> B7 */ 264 * handle the fact that we use longword operations
95 tmp1 |= (reg2 & 0x80) >> 1; /* B7 -> B6 */ 265 * we'll bring rp4..rp14..rp16 back to single byte entities by
96 tmp1 |= (reg3 & 0x40) >> 1; /* B6 -> B5 */ 266 * shifting and xoring first fold the upper and lower 16 bits,
97 tmp1 |= (reg2 & 0x40) >> 2; /* B6 -> B4 */ 267 * then the upper and lower 8 bits.
98 tmp1 |= (reg3 & 0x20) >> 2; /* B5 -> B3 */ 268 */
99 tmp1 |= (reg2 & 0x20) >> 3; /* B5 -> B2 */ 269 rp4 ^= (rp4 >> 16);
100 tmp1 |= (reg3 & 0x10) >> 3; /* B4 -> B1 */ 270 rp4 ^= (rp4 >> 8);
101 tmp1 |= (reg2 & 0x10) >> 4; /* B4 -> B0 */ 271 rp4 &= 0xff;
102 272 rp6 ^= (rp6 >> 16);
103 tmp2 = (reg3 & 0x08) << 4; /* B3 -> B7 */ 273 rp6 ^= (rp6 >> 8);
104 tmp2 |= (reg2 & 0x08) << 3; /* B3 -> B6 */ 274 rp6 &= 0xff;
105 tmp2 |= (reg3 & 0x04) << 3; /* B2 -> B5 */ 275 rp8 ^= (rp8 >> 16);
106 tmp2 |= (reg2 & 0x04) << 2; /* B2 -> B4 */ 276 rp8 ^= (rp8 >> 8);
107 tmp2 |= (reg3 & 0x02) << 2; /* B1 -> B3 */ 277 rp8 &= 0xff;
108 tmp2 |= (reg2 & 0x02) << 1; /* B1 -> B2 */ 278 rp10 ^= (rp10 >> 16);
109 tmp2 |= (reg3 & 0x01) << 1; /* B0 -> B1 */ 279 rp10 ^= (rp10 >> 8);
110 tmp2 |= (reg2 & 0x01) << 0; /* B7 -> B0 */ 280 rp10 &= 0xff;
111 281 rp12 ^= (rp12 >> 16);
112 /* Calculate final ECC code */ 282 rp12 ^= (rp12 >> 8);
113#ifdef CONFIG_MTD_NAND_ECC_SMC 283 rp12 &= 0xff;
114 ecc_code[0] = ~tmp2; 284 rp14 ^= (rp14 >> 16);
115 ecc_code[1] = ~tmp1; 285 rp14 ^= (rp14 >> 8);
286 rp14 &= 0xff;
287 if (eccsize_mult == 2) {
288 rp16 ^= (rp16 >> 16);
289 rp16 ^= (rp16 >> 8);
290 rp16 &= 0xff;
291 }
292
293 /*
294 * we also need to calculate the row parity for rp0..rp3
295 * This is present in par, because par is now
296 * rp3 rp3 rp2 rp2 in little endian and
297 * rp2 rp2 rp3 rp3 in big endian
298 * as well as
299 * rp1 rp0 rp1 rp0 in little endian and
300 * rp0 rp1 rp0 rp1 in big endian
301 * First calculate rp2 and rp3
302 */
303#ifdef __BIG_ENDIAN
304 rp2 = (par >> 16);
305 rp2 ^= (rp2 >> 8);
306 rp2 &= 0xff;
307 rp3 = par & 0xffff;
308 rp3 ^= (rp3 >> 8);
309 rp3 &= 0xff;
116#else 310#else
117 ecc_code[0] = ~tmp1; 311 rp3 = (par >> 16);
118 ecc_code[1] = ~tmp2; 312 rp3 ^= (rp3 >> 8);
313 rp3 &= 0xff;
314 rp2 = par & 0xffff;
315 rp2 ^= (rp2 >> 8);
316 rp2 &= 0xff;
119#endif 317#endif
120 ecc_code[2] = ((~reg1) << 2) | 0x03;
121 318
122 return 0; 319 /* reduce par to 16 bits then calculate rp1 and rp0 */
123} 320 par ^= (par >> 16);
124EXPORT_SYMBOL(nand_calculate_ecc); 321#ifdef __BIG_ENDIAN
322 rp0 = (par >> 8) & 0xff;
323 rp1 = (par & 0xff);
324#else
325 rp1 = (par >> 8) & 0xff;
326 rp0 = (par & 0xff);
327#endif
125 328
126static inline int countbits(uint32_t byte) 329 /* finally reduce par to 8 bits */
127{ 330 par ^= (par >> 8);
128 int res = 0; 331 par &= 0xff;
129 332
130 for (;byte; byte >>= 1) 333 /*
131 res += byte & 0x01; 334 * and calculate rp5..rp15..rp17
132 return res; 335 * note that par = rp4 ^ rp5 and due to the commutative property
336 * of the ^ operator we can say:
337 * rp5 = (par ^ rp4);
338 * The & 0xff seems superfluous, but benchmarking showed that
339 * leaving it out gives slightly worse results. No idea why, probably
340 * it has to do with the way the pipeline in the Pentium is organized.
341 */
342 rp5 = (par ^ rp4) & 0xff;
343 rp7 = (par ^ rp6) & 0xff;
344 rp9 = (par ^ rp8) & 0xff;
345 rp11 = (par ^ rp10) & 0xff;
346 rp13 = (par ^ rp12) & 0xff;
347 rp15 = (par ^ rp14) & 0xff;
348 if (eccsize_mult == 2)
349 rp17 = (par ^ rp16) & 0xff;
350
351 /*
352 * Finally calculate the ecc bits.
353 * Again here it might seem that there are performance optimisations
354 * possible, but benchmarks showed that on the system this is developed
355 * the code below is the fastest
356 */
357#ifdef CONFIG_MTD_NAND_ECC_SMC
358 code[0] =
359 (invparity[rp7] << 7) |
360 (invparity[rp6] << 6) |
361 (invparity[rp5] << 5) |
362 (invparity[rp4] << 4) |
363 (invparity[rp3] << 3) |
364 (invparity[rp2] << 2) |
365 (invparity[rp1] << 1) |
366 (invparity[rp0]);
367 code[1] =
368 (invparity[rp15] << 7) |
369 (invparity[rp14] << 6) |
370 (invparity[rp13] << 5) |
371 (invparity[rp12] << 4) |
372 (invparity[rp11] << 3) |
373 (invparity[rp10] << 2) |
374 (invparity[rp9] << 1) |
375 (invparity[rp8]);
376#else
377 code[1] =
378 (invparity[rp7] << 7) |
379 (invparity[rp6] << 6) |
380 (invparity[rp5] << 5) |
381 (invparity[rp4] << 4) |
382 (invparity[rp3] << 3) |
383 (invparity[rp2] << 2) |
384 (invparity[rp1] << 1) |
385 (invparity[rp0]);
386 code[0] =
387 (invparity[rp15] << 7) |
388 (invparity[rp14] << 6) |
389 (invparity[rp13] << 5) |
390 (invparity[rp12] << 4) |
391 (invparity[rp11] << 3) |
392 (invparity[rp10] << 2) |
393 (invparity[rp9] << 1) |
394 (invparity[rp8]);
395#endif
396 if (eccsize_mult == 1)
397 code[2] =
398 (invparity[par & 0xf0] << 7) |
399 (invparity[par & 0x0f] << 6) |
400 (invparity[par & 0xcc] << 5) |
401 (invparity[par & 0x33] << 4) |
402 (invparity[par & 0xaa] << 3) |
403 (invparity[par & 0x55] << 2) |
404 3;
405 else
406 code[2] =
407 (invparity[par & 0xf0] << 7) |
408 (invparity[par & 0x0f] << 6) |
409 (invparity[par & 0xcc] << 5) |
410 (invparity[par & 0x33] << 4) |
411 (invparity[par & 0xaa] << 3) |
412 (invparity[par & 0x55] << 2) |
413 (invparity[rp17] << 1) |
414 (invparity[rp16] << 0);
415 return 0;
133} 416}
417EXPORT_SYMBOL(nand_calculate_ecc);
134 418
135/** 419/**
136 * nand_correct_data - [NAND Interface] Detect and correct bit error(s) 420 * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
137 * @mtd: MTD block structure 421 * @mtd: MTD block structure
138 * @dat: raw data read from the chip 422 * @buf: raw data read from the chip
139 * @read_ecc: ECC from the chip 423 * @read_ecc: ECC from the chip
140 * @calc_ecc: the ECC calculated from raw data 424 * @calc_ecc: the ECC calculated from raw data
141 * 425 *
142 * Detect and correct a 1 bit error for 256 byte block 426 * Detect and correct a 1 bit error for 256/512 byte block
143 */ 427 */
144int nand_correct_data(struct mtd_info *mtd, u_char *dat, 428int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
145 u_char *read_ecc, u_char *calc_ecc) 429 unsigned char *read_ecc, unsigned char *calc_ecc)
146{ 430{
147 uint8_t s0, s1, s2; 431 unsigned char b0, b1, b2;
432 unsigned char byte_addr, bit_addr;
433 /* 256 or 512 bytes/ecc */
434 const uint32_t eccsize_mult =
435 (((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
148 436
437 /*
438 * b0 to b2 indicate which bit is faulty (if any)
439 * we might need the xor result more than once,
440 * so keep them in a local var
441 */
149#ifdef CONFIG_MTD_NAND_ECC_SMC 442#ifdef CONFIG_MTD_NAND_ECC_SMC
150 s0 = calc_ecc[0] ^ read_ecc[0]; 443 b0 = read_ecc[0] ^ calc_ecc[0];
151 s1 = calc_ecc[1] ^ read_ecc[1]; 444 b1 = read_ecc[1] ^ calc_ecc[1];
152 s2 = calc_ecc[2] ^ read_ecc[2];
153#else 445#else
154 s1 = calc_ecc[0] ^ read_ecc[0]; 446 b0 = read_ecc[1] ^ calc_ecc[1];
155 s0 = calc_ecc[1] ^ read_ecc[1]; 447 b1 = read_ecc[0] ^ calc_ecc[0];
156 s2 = calc_ecc[2] ^ read_ecc[2];
157#endif 448#endif
158 if ((s0 | s1 | s2) == 0) 449 b2 = read_ecc[2] ^ calc_ecc[2];
159 return 0;
160
161 /* Check for a single bit error */
162 if( ((s0 ^ (s0 >> 1)) & 0x55) == 0x55 &&
163 ((s1 ^ (s1 >> 1)) & 0x55) == 0x55 &&
164 ((s2 ^ (s2 >> 1)) & 0x54) == 0x54) {
165 450
166 uint32_t byteoffs, bitnum; 451 /* check if there are any bitfaults */
167 452
168 byteoffs = (s1 << 0) & 0x80; 453 /* repeated if statements are slightly more efficient than switch ... */
169 byteoffs |= (s1 << 1) & 0x40; 454 /* ordered in order of likelihood */
170 byteoffs |= (s1 << 2) & 0x20;
171 byteoffs |= (s1 << 3) & 0x10;
172 455
173 byteoffs |= (s0 >> 4) & 0x08; 456 if ((b0 | b1 | b2) == 0)
174 byteoffs |= (s0 >> 3) & 0x04; 457 return 0; /* no error */
175 byteoffs |= (s0 >> 2) & 0x02;
176 byteoffs |= (s0 >> 1) & 0x01;
177
178 bitnum = (s2 >> 5) & 0x04;
179 bitnum |= (s2 >> 4) & 0x02;
180 bitnum |= (s2 >> 3) & 0x01;
181
182 dat[byteoffs] ^= (1 << bitnum);
183 458
459 if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) &&
460 (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) &&
461 ((eccsize_mult == 1 && ((b2 ^ (b2 >> 1)) & 0x54) == 0x54) ||
462 (eccsize_mult == 2 && ((b2 ^ (b2 >> 1)) & 0x55) == 0x55))) {
463 /* single bit error */
464 /*
465 * rp17/rp15/13/11/9/7/5/3/1 indicate which byte is the faulty
466 * byte, cp 5/3/1 indicate the faulty bit.
467 * A lookup table (called addressbits) is used to filter
468 * the bits from the byte they are in.
469 * A marginal optimisation is possible by having three
470 * different lookup tables.
471 * One as we have now (for b0), one for b2
472 * (that would avoid the >> 1), and one for b1 (with all values
473 * << 4). However it was felt that introducing two more tables
474 * hardly justifies the gain.
475 *
476 * The b2 shift is there to get rid of the lowest two bits.
477 * We could also do addressbits[b2] >> 1 but for the
478 * performance it does not make any difference
479 */
480 if (eccsize_mult == 1)
481 byte_addr = (addressbits[b1] << 4) + addressbits[b0];
482 else
483 byte_addr = (addressbits[b2 & 0x3] << 8) +
484 (addressbits[b1] << 4) + addressbits[b0];
485 bit_addr = addressbits[b2 >> 2];
486 /* flip the bit */
487 buf[byte_addr] ^= (1 << bit_addr);
184 return 1; 488 return 1;
185 }
186 489
187 if(countbits(s0 | ((uint32_t)s1 << 8) | ((uint32_t)s2 <<16)) == 1) 490 }
188 return 1; 491 /* count nr of bits; use table lookup, faster than calculating it */
492 if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
493 return 1; /* error in ecc data; no action needed */
189 494
190 return -EBADMSG; 495 printk(KERN_ERR "uncorrectable error : ");
496 return -1;
191} 497}
192EXPORT_SYMBOL(nand_correct_data); 498EXPORT_SYMBOL(nand_correct_data);
193 499
194MODULE_LICENSE("GPL"); 500MODULE_LICENSE("GPL");
195MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>"); 501MODULE_AUTHOR("Frans Meulenbroeks <fransmeulenbroeks@gmail.com>");
196MODULE_DESCRIPTION("Generic NAND ECC support"); 502MODULE_DESCRIPTION("Generic NAND ECC support");
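
The exported pair above is typically used together: the ECC read back from the spare area is compared against an ECC freshly calculated over the data just read, and nand_correct_data() repairs the buffer when the two differ by a correctable single-bit error. A minimal self-test sketch (kernel context assumed, with mtd->priv pointing at a nand_chip whose ecc.size is 256; names and the injected error position are illustrative):

/* needs <linux/string.h>, <linux/errno.h>, <linux/mtd/nand_ecc.h> */
static int nand_ecc_selftest(struct mtd_info *mtd)
{
	unsigned char data[256], ecc_stored[3], ecc_calc[3];
	int ret;

	memset(data, 0xa5, sizeof(data));		/* pretend page data */
	nand_calculate_ecc(mtd, data, ecc_stored);	/* ECC as written to OOB */

	data[100] ^= 0x08;				/* inject a single-bit error */
	nand_calculate_ecc(mtd, data, ecc_calc);	/* ECC over the corrupted data */

	/* flips bit 3 of data[100] back and returns 1 (one bit corrected) */
	ret = nand_correct_data(mtd, data, ecc_stored, ecc_calc);

	return ret == 1 ? 0 : -EIO;
}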
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 556e8131ecdc..ae7c57781a68 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -38,7 +38,6 @@
38#include <linux/delay.h> 38#include <linux/delay.h>
39#include <linux/list.h> 39#include <linux/list.h>
40#include <linux/random.h> 40#include <linux/random.h>
41#include <asm/div64.h>
42 41
43/* Default simulator parameters values */ 42/* Default simulator parameters values */
44#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \ 43#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index a64ad15b8fdd..c0fa9c9edf08 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -115,55 +115,11 @@ enum {
115 STATE_PIO_WRITING, 115 STATE_PIO_WRITING,
116}; 116};
117 117
118struct pxa3xx_nand_timing {
119 unsigned int tCH; /* Enable signal hold time */
120 unsigned int tCS; /* Enable signal setup time */
121 unsigned int tWH; /* ND_nWE high duration */
122 unsigned int tWP; /* ND_nWE pulse time */
123 unsigned int tRH; /* ND_nRE high duration */
124 unsigned int tRP; /* ND_nRE pulse width */
125 unsigned int tR; /* ND_nWE high to ND_nRE low for read */
126 unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */
127 unsigned int tAR; /* ND_ALE low to ND_nRE low delay */
128};
129
130struct pxa3xx_nand_cmdset {
131 uint16_t read1;
132 uint16_t read2;
133 uint16_t program;
134 uint16_t read_status;
135 uint16_t read_id;
136 uint16_t erase;
137 uint16_t reset;
138 uint16_t lock;
139 uint16_t unlock;
140 uint16_t lock_status;
141};
142
143struct pxa3xx_nand_flash {
144 struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
145 struct pxa3xx_nand_cmdset *cmdset;
146
147 uint32_t page_per_block;/* Pages per block (PG_PER_BLK) */
148 uint32_t page_size; /* Page size in bytes (PAGE_SZ) */
149 uint32_t flash_width; /* Width of Flash memory (DWIDTH_M) */
150 uint32_t dfc_width; /* Width of flash controller(DWIDTH_C) */
151 uint32_t num_blocks; /* Number of physical blocks in Flash */
152 uint32_t chip_id;
153
154 /* NOTE: these are automatically calculated, do not define */
155 size_t oob_size;
156 size_t read_id_bytes;
157
158 unsigned int col_addr_cycles;
159 unsigned int row_addr_cycles;
160};
161
162struct pxa3xx_nand_info { 118struct pxa3xx_nand_info {
163 struct nand_chip nand_chip; 119 struct nand_chip nand_chip;
164 120
165 struct platform_device *pdev; 121 struct platform_device *pdev;
166 struct pxa3xx_nand_flash *flash_info; 122 const struct pxa3xx_nand_flash *flash_info;
167 123
168 struct clk *clk; 124 struct clk *clk;
169 void __iomem *mmio_base; 125 void __iomem *mmio_base;
@@ -202,12 +158,20 @@ struct pxa3xx_nand_info {
202 uint32_t ndcb0; 158 uint32_t ndcb0;
203 uint32_t ndcb1; 159 uint32_t ndcb1;
204 uint32_t ndcb2; 160 uint32_t ndcb2;
161
162 /* calculated from pxa3xx_nand_flash data */
163 size_t oob_size;
164 size_t read_id_bytes;
165
166 unsigned int col_addr_cycles;
167 unsigned int row_addr_cycles;
205}; 168};
206 169
207static int use_dma = 1; 170static int use_dma = 1;
208module_param(use_dma, bool, 0444); 171module_param(use_dma, bool, 0444);
209MODULE_PARM_DESC(use_dma, "enable DMA for data transfering to/from NAND HW"); 172MODULE_PARM_DESC(use_dma, "enable DMA for data transfering to/from NAND HW");
210 173
174#ifdef CONFIG_MTD_NAND_PXA3xx_BUILTIN
211static struct pxa3xx_nand_cmdset smallpage_cmdset = { 175static struct pxa3xx_nand_cmdset smallpage_cmdset = {
212 .read1 = 0x0000, 176 .read1 = 0x0000,
213 .read2 = 0x0050, 177 .read2 = 0x0050,
@@ -291,11 +255,35 @@ static struct pxa3xx_nand_flash micron1GbX16 = {
291 .chip_id = 0xb12c, 255 .chip_id = 0xb12c,
292}; 256};
293 257
258static struct pxa3xx_nand_timing stm2GbX16_timing = {
259 .tCH = 10,
260 .tCS = 35,
261 .tWH = 15,
262 .tWP = 25,
263 .tRH = 15,
264 .tRP = 25,
265 .tR = 25000,
266 .tWHR = 60,
267 .tAR = 10,
268};
269
270static struct pxa3xx_nand_flash stm2GbX16 = {
271 .timing = &stm2GbX16_timing,
272 .page_per_block = 64,
273 .page_size = 2048,
274 .flash_width = 16,
275 .dfc_width = 16,
276 .num_blocks = 2048,
277 .chip_id = 0xba20,
278};
279
294static struct pxa3xx_nand_flash *builtin_flash_types[] = { 280static struct pxa3xx_nand_flash *builtin_flash_types[] = {
295 &samsung512MbX16, 281 &samsung512MbX16,
296 &micron1GbX8, 282 &micron1GbX8,
297 &micron1GbX16, 283 &micron1GbX16,
284 &stm2GbX16,
298}; 285};
286#endif /* CONFIG_MTD_NAND_PXA3xx_BUILTIN */
299 287
300#define NDTR0_tCH(c) (min((c), 7) << 19) 288#define NDTR0_tCH(c) (min((c), 7) << 19)
301#define NDTR0_tCS(c) (min((c), 7) << 16) 289#define NDTR0_tCS(c) (min((c), 7) << 16)
@@ -312,7 +300,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = {
312#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) + 1) 300#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) + 1)
313 301
314static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info, 302static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
315 struct pxa3xx_nand_timing *t) 303 const struct pxa3xx_nand_timing *t)
316{ 304{
317 unsigned long nand_clk = clk_get_rate(info->clk); 305 unsigned long nand_clk = clk_get_rate(info->clk);
318 uint32_t ndtr0, ndtr1; 306 uint32_t ndtr0, ndtr1;
@@ -354,8 +342,8 @@ static int wait_for_event(struct pxa3xx_nand_info *info, uint32_t event)
354static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info, 342static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info,
355 uint16_t cmd, int column, int page_addr) 343 uint16_t cmd, int column, int page_addr)
356{ 344{
357 struct pxa3xx_nand_flash *f = info->flash_info; 345 const struct pxa3xx_nand_flash *f = info->flash_info;
358 struct pxa3xx_nand_cmdset *cmdset = f->cmdset; 346 const struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
359 347
360 /* calculate data size */ 348 /* calculate data size */
361 switch (f->page_size) { 349 switch (f->page_size) {
@@ -373,14 +361,14 @@ static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info,
373 info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0); 361 info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
374 info->ndcb1 = 0; 362 info->ndcb1 = 0;
375 info->ndcb2 = 0; 363 info->ndcb2 = 0;
376 info->ndcb0 |= NDCB0_ADDR_CYC(f->row_addr_cycles + f->col_addr_cycles); 364 info->ndcb0 |= NDCB0_ADDR_CYC(info->row_addr_cycles + info->col_addr_cycles);
377 365
378 if (f->col_addr_cycles == 2) { 366 if (info->col_addr_cycles == 2) {
379 /* large block, 2 cycles for column address 367 /* large block, 2 cycles for column address
380 * row address starts from 3rd cycle 368 * row address starts from 3rd cycle
381 */ 369 */
382 info->ndcb1 |= (page_addr << 16) | (column & 0xffff); 370 info->ndcb1 |= (page_addr << 16) | (column & 0xffff);
383 if (f->row_addr_cycles == 3) 371 if (info->row_addr_cycles == 3)
384 info->ndcb2 = (page_addr >> 16) & 0xff; 372 info->ndcb2 = (page_addr >> 16) & 0xff;
385 } else 373 } else
386 /* small block, 1 cycles for column address 374 /* small block, 1 cycles for column address
@@ -406,7 +394,7 @@ static int prepare_erase_cmd(struct pxa3xx_nand_info *info,
406 394
407static int prepare_other_cmd(struct pxa3xx_nand_info *info, uint16_t cmd) 395static int prepare_other_cmd(struct pxa3xx_nand_info *info, uint16_t cmd)
408{ 396{
409 struct pxa3xx_nand_cmdset *cmdset = info->flash_info->cmdset; 397 const struct pxa3xx_nand_cmdset *cmdset = info->flash_info->cmdset;
410 398
411 info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0); 399 info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
412 info->ndcb1 = 0; 400 info->ndcb1 = 0;
@@ -641,8 +629,8 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
641 int column, int page_addr) 629 int column, int page_addr)
642{ 630{
643 struct pxa3xx_nand_info *info = mtd->priv; 631 struct pxa3xx_nand_info *info = mtd->priv;
644 struct pxa3xx_nand_flash *flash_info = info->flash_info; 632 const struct pxa3xx_nand_flash *flash_info = info->flash_info;
645 struct pxa3xx_nand_cmdset *cmdset = flash_info->cmdset; 633 const struct pxa3xx_nand_cmdset *cmdset = flash_info->cmdset;
646 int ret; 634 int ret;
647 635
648 info->use_dma = (use_dma) ? 1 : 0; 636 info->use_dma = (use_dma) ? 1 : 0;
@@ -720,7 +708,7 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
720 info->use_dma = 0; /* force PIO read */ 708 info->use_dma = 0; /* force PIO read */
721 info->buf_start = 0; 709 info->buf_start = 0;
722 info->buf_count = (command == NAND_CMD_READID) ? 710 info->buf_count = (command == NAND_CMD_READID) ?
723 flash_info->read_id_bytes : 1; 711 info->read_id_bytes : 1;
724 712
725 if (prepare_other_cmd(info, (command == NAND_CMD_READID) ? 713 if (prepare_other_cmd(info, (command == NAND_CMD_READID) ?
726 cmdset->read_id : cmdset->read_status)) 714 cmdset->read_id : cmdset->read_status))
@@ -861,8 +849,8 @@ static int pxa3xx_nand_ecc_correct(struct mtd_info *mtd,
861 849
862static int __readid(struct pxa3xx_nand_info *info, uint32_t *id) 850static int __readid(struct pxa3xx_nand_info *info, uint32_t *id)
863{ 851{
864 struct pxa3xx_nand_flash *f = info->flash_info; 852 const struct pxa3xx_nand_flash *f = info->flash_info;
865 struct pxa3xx_nand_cmdset *cmdset = f->cmdset; 853 const struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
866 uint32_t ndcr; 854 uint32_t ndcr;
867 uint8_t id_buff[8]; 855 uint8_t id_buff[8];
868 856
@@ -891,7 +879,7 @@ fail_timeout:
891} 879}
892 880
893static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, 881static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
894 struct pxa3xx_nand_flash *f) 882 const struct pxa3xx_nand_flash *f)
895{ 883{
896 struct platform_device *pdev = info->pdev; 884 struct platform_device *pdev = info->pdev;
897 struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; 885 struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
@@ -904,25 +892,25 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
904 return -EINVAL; 892 return -EINVAL;
905 893
906 /* calculate flash information */ 894 /* calculate flash information */
907 f->oob_size = (f->page_size == 2048) ? 64 : 16; 895 info->oob_size = (f->page_size == 2048) ? 64 : 16;
908 f->read_id_bytes = (f->page_size == 2048) ? 4 : 2; 896 info->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
909 897
910 /* calculate addressing information */ 898 /* calculate addressing information */
911 f->col_addr_cycles = (f->page_size == 2048) ? 2 : 1; 899 info->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
912 900
913 if (f->num_blocks * f->page_per_block > 65536) 901 if (f->num_blocks * f->page_per_block > 65536)
914 f->row_addr_cycles = 3; 902 info->row_addr_cycles = 3;
915 else 903 else
916 f->row_addr_cycles = 2; 904 info->row_addr_cycles = 2;
917 905
918 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0; 906 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
919 ndcr |= (f->col_addr_cycles == 2) ? NDCR_RA_START : 0; 907 ndcr |= (info->col_addr_cycles == 2) ? NDCR_RA_START : 0;
920 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0; 908 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
921 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0; 909 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
922 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0; 910 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
923 ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0; 911 ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
924 912
925 ndcr |= NDCR_RD_ID_CNT(f->read_id_bytes); 913 ndcr |= NDCR_RD_ID_CNT(info->read_id_bytes);
926 ndcr |= NDCR_SPARE_EN; /* enable spare by default */ 914 ndcr |= NDCR_SPARE_EN; /* enable spare by default */
927 915
928 info->reg_ndcr = ndcr; 916 info->reg_ndcr = ndcr;
@@ -932,12 +920,27 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
932 return 0; 920 return 0;
933} 921}
934 922
935static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info) 923static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info,
924 const struct pxa3xx_nand_platform_data *pdata)
936{ 925{
937 struct pxa3xx_nand_flash *f; 926 const struct pxa3xx_nand_flash *f;
938 uint32_t id; 927 uint32_t id = -1;
939 int i; 928 int i;
940 929
930 for (i = 0; i < pdata->num_flash; ++i) {
931 f = pdata->flash + i;
932
933 if (pxa3xx_nand_config_flash(info, f))
934 continue;
935
936 if (__readid(info, &id))
937 continue;
938
939 if (id == f->chip_id)
940 return 0;
941 }
942
943#ifdef CONFIG_MTD_NAND_PXA3xx_BUILTIN
941 for (i = 0; i < ARRAY_SIZE(builtin_flash_types); i++) { 944 for (i = 0; i < ARRAY_SIZE(builtin_flash_types); i++) {
942 945
943 f = builtin_flash_types[i]; 946 f = builtin_flash_types[i];
@@ -951,7 +954,11 @@ static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info)
951 if (id == f->chip_id) 954 if (id == f->chip_id)
952 return 0; 955 return 0;
953 } 956 }
957#endif
954 958
959 dev_warn(&info->pdev->dev,
960 "failed to detect configured nand flash; found %04x instead\n",
961 id);
955 return -ENODEV; 962 return -ENODEV;
956} 963}
957 964
@@ -1014,7 +1021,7 @@ static struct nand_ecclayout hw_largepage_ecclayout = {
1014static void pxa3xx_nand_init_mtd(struct mtd_info *mtd, 1021static void pxa3xx_nand_init_mtd(struct mtd_info *mtd,
1015 struct pxa3xx_nand_info *info) 1022 struct pxa3xx_nand_info *info)
1016{ 1023{
1017 struct pxa3xx_nand_flash *f = info->flash_info; 1024 const struct pxa3xx_nand_flash *f = info->flash_info;
1018 struct nand_chip *this = &info->nand_chip; 1025 struct nand_chip *this = &info->nand_chip;
1019 1026
1020 this->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16: 0; 1027 this->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16: 0;
@@ -1135,7 +1142,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1135 goto fail_free_buf; 1142 goto fail_free_buf;
1136 } 1143 }
1137 1144
1138 ret = pxa3xx_nand_detect_flash(info); 1145 ret = pxa3xx_nand_detect_flash(info, pdata);
1139 if (ret) { 1146 if (ret) {
1140 dev_err(&pdev->dev, "failed to detect flash\n"); 1147 dev_err(&pdev->dev, "failed to detect flash\n");
1141 ret = -ENODEV; 1148 ret = -ENODEV;
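
With this change the detection path first walks any flash types handed in
through platform data (pdata->flash, pdata->num_flash) and only then falls
back to the builtin table guarded by CONFIG_MTD_NAND_PXA3xx_BUILTIN. A
minimal board-file sketch of such an entry follows; the timing numbers,
geometry and chip ID are illustrative values, not taken from this patch,
and the pxa3xx_nand platform header is assumed to be in the include path.

/* board file sketch: all values below are examples only */
static struct pxa3xx_nand_timing board_nand_timing = {
	.tCH	= 10,
	.tCS	= 35,
	.tWH	= 15,
	.tWP	= 25,
	.tRH	= 15,
	.tRP	= 25,
	.tR	= 25000,
	.tWHR	= 60,
	.tAR	= 10,
};

static struct pxa3xx_nand_flash board_nand_flash = {
	.timing		= &board_nand_timing,
	.page_per_block	= 64,
	.page_size	= 2048,
	.flash_width	= 16,
	.dfc_width	= 16,
	.num_blocks	= 2048,
	.chip_id	= 0xba20,	/* must match the ID read back by __readid() */
};

static struct pxa3xx_nand_platform_data board_nand_info = {
	.enable_arbiter	= 1,
	.flash		= &board_nand_flash,
	.num_flash	= 1,
};
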
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
new file mode 100644
index 000000000000..821acb08ff1c
--- /dev/null
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -0,0 +1,878 @@
1/*
2 * SuperH FLCTL nand controller
3 *
4 * Copyright © 2008 Renesas Solutions Corp.
5 * Copyright © 2008 Atom Create Engineering Co., Ltd.
6 *
7 * Based on fsl_elbc_nand.c, Copyright © 2006-2007 Freescale Semiconductor
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/delay.h>
27#include <linux/io.h>
28#include <linux/platform_device.h>
29
30#include <linux/mtd/mtd.h>
31#include <linux/mtd/nand.h>
32#include <linux/mtd/partitions.h>
33#include <linux/mtd/sh_flctl.h>
34
35static struct nand_ecclayout flctl_4secc_oob_16 = {
36 .eccbytes = 10,
37 .eccpos = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
38 .oobfree = {
39 {.offset = 12,
40 .length = 4} },
41};
42
43static struct nand_ecclayout flctl_4secc_oob_64 = {
44 .eccbytes = 10,
45 .eccpos = {48, 49, 50, 51, 52, 53, 54, 55, 56, 57},
46 .oobfree = {
47 {.offset = 60,
48 .length = 4} },
49};
50
51static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
52
53static struct nand_bbt_descr flctl_4secc_smallpage = {
54 .options = NAND_BBT_SCAN2NDPAGE,
55 .offs = 11,
56 .len = 1,
57 .pattern = scan_ff_pattern,
58};
59
60static struct nand_bbt_descr flctl_4secc_largepage = {
61 .options = 0,
62 .offs = 58,
63 .len = 2,
64 .pattern = scan_ff_pattern,
65};
66
67static void empty_fifo(struct sh_flctl *flctl)
68{
69 writel(0x000c0000, FLINTDMACR(flctl)); /* FIFO Clear */
70 writel(0x00000000, FLINTDMACR(flctl)); /* Clear Error flags */
71}
72
73static void start_translation(struct sh_flctl *flctl)
74{
75 writeb(TRSTRT, FLTRCR(flctl));
76}
77
78static void wait_completion(struct sh_flctl *flctl)
79{
80 uint32_t timeout = LOOP_TIMEOUT_MAX;
81
82 while (timeout--) {
83 if (readb(FLTRCR(flctl)) & TREND) {
84 writeb(0x0, FLTRCR(flctl));
85 return;
86 }
87 udelay(1);
88 }
89
90 printk(KERN_ERR "wait_completion(): Timeout occurred\n");
91 writeb(0x0, FLTRCR(flctl));
92}
93
94static void set_addr(struct mtd_info *mtd, int column, int page_addr)
95{
96 struct sh_flctl *flctl = mtd_to_flctl(mtd);
97 uint32_t addr = 0;
98
99 if (column == -1) {
100 addr = page_addr; /* ERASE1 */
101 } else if (page_addr != -1) {
102 /* SEQIN, READ0, etc.. */
103 if (flctl->page_size) {
104 addr = column & 0x0FFF;
105 addr |= (page_addr & 0xff) << 16;
106 addr |= ((page_addr >> 8) & 0xff) << 24;
107 /* larger than 128MB */
108 if (flctl->rw_ADRCNT == ADRCNT2_E) {
109 uint32_t addr2;
110 addr2 = (page_addr >> 16) & 0xff;
111 writel(addr2, FLADR2(flctl));
112 }
113 } else {
114 addr = column;
115 addr |= (page_addr & 0xff) << 8;
116 addr |= ((page_addr >> 8) & 0xff) << 16;
117 addr |= ((page_addr >> 16) & 0xff) << 24;
118 }
119 }
120 writel(addr, FLADR(flctl));
121}
122
123static void wait_rfifo_ready(struct sh_flctl *flctl)
124{
125 uint32_t timeout = LOOP_TIMEOUT_MAX;
126
127 while (timeout--) {
128 uint32_t val;
129 /* check FIFO */
130 val = readl(FLDTCNTR(flctl)) >> 16;
131 if (val & 0xFF)
132 return;
133 udelay(1);
134 }
135 printk(KERN_ERR "wait_rfifo_ready(): Timeout occurred\n");
136}
137
138static void wait_wfifo_ready(struct sh_flctl *flctl)
139{
140 uint32_t len, timeout = LOOP_TIMEOUT_MAX;
141
142 while (timeout--) {
143 /* check FIFO */
144 len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
145 if (len >= 4)
146 return;
147 udelay(1);
148 }
149 printk(KERN_ERR "wait_wfifo_ready(): Timeout occurred\n");
150}
151
152static int wait_recfifo_ready(struct sh_flctl *flctl)
153{
154 uint32_t timeout = LOOP_TIMEOUT_MAX;
155 int checked[4];
156 void __iomem *ecc_reg[4];
157 int i;
158 uint32_t data, size;
159
160 memset(checked, 0, sizeof(checked));
161
162 while (timeout--) {
163 size = readl(FLDTCNTR(flctl)) >> 24;
164 if (size & 0xFF)
165 return 0; /* success */
166
167 if (readl(FL4ECCCR(flctl)) & _4ECCFA)
168 return 1; /* can't correct */
169
170 udelay(1);
171 if (!(readl(FL4ECCCR(flctl)) & _4ECCEND))
172 continue;
173
174 /* start error correction */
175 ecc_reg[0] = FL4ECCRESULT0(flctl);
176 ecc_reg[1] = FL4ECCRESULT1(flctl);
177 ecc_reg[2] = FL4ECCRESULT2(flctl);
178 ecc_reg[3] = FL4ECCRESULT3(flctl);
179
180 for (i = 0; i < 3; i++) {
181 data = readl(ecc_reg[i]);
182 if (data != INIT_FL4ECCRESULT_VAL && !checked[i]) {
183 uint8_t org;
184 int index;
185
186 index = data >> 16;
187 org = flctl->done_buff[index];
188 flctl->done_buff[index] = org ^ (data & 0xFF);
189 checked[i] = 1;
190 }
191 }
192
193 writel(0, FL4ECCCR(flctl));
194 }
195
196 printk(KERN_ERR "wait_recfifo_ready(): Timeout occurred\n");
197 return 1; /* timeout */
198}
199
200static void wait_wecfifo_ready(struct sh_flctl *flctl)
201{
202 uint32_t timeout = LOOP_TIMEOUT_MAX;
203 uint32_t len;
204
205 while (timeout--) {
206 /* check FLECFIFO */
207 len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
208 if (len >= 4)
209 return;
210 udelay(1);
211 }
212 printk(KERN_ERR "wait_wecfifo_ready(): Timeout occurred\n");
213}
214
215static void read_datareg(struct sh_flctl *flctl, int offset)
216{
217 unsigned long data;
218 unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
219
220 wait_completion(flctl);
221
222 data = readl(FLDATAR(flctl));
223 *buf = le32_to_cpu(data);
224}
225
226static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
227{
228 int i, len_4align;
229 unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
230 void *fifo_addr = (void *)FLDTFIFO(flctl);
231
232 len_4align = (rlen + 3) / 4;
233
234 for (i = 0; i < len_4align; i++) {
235 wait_rfifo_ready(flctl);
236 buf[i] = readl(fifo_addr);
237 buf[i] = be32_to_cpu(buf[i]);
238 }
239}
240
241static int read_ecfiforeg(struct sh_flctl *flctl, uint8_t *buff)
242{
243 int i;
244 unsigned long *ecc_buf = (unsigned long *)buff;
245 void *fifo_addr = (void *)FLECFIFO(flctl);
246
247 for (i = 0; i < 4; i++) {
248 if (wait_recfifo_ready(flctl))
249 return 1;
250 ecc_buf[i] = readl(fifo_addr);
251 ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
252 }
253
254 return 0;
255}
256
257static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
258{
259 int i, len_4align;
260 unsigned long *data = (unsigned long *)&flctl->done_buff[offset];
261 void *fifo_addr = (void *)FLDTFIFO(flctl);
262
263 len_4align = (rlen + 3) / 4;
264 for (i = 0; i < len_4align; i++) {
265 wait_wfifo_ready(flctl);
266 writel(cpu_to_be32(data[i]), fifo_addr);
267 }
268}
269
270static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
271{
272 struct sh_flctl *flctl = mtd_to_flctl(mtd);
273 uint32_t flcmncr_val = readl(FLCMNCR(flctl));
274 uint32_t flcmdcr_val, addr_len_bytes = 0;
275
276 /* Set SNAND bit if page size is 2048 bytes */
277 if (flctl->page_size)
278 flcmncr_val |= SNAND_E;
279 else
280 flcmncr_val &= ~SNAND_E;
281
282 /* default FLCMDCR val */
283 flcmdcr_val = DOCMD1_E | DOADR_E;
284
285 /* Set for FLCMDCR */
286 switch (cmd) {
287 case NAND_CMD_ERASE1:
288 addr_len_bytes = flctl->erase_ADRCNT;
289 flcmdcr_val |= DOCMD2_E;
290 break;
291 case NAND_CMD_READ0:
292 case NAND_CMD_READOOB:
293 addr_len_bytes = flctl->rw_ADRCNT;
294 flcmdcr_val |= CDSRC_E;
295 break;
296 case NAND_CMD_SEQIN:
297 /* In this case cmd is READ0, READ1 or READOOB */
298 flcmdcr_val &= ~DOADR_E; /* ONLY execute 1st cmd */
299 break;
300 case NAND_CMD_PAGEPROG:
301 addr_len_bytes = flctl->rw_ADRCNT;
302 flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
303 break;
304 case NAND_CMD_READID:
305 flcmncr_val &= ~SNAND_E;
306 addr_len_bytes = ADRCNT_1;
307 break;
308 case NAND_CMD_STATUS:
309 case NAND_CMD_RESET:
310 flcmncr_val &= ~SNAND_E;
311 flcmdcr_val &= ~(DOADR_E | DOSR_E);
312 break;
313 default:
314 break;
315 }
316
317 /* Set address bytes parameter */
318 flcmdcr_val |= addr_len_bytes;
319
320 /* Now actually write */
321 writel(flcmncr_val, FLCMNCR(flctl));
322 writel(flcmdcr_val, FLCMDCR(flctl));
323 writel(flcmcdr_val, FLCMCDR(flctl));
324}
325
326static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
327 uint8_t *buf)
328{
329 int i, eccsize = chip->ecc.size;
330 int eccbytes = chip->ecc.bytes;
331 int eccsteps = chip->ecc.steps;
332 uint8_t *p = buf;
333 struct sh_flctl *flctl = mtd_to_flctl(mtd);
334
335 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
336 chip->read_buf(mtd, p, eccsize);
337
338 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
339 if (flctl->hwecc_cant_correct[i])
340 mtd->ecc_stats.failed++;
341 else
342 mtd->ecc_stats.corrected += 0;
343 }
344
345 return 0;
346}
347
348static void flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
349 const uint8_t *buf)
350{
351 int i, eccsize = chip->ecc.size;
352 int eccbytes = chip->ecc.bytes;
353 int eccsteps = chip->ecc.steps;
354 const uint8_t *p = buf;
355
356 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
357 chip->write_buf(mtd, p, eccsize);
358}
359
360static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
361{
362 struct sh_flctl *flctl = mtd_to_flctl(mtd);
363 int sector, page_sectors;
364
365 if (flctl->page_size)
366 page_sectors = 4;
367 else
368 page_sectors = 1;
369
370 writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
371 FLCMNCR(flctl));
372
373 set_cmd_regs(mtd, NAND_CMD_READ0,
374 (NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
375
376 for (sector = 0; sector < page_sectors; sector++) {
377 int ret;
378
379 empty_fifo(flctl);
380 writel(readl(FLCMDCR(flctl)) | 1, FLCMDCR(flctl));
381 writel(page_addr << 2 | sector, FLADR(flctl));
382
383 start_translation(flctl);
384 read_fiforeg(flctl, 512, 512 * sector);
385
386 ret = read_ecfiforeg(flctl,
387 &flctl->done_buff[mtd->writesize + 16 * sector]);
388
389 if (ret)
390 flctl->hwecc_cant_correct[sector] = 1;
391
392 writel(0x0, FL4ECCCR(flctl));
393 wait_completion(flctl);
394 }
395 writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
396 FLCMNCR(flctl));
397}
398
399static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
400{
401 struct sh_flctl *flctl = mtd_to_flctl(mtd);
402
403 set_cmd_regs(mtd, NAND_CMD_READ0,
404 (NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
405
406 empty_fifo(flctl);
407 if (flctl->page_size) {
408 int i;
409 /* In case the page size is 2k */
410 for (i = 0; i < 16 * 3; i++)
411 flctl->done_buff[i] = 0xFF;
412
413 set_addr(mtd, 3 * 528 + 512, page_addr);
414 writel(16, FLDTCNTR(flctl));
415
416 start_translation(flctl);
417 read_fiforeg(flctl, 16, 16 * 3);
418 wait_completion(flctl);
419 } else {
420 /* In case the page size is 512 bytes */
421 set_addr(mtd, 512, page_addr);
422 writel(16, FLDTCNTR(flctl));
423
424 start_translation(flctl);
425 read_fiforeg(flctl, 16, 0);
426 wait_completion(flctl);
427 }
428}
429
430static void execmd_write_page_sector(struct mtd_info *mtd)
431{
432 struct sh_flctl *flctl = mtd_to_flctl(mtd);
433 int i, page_addr = flctl->seqin_page_addr;
434 int sector, page_sectors;
435
436 if (flctl->page_size)
437 page_sectors = 4;
438 else
439 page_sectors = 1;
440
441 writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
442
443 set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
444 (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
445
446 for (sector = 0; sector < page_sectors; sector++) {
447 empty_fifo(flctl);
448 writel(readl(FLCMDCR(flctl)) | 1, FLCMDCR(flctl));
449 writel(page_addr << 2 | sector, FLADR(flctl));
450
451 start_translation(flctl);
452 write_fiforeg(flctl, 512, 512 * sector);
453
454 for (i = 0; i < 4; i++) {
455 wait_wecfifo_ready(flctl); /* wait for write ready */
456 writel(0xFFFFFFFF, FLECFIFO(flctl));
457 }
458 wait_completion(flctl);
459 }
460
461 writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
462}
463
464static void execmd_write_oob(struct mtd_info *mtd)
465{
466 struct sh_flctl *flctl = mtd_to_flctl(mtd);
467 int page_addr = flctl->seqin_page_addr;
468 int sector, page_sectors;
469
470 if (flctl->page_size) {
471 sector = 3;
472 page_sectors = 4;
473 } else {
474 sector = 0;
475 page_sectors = 1;
476 }
477
478 set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
479 (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
480
481 for (; sector < page_sectors; sector++) {
482 empty_fifo(flctl);
483 set_addr(mtd, sector * 528 + 512, page_addr);
484 writel(16, FLDTCNTR(flctl)); /* set write size */
485
486 start_translation(flctl);
487 write_fiforeg(flctl, 16, 16 * sector);
488 wait_completion(flctl);
489 }
490}
491
492static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
493 int column, int page_addr)
494{
495 struct sh_flctl *flctl = mtd_to_flctl(mtd);
496 uint32_t read_cmd = 0;
497
498 flctl->read_bytes = 0;
499 if (command != NAND_CMD_PAGEPROG)
500 flctl->index = 0;
501
502 switch (command) {
503 case NAND_CMD_READ1:
504 case NAND_CMD_READ0:
505 if (flctl->hwecc) {
506 /* read page with hwecc */
507 execmd_read_page_sector(mtd, page_addr);
508 break;
509 }
510 empty_fifo(flctl);
511 if (flctl->page_size)
512 set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
513 | command);
514 else
515 set_cmd_regs(mtd, command, command);
516
517 set_addr(mtd, 0, page_addr);
518
519 flctl->read_bytes = mtd->writesize + mtd->oobsize;
520 flctl->index += column;
521 goto read_normal_exit;
522
523 case NAND_CMD_READOOB:
524 if (flctl->hwecc) {
525 /* read page with hwecc */
526 execmd_read_oob(mtd, page_addr);
527 break;
528 }
529
530 empty_fifo(flctl);
531 if (flctl->page_size) {
532 set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
533 | NAND_CMD_READ0);
534 set_addr(mtd, mtd->writesize, page_addr);
535 } else {
536 set_cmd_regs(mtd, command, command);
537 set_addr(mtd, 0, page_addr);
538 }
539 flctl->read_bytes = mtd->oobsize;
540 goto read_normal_exit;
541
542 case NAND_CMD_READID:
543 empty_fifo(flctl);
544 set_cmd_regs(mtd, command, command);
545 set_addr(mtd, 0, 0);
546
547 flctl->read_bytes = 4;
548 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
549 start_translation(flctl);
550 read_datareg(flctl, 0); /* read and end */
551 break;
552
553 case NAND_CMD_ERASE1:
554 flctl->erase1_page_addr = page_addr;
555 break;
556
557 case NAND_CMD_ERASE2:
558 set_cmd_regs(mtd, NAND_CMD_ERASE1,
559 (command << 8) | NAND_CMD_ERASE1);
560 set_addr(mtd, -1, flctl->erase1_page_addr);
561 start_translation(flctl);
562 wait_completion(flctl);
563 break;
564
565 case NAND_CMD_SEQIN:
566 if (!flctl->page_size) {
567 /* output read command */
568 if (column >= mtd->writesize) {
569 column -= mtd->writesize;
570 read_cmd = NAND_CMD_READOOB;
571 } else if (column < 256) {
572 read_cmd = NAND_CMD_READ0;
573 } else {
574 column -= 256;
575 read_cmd = NAND_CMD_READ1;
576 }
577 }
578 flctl->seqin_column = column;
579 flctl->seqin_page_addr = page_addr;
580 flctl->seqin_read_cmd = read_cmd;
581 break;
582
583 case NAND_CMD_PAGEPROG:
584 empty_fifo(flctl);
585 if (!flctl->page_size) {
586 set_cmd_regs(mtd, NAND_CMD_SEQIN,
587 flctl->seqin_read_cmd);
588 set_addr(mtd, -1, -1);
589 writel(0, FLDTCNTR(flctl)); /* set 0 size */
590 start_translation(flctl);
591 wait_completion(flctl);
592 }
593 if (flctl->hwecc) {
594 /* write page with hwecc */
595 if (flctl->seqin_column == mtd->writesize)
596 execmd_write_oob(mtd);
597 else if (!flctl->seqin_column)
598 execmd_write_page_sector(mtd);
599 else
600 printk(KERN_ERR "Invalid address !?\n");
601 break;
602 }
603 set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
604 set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
605 writel(flctl->index, FLDTCNTR(flctl)); /* set write size */
606 start_translation(flctl);
607 write_fiforeg(flctl, flctl->index, 0);
608 wait_completion(flctl);
609 break;
610
611 case NAND_CMD_STATUS:
612 set_cmd_regs(mtd, command, command);
613 set_addr(mtd, -1, -1);
614
615 flctl->read_bytes = 1;
616 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
617 start_translation(flctl);
618 read_datareg(flctl, 0); /* read and end */
619 break;
620
621 case NAND_CMD_RESET:
622 set_cmd_regs(mtd, command, command);
623 set_addr(mtd, -1, -1);
624
625 writel(0, FLDTCNTR(flctl)); /* set 0 size */
626 start_translation(flctl);
627 wait_completion(flctl);
628 break;
629
630 default:
631 break;
632 }
633 return;
634
635read_normal_exit:
636 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
637 start_translation(flctl);
638 read_fiforeg(flctl, flctl->read_bytes, 0);
639 wait_completion(flctl);
640 return;
641}
642
643static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
644{
645 struct sh_flctl *flctl = mtd_to_flctl(mtd);
646 uint32_t flcmncr_val = readl(FLCMNCR(flctl));
647
648 switch (chipnr) {
649 case -1:
650 flcmncr_val &= ~CE0_ENABLE;
651 writel(flcmncr_val, FLCMNCR(flctl));
652 break;
653 case 0:
654 flcmncr_val |= CE0_ENABLE;
655 writel(flcmncr_val, FLCMNCR(flctl));
656 break;
657 default:
658 BUG();
659 }
660}
661
662static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
663{
664 struct sh_flctl *flctl = mtd_to_flctl(mtd);
665 int i, index = flctl->index;
666
667 for (i = 0; i < len; i++)
668 flctl->done_buff[index + i] = buf[i];
669 flctl->index += len;
670}
671
672static uint8_t flctl_read_byte(struct mtd_info *mtd)
673{
674 struct sh_flctl *flctl = mtd_to_flctl(mtd);
675 int index = flctl->index;
676 uint8_t data;
677
678 data = flctl->done_buff[index];
679 flctl->index++;
680 return data;
681}
682
683static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
684{
685 int i;
686
687 for (i = 0; i < len; i++)
688 buf[i] = flctl_read_byte(mtd);
689}
690
691static int flctl_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
692{
693 int i;
694
695 for (i = 0; i < len; i++)
696 if (buf[i] != flctl_read_byte(mtd))
697 return -EFAULT;
698 return 0;
699}
700
701static void flctl_register_init(struct sh_flctl *flctl, unsigned long val)
702{
703 writel(val, FLCMNCR(flctl));
704}
705
706static int flctl_chip_init_tail(struct mtd_info *mtd)
707{
708 struct sh_flctl *flctl = mtd_to_flctl(mtd);
709 struct nand_chip *chip = &flctl->chip;
710
711 if (mtd->writesize == 512) {
712 flctl->page_size = 0;
713 if (chip->chipsize > (32 << 20)) {
714 /* larger than 32MB */
715 flctl->rw_ADRCNT = ADRCNT_4;
716 flctl->erase_ADRCNT = ADRCNT_3;
717 } else if (chip->chipsize > (2 << 16)) {
718 /* larger than 128KB */
719 flctl->rw_ADRCNT = ADRCNT_3;
720 flctl->erase_ADRCNT = ADRCNT_2;
721 } else {
722 flctl->rw_ADRCNT = ADRCNT_2;
723 flctl->erase_ADRCNT = ADRCNT_1;
724 }
725 } else {
726 flctl->page_size = 1;
727 if (chip->chipsize > (128 << 20)) {
728 /* larger than 128MB */
729 flctl->rw_ADRCNT = ADRCNT2_E;
730 flctl->erase_ADRCNT = ADRCNT_3;
731 } else if (chip->chipsize > (8 << 16)) {
732 /* larger than 512KB */
733 flctl->rw_ADRCNT = ADRCNT_4;
734 flctl->erase_ADRCNT = ADRCNT_2;
735 } else {
736 flctl->rw_ADRCNT = ADRCNT_3;
737 flctl->erase_ADRCNT = ADRCNT_1;
738 }
739 }
740
741 if (flctl->hwecc) {
742 if (mtd->writesize == 512) {
743 chip->ecc.layout = &flctl_4secc_oob_16;
744 chip->badblock_pattern = &flctl_4secc_smallpage;
745 } else {
746 chip->ecc.layout = &flctl_4secc_oob_64;
747 chip->badblock_pattern = &flctl_4secc_largepage;
748 }
749
750 chip->ecc.size = 512;
751 chip->ecc.bytes = 10;
752 chip->ecc.read_page = flctl_read_page_hwecc;
753 chip->ecc.write_page = flctl_write_page_hwecc;
754 chip->ecc.mode = NAND_ECC_HW;
755
756 /* 4 symbols ECC enabled */
757 writel(readl(FLCMNCR(flctl)) | _4ECCEN | ECCPOS2 | ECCPOS_02,
758 FLCMNCR(flctl));
759 } else {
760 chip->ecc.mode = NAND_ECC_SOFT;
761 }
762
763 return 0;
764}
765
766static int __init flctl_probe(struct platform_device *pdev)
767{
768 struct resource *res;
769 struct sh_flctl *flctl;
770 struct mtd_info *flctl_mtd;
771 struct nand_chip *nand;
772 struct sh_flctl_platform_data *pdata;
773 int ret;
774
775 pdata = pdev->dev.platform_data;
776 if (pdata == NULL) {
777 printk(KERN_ERR "sh_flctl platform_data not found.\n");
778 return -ENODEV;
779 }
780
781 flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL);
782 if (!flctl) {
783 printk(KERN_ERR "Unable to allocate NAND MTD dev structure.\n");
784 return -ENOMEM;
785 }
786
787 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
788 if (!res) {
789 printk(KERN_ERR "%s: resource not found.\n", __func__);
790 ret = -ENODEV;
791 goto err;
792 }
793
794 flctl->reg = ioremap(res->start, res->end - res->start + 1);
795 if (flctl->reg == NULL) {
796 printk(KERN_ERR "%s: ioremap error.\n", __func__);
797 ret = -ENOMEM;
798 goto err;
799 }
800
801 platform_set_drvdata(pdev, flctl);
802 flctl_mtd = &flctl->mtd;
803 nand = &flctl->chip;
804 flctl_mtd->priv = nand;
805 flctl->hwecc = pdata->has_hwecc;
806
807 flctl_register_init(flctl, pdata->flcmncr_val);
808
809 nand->options = NAND_NO_AUTOINCR;
810
811 /* Set address of hardware control function */
812 /* 20 us command delay time */
813 nand->chip_delay = 20;
814
815 nand->read_byte = flctl_read_byte;
816 nand->write_buf = flctl_write_buf;
817 nand->read_buf = flctl_read_buf;
818 nand->verify_buf = flctl_verify_buf;
819 nand->select_chip = flctl_select_chip;
820 nand->cmdfunc = flctl_cmdfunc;
821
822 ret = nand_scan_ident(flctl_mtd, 1);
823 if (ret)
824 goto err;
825
826 ret = flctl_chip_init_tail(flctl_mtd);
827 if (ret)
828 goto err;
829
830 ret = nand_scan_tail(flctl_mtd);
831 if (ret)
832 goto err;
833
834 add_mtd_partitions(flctl_mtd, pdata->parts, pdata->nr_parts);
835
836 return 0;
837
838err:
839 kfree(flctl);
840 return ret;
841}
842
843static int __exit flctl_remove(struct platform_device *pdev)
844{
845 struct sh_flctl *flctl = platform_get_drvdata(pdev);
846
847 nand_release(&flctl->mtd);
848 kfree(flctl);
849
850 return 0;
851}
852
853static struct platform_driver flctl_driver = {
854 .probe = flctl_probe,
855 .remove = flctl_remove,
856 .driver = {
857 .name = "sh_flctl",
858 .owner = THIS_MODULE,
859 },
860};
861
862static int __init flctl_nand_init(void)
863{
864 return platform_driver_register(&flctl_driver);
865}
866
867static void __exit flctl_nand_cleanup(void)
868{
869 platform_driver_unregister(&flctl_driver);
870}
871
872module_init(flctl_nand_init);
873module_exit(flctl_nand_cleanup);
874
875MODULE_LICENSE("GPL");
876MODULE_AUTHOR("Yoshihiro Shimoda");
877MODULE_DESCRIPTION("SuperH FLCTL driver");
878MODULE_ALIAS("platform:sh_flctl");
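
The driver binds to a platform device named "sh_flctl" and takes its
configuration from sh_flctl_platform_data (flcmncr_val, has_hwecc, parts,
nr_parts), as read in flctl_probe() above. A board-side registration sketch
follows; the register base, FLCMNCR value and partition layout are assumed,
illustrative values.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/sh_flctl.h>

static struct mtd_partition board_flctl_parts[] = {
	{
		.name	= "kernel",
		.offset	= 0,
		.size	= 4 * 1024 * 1024,
	}, {
		.name	= "rootfs",
		.offset	= MTDPART_OFS_APPEND,
		.size	= MTDPART_SIZ_FULL,
	},
};

static struct sh_flctl_platform_data board_flctl_data = {
	.parts		= board_flctl_parts,
	.nr_parts	= ARRAY_SIZE(board_flctl_parts),
	.flcmncr_val	= 0,		/* controller setup value, board specific */
	.has_hwecc	= 1,
};

static struct resource board_flctl_resources[] = {
	{
		.start	= 0xa4530000,	/* FLCTL register base, assumed */
		.end	= 0xa45300ff,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device board_flctl_device = {
	.name		= "sh_flctl",
	.id		= -1,
	.resource	= board_flctl_resources,
	.num_resources	= ARRAY_SIZE(board_flctl_resources),
	.dev		= {
		.platform_data	= &board_flctl_data,
	},
};

The device would then be registered from board setup code with
platform_device_register(&board_flctl_device).
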
diff --git a/drivers/mtd/nand/toto.c b/drivers/mtd/nand/toto.c
deleted file mode 100644
index bbf492e6830d..000000000000
--- a/drivers/mtd/nand/toto.c
+++ /dev/null
@@ -1,206 +0,0 @@
1/*
2 * drivers/mtd/nand/toto.c
3 *
4 * Copyright (c) 2003 Texas Instruments
5 *
6 * Derived from drivers/mtd/autcpu12.c
7 *
8 * Copyright (c) 2002 Thomas Gleixner <tgxl@linutronix.de>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * Overview:
15 * This is a device driver for the NAND flash device found on the
16 * TI fido board. It supports 32MiB and 64MiB cards
17 */
18
19#include <linux/slab.h>
20#include <linux/init.h>
21#include <linux/module.h>
22#include <linux/delay.h>
23#include <linux/mtd/mtd.h>
24#include <linux/mtd/nand.h>
25#include <linux/mtd/partitions.h>
26#include <asm/io.h>
27#include <asm/arch/hardware.h>
28#include <asm/sizes.h>
29#include <asm/arch/toto.h>
30#include <asm/arch-omap1510/hardware.h>
31#include <asm/arch/gpio.h>
32
33#define CONFIG_NAND_WORKAROUND 1
34
35/*
36 * MTD structure for TOTO board
37 */
38static struct mtd_info *toto_mtd = NULL;
39
40static unsigned long toto_io_base = OMAP_FLASH_1_BASE;
41
42/*
43 * Define partitions for flash devices
44 */
45
46static struct mtd_partition partition_info64M[] = {
47 { .name = "toto kernel partition 1",
48 .offset = 0,
49 .size = 2 * SZ_1M },
50 { .name = "toto file sys partition 2",
51 .offset = 2 * SZ_1M,
52 .size = 14 * SZ_1M },
53 { .name = "toto user partition 3",
54 .offset = 16 * SZ_1M,
55 .size = 16 * SZ_1M },
56 { .name = "toto devboard extra partition 4",
57 .offset = 32 * SZ_1M,
58 .size = 32 * SZ_1M },
59};
60
61static struct mtd_partition partition_info32M[] = {
62 { .name = "toto kernel partition 1",
63 .offset = 0,
64 .size = 2 * SZ_1M },
65 { .name = "toto file sys partition 2",
66 .offset = 2 * SZ_1M,
67 .size = 14 * SZ_1M },
68 { .name = "toto user partition 3",
69 .offset = 16 * SZ_1M,
70 .size = 16 * SZ_1M },
71};
72
73#define NUM_PARTITIONS32M 3
74#define NUM_PARTITIONS64M 4
75
76/*
77 * hardware specific access to control-lines
78 *
79 * ctrl:
80 * NAND_NCE: bit 0 -> bit 14 (0x4000)
81 * NAND_CLE: bit 1 -> bit 12 (0x1000)
82 * NAND_ALE: bit 2 -> bit 1 (0x0002)
83 */
84static void toto_hwcontrol(struct mtd_info *mtd, int cmd,
85 unsigned int ctrl)
86{
87 struct nand_chip *chip = mtd->priv;
88
89 if (ctrl & NAND_CTRL_CHANGE) {
90 unsigned long bits;
91
92 /* hopefully enough time for tc make proceding write to clear */
93 udelay(1);
94
95 bits = (~ctrl & NAND_NCE) << 14;
96 bits |= (ctrl & NAND_CLE) << 12;
97 bits |= (ctrl & NAND_ALE) >> 1;
98
99#warning Wild guess as gpiosetout() is nowhere defined in the kernel source - tglx
100 gpiosetout(0x5002, bits);
101
102#ifdef CONFIG_NAND_WORKAROUND
103 /* "some" dev boards busted, blue wired to rts2 :( */
104 rts2setout(2, (ctrl & NAND_CLE) << 1);
105#endif
106 /* allow time to ensure gpio state to over take memory write */
107 udelay(1);
108 }
109
110 if (cmd != NAND_CMD_NONE)
111 writeb(cmd, chip->IO_ADDR_W);
112}
113
114/*
115 * Main initialization routine
116 */
117static int __init toto_init(void)
118{
119 struct nand_chip *this;
120 int err = 0;
121
122 /* Allocate memory for MTD device structure and private data */
123 toto_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
124 if (!toto_mtd) {
125 printk(KERN_WARNING "Unable to allocate toto NAND MTD device structure.\n");
126 err = -ENOMEM;
127 goto out;
128 }
129
130 /* Get pointer to private data */
131 this = (struct nand_chip *)(&toto_mtd[1]);
132
133 /* Initialize structures */
134 memset(toto_mtd, 0, sizeof(struct mtd_info));
135 memset(this, 0, sizeof(struct nand_chip));
136
137 /* Link the private data with the MTD structure */
138 toto_mtd->priv = this;
139 toto_mtd->owner = THIS_MODULE;
140
141 /* Set address of NAND IO lines */
142 this->IO_ADDR_R = toto_io_base;
143 this->IO_ADDR_W = toto_io_base;
144 this->cmd_ctrl = toto_hwcontrol;
145 this->dev_ready = NULL;
146 /* 25 us command delay time */
147 this->chip_delay = 30;
148 this->ecc.mode = NAND_ECC_SOFT;
149
150 /* Scan to find existance of the device */
151 if (nand_scan(toto_mtd, 1)) {
152 err = -ENXIO;
153 goto out_mtd;
154 }
155
156 /* Register the partitions */
157 switch (toto_mtd->size) {
158 case SZ_64M:
159 add_mtd_partitions(toto_mtd, partition_info64M, NUM_PARTITIONS64M);
160 break;
161 case SZ_32M:
162 add_mtd_partitions(toto_mtd, partition_info32M, NUM_PARTITIONS32M);
163 break;
164 default:{
165 printk(KERN_WARNING "Unsupported Nand device\n");
166 err = -ENXIO;
167 goto out_buf;
168 }
169 }
170
171 gpioreserve(NAND_MASK); /* claim our gpios */
172 archflashwp(0, 0); /* open up flash for writing */
173
174 goto out;
175
176 out_mtd:
177 kfree(toto_mtd);
178 out:
179 return err;
180}
181
182module_init(toto_init);
183
184/*
185 * Clean up routine
186 */
187static void __exit toto_cleanup(void)
188{
189 /* Release resources, unregister device */
190 nand_release(toto_mtd);
191
192 /* Free the MTD device structure */
193 kfree(toto_mtd);
194
195 /* stop flash writes */
196 archflashwp(0, 1);
197
198 /* release gpios to system */
199 gpiorelease(NAND_MASK);
200}
201
202module_exit(toto_cleanup);
203
204MODULE_LICENSE("GPL");
205MODULE_AUTHOR("Richard Woodruff <r-woodruff2@ti.com>");
206MODULE_DESCRIPTION("Glue layer for NAND flash on toto board");
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index 4f80c2fd89af..9e45b3f39c0e 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -20,7 +20,6 @@
20#include <linux/mtd/partitions.h> 20#include <linux/mtd/partitions.h>
21 21
22int __devinit of_mtd_parse_partitions(struct device *dev, 22int __devinit of_mtd_parse_partitions(struct device *dev,
23 struct mtd_info *mtd,
24 struct device_node *node, 23 struct device_node *node,
25 struct mtd_partition **pparts) 24 struct mtd_partition **pparts)
26{ 25{
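
For callers the only change is dropping the now-unused mtd argument. A
minimal sketch of the updated call; dev, node, mtd and the local variables
belong to the calling driver and are illustrative:

	struct mtd_partition *parts;
	int nr_parts;

	nr_parts = of_mtd_parse_partitions(dev, node, &parts);
	if (nr_parts > 0)
		add_mtd_partitions(mtd, parts, nr_parts);
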
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index cb41cbca64f7..79fa79e8f8de 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -27,8 +27,16 @@ config MTD_ONENAND_GENERIC
27 help 27 help
28 Support for OneNAND flash via platform device driver. 28 Support for OneNAND flash via platform device driver.
29 29
30config MTD_ONENAND_OMAP2
31 tristate "OneNAND on OMAP2/OMAP3 support"
32 depends on MTD_ONENAND && (ARCH_OMAP2 || ARCH_OMAP3)
33 help
34 Support for a OneNAND flash device connected to an OMAP2/OMAP3 CPU
35 via the GPMC memory controller.
36
30config MTD_ONENAND_OTP 37config MTD_ONENAND_OTP
31 bool "OneNAND OTP Support" 38 bool "OneNAND OTP Support"
39 select HAVE_MTD_OTP
32 help 40 help
33 One Block of the NAND Flash Array memory is reserved as 41 One Block of the NAND Flash Array memory is reserved as
34 a One-Time Programmable Block memory area. 42 a One-Time Programmable Block memory area.
diff --git a/drivers/mtd/onenand/Makefile b/drivers/mtd/onenand/Makefile
index 4d2eacfd7e11..64b6cc61a520 100644
--- a/drivers/mtd/onenand/Makefile
+++ b/drivers/mtd/onenand/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_MTD_ONENAND) += onenand.o
7 7
8# Board specific. 8# Board specific.
9obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o 9obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o
10obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o
10 11
11# Simulator 12# Simulator
12obj-$(CONFIG_MTD_ONENAND_SIM) += onenand_sim.o 13obj-$(CONFIG_MTD_ONENAND_SIM) += onenand_sim.o
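
The omap2.c driver added below is configured through
omap_onenand_platform_data (cs, gpio_irq, dma_channel, onenand_setup, parts,
nr_parts), as read in omap2_onenand_probe(). A board-side sketch follows;
the GPMC chip select, GPIO number and the platform device name binding are
assumptions, not taken from this patch.

static struct omap_onenand_platform_data board_onenand_data = {
	.cs		= 0,		/* GPMC chip select, board specific */
	.gpio_irq	= 65,		/* INT pin GPIO; probe clears it when DMA is off */
	.dma_channel	= -1,		/* -1 keeps the driver in PIO mode */
	.onenand_setup	= NULL,		/* optional GPMC timing callback */
	.parts		= NULL,
	.nr_parts	= 0,
};

static struct platform_device board_onenand_device = {
	.name	= "omap2-onenand",	/* DRIVER_NAME in omap2.c */
	.id	= -1,
	.dev	= {
		.platform_data	= &board_onenand_data,
	},
};
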
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
new file mode 100644
index 000000000000..8387e05daae2
--- /dev/null
+++ b/drivers/mtd/onenand/omap2.c
@@ -0,0 +1,802 @@
1/*
2 * linux/drivers/mtd/onenand/omap2.c
3 *
4 * OneNAND driver for OMAP2 / OMAP3
5 *
6 * Copyright © 2005-2006 Nokia Corporation
7 *
8 * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
9 * IRQ and DMA support written by Timo Teras
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 as published by
13 * the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 * You should have received a copy of the GNU General Public License along with
21 * this program; see the file COPYING. If not, write to the Free Software
22 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 *
24 */
25
26#include <linux/device.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/mtd/mtd.h>
30#include <linux/mtd/onenand.h>
31#include <linux/mtd/partitions.h>
32#include <linux/platform_device.h>
33#include <linux/interrupt.h>
34#include <linux/delay.h>
35
36#include <asm/io.h>
37#include <asm/mach/flash.h>
38#include <asm/arch/gpmc.h>
39#include <asm/arch/onenand.h>
40#include <asm/arch/gpio.h>
41#include <asm/arch/gpmc.h>
42#include <asm/arch/pm.h>
43
44#include <linux/dma-mapping.h>
45#include <asm/dma-mapping.h>
46#include <asm/arch/dma.h>
47
48#include <asm/arch/board.h>
49
50#define DRIVER_NAME "omap2-onenand"
51
52#define ONENAND_IO_SIZE SZ_128K
53#define ONENAND_BUFRAM_SIZE (1024 * 5)
54
55struct omap2_onenand {
56 struct platform_device *pdev;
57 int gpmc_cs;
58 unsigned long phys_base;
59 int gpio_irq;
60 struct mtd_info mtd;
61 struct mtd_partition *parts;
62 struct onenand_chip onenand;
63 struct completion irq_done;
64 struct completion dma_done;
65 int dma_channel;
66 int freq;
67 int (*setup)(void __iomem *base, int freq);
68};
69
70static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
71{
72 struct omap2_onenand *c = data;
73
74 complete(&c->dma_done);
75}
76
77static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
78{
79 struct omap2_onenand *c = dev_id;
80
81 complete(&c->irq_done);
82
83 return IRQ_HANDLED;
84}
85
86static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
87{
88 return readw(c->onenand.base + reg);
89}
90
91static inline void write_reg(struct omap2_onenand *c, unsigned short value,
92 int reg)
93{
94 writew(value, c->onenand.base + reg);
95}
96
97static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
98{
99 printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
100 msg, state, ctrl, intr);
101}
102
103static void wait_warn(char *msg, int state, unsigned int ctrl,
104 unsigned int intr)
105{
106 printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
107 "intr 0x%04x\n", msg, state, ctrl, intr);
108}
109
110static int omap2_onenand_wait(struct mtd_info *mtd, int state)
111{
112 struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
113 unsigned int intr = 0;
114 unsigned int ctrl;
115 unsigned long timeout;
116 u32 syscfg;
117
118 if (state == FL_RESETING) {
119 int i;
120
121 for (i = 0; i < 20; i++) {
122 udelay(1);
123 intr = read_reg(c, ONENAND_REG_INTERRUPT);
124 if (intr & ONENAND_INT_MASTER)
125 break;
126 }
127 ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
128 if (ctrl & ONENAND_CTRL_ERROR) {
129 wait_err("controller error", state, ctrl, intr);
130 return -EIO;
131 }
132 if (!(intr & ONENAND_INT_RESET)) {
133 wait_err("timeout", state, ctrl, intr);
134 return -EIO;
135 }
136 return 0;
137 }
138
139 if (state != FL_READING) {
140 int result;
141
142 /* Turn interrupts on */
143 syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
144 if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
145 syscfg |= ONENAND_SYS_CFG1_IOBE;
146 write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
147 if (cpu_is_omap34xx())
148 /* Add a delay to let GPIO settle */
149 syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
150 }
151
152 INIT_COMPLETION(c->irq_done);
153 if (c->gpio_irq) {
154 result = omap_get_gpio_datain(c->gpio_irq);
155 if (result == -1) {
156 ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
157 intr = read_reg(c, ONENAND_REG_INTERRUPT);
158 wait_err("gpio error", state, ctrl, intr);
159 return -EIO;
160 }
161 } else
162 result = 0;
163 if (result == 0) {
164 int retry_cnt = 0;
165retry:
166 result = wait_for_completion_timeout(&c->irq_done,
167 msecs_to_jiffies(20));
168 if (result == 0) {
169 /* Timeout after 20ms */
170 ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
171 if (ctrl & ONENAND_CTRL_ONGO) {
172 /*
173 * The operation seems to be still going
174 * so give it some more time.
175 */
176 retry_cnt += 1;
177 if (retry_cnt < 3)
178 goto retry;
179 intr = read_reg(c,
180 ONENAND_REG_INTERRUPT);
181 wait_err("timeout", state, ctrl, intr);
182 return -EIO;
183 }
184 intr = read_reg(c, ONENAND_REG_INTERRUPT);
185 if ((intr & ONENAND_INT_MASTER) == 0)
186 wait_warn("timeout", state, ctrl, intr);
187 }
188 }
189 } else {
190 int retry_cnt = 0;
191
192 /* Turn interrupts off */
193 syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
194 syscfg &= ~ONENAND_SYS_CFG1_IOBE;
195 write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
196
197 timeout = jiffies + msecs_to_jiffies(20);
198 while (1) {
199 if (time_before(jiffies, timeout)) {
200 intr = read_reg(c, ONENAND_REG_INTERRUPT);
201 if (intr & ONENAND_INT_MASTER)
202 break;
203 } else {
204 /* Timeout after 20ms */
205 ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
206 if (ctrl & ONENAND_CTRL_ONGO) {
207 /*
208 * The operation seems to be still going
209 * so give it some more time.
210 */
211 retry_cnt += 1;
212 if (retry_cnt < 3) {
213 timeout = jiffies +
214 msecs_to_jiffies(20);
215 continue;
216 }
217 }
218 break;
219 }
220 }
221 }
222
223 intr = read_reg(c, ONENAND_REG_INTERRUPT);
224 ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
225
226 if (intr & ONENAND_INT_READ) {
227 int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);
228
229 if (ecc) {
230 unsigned int addr1, addr8;
231
232 addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
233 addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
234 if (ecc & ONENAND_ECC_2BIT_ALL) {
235 printk(KERN_ERR "onenand_wait: ECC error = "
236 "0x%04x, addr1 %#x, addr8 %#x\n",
237 ecc, addr1, addr8);
238 mtd->ecc_stats.failed++;
239 return -EBADMSG;
240 } else if (ecc & ONENAND_ECC_1BIT_ALL) {
241 printk(KERN_NOTICE "onenand_wait: correctable "
242 "ECC error = 0x%04x, addr1 %#x, "
243 "addr8 %#x\n", ecc, addr1, addr8);
244 mtd->ecc_stats.corrected++;
245 }
246 }
247 } else if (state == FL_READING) {
248 wait_err("timeout", state, ctrl, intr);
249 return -EIO;
250 }
251
252 if (ctrl & ONENAND_CTRL_ERROR) {
253 wait_err("controller error", state, ctrl, intr);
254 if (ctrl & ONENAND_CTRL_LOCK)
255 printk(KERN_ERR "onenand_wait: "
256 "Device is write protected!!!\n");
257 return -EIO;
258 }
259
260 if (ctrl & 0xFE9F)
261 wait_warn("unexpected controller status", state, ctrl, intr);
262
263 return 0;
264}
265
266static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
267{
268 struct onenand_chip *this = mtd->priv;
269
270 if (ONENAND_CURRENT_BUFFERRAM(this)) {
271 if (area == ONENAND_DATARAM)
272 return mtd->writesize;
273 if (area == ONENAND_SPARERAM)
274 return mtd->oobsize;
275 }
276
277 return 0;
278}
279
280#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)
281
282static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
283 unsigned char *buffer, int offset,
284 size_t count)
285{
286 struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
287 struct onenand_chip *this = mtd->priv;
288 dma_addr_t dma_src, dma_dst;
289 int bram_offset;
290 unsigned long timeout;
291 void *buf = (void *)buffer;
292 size_t xtra;
293 volatile unsigned *done;
294
295 bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
296 if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
297 goto out_copy;
298
299 if (buf >= high_memory) {
300 struct page *p1;
301
302 if (((size_t)buf & PAGE_MASK) !=
303 ((size_t)(buf + count - 1) & PAGE_MASK))
304 goto out_copy;
305 p1 = vmalloc_to_page(buf);
306 if (!p1)
307 goto out_copy;
308 buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
309 }
310
311 xtra = count & 3;
312 if (xtra) {
313 count -= xtra;
314 memcpy(buf + count, this->base + bram_offset + count, xtra);
315 }
316
317 dma_src = c->phys_base + bram_offset;
318 dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
319 if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
320 dev_err(&c->pdev->dev,
321 "Couldn't DMA map a %d byte buffer\n",
322 count);
323 goto out_copy;
324 }
325
326 omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
327 count >> 2, 1, 0, 0, 0);
328 omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
329 dma_src, 0, 0);
330 omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
331 dma_dst, 0, 0);
332
333 INIT_COMPLETION(c->dma_done);
334 omap_start_dma(c->dma_channel);
335
336 timeout = jiffies + msecs_to_jiffies(20);
337 done = &c->dma_done.done;
338 while (time_before(jiffies, timeout))
339 if (*done)
340 break;
341
342 dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
343
344 if (!*done) {
345 dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
346 goto out_copy;
347 }
348
349 return 0;
350
351out_copy:
352 memcpy(buf, this->base + bram_offset, count);
353 return 0;
354}
355
356static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
357 const unsigned char *buffer,
358 int offset, size_t count)
359{
360 struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
361 struct onenand_chip *this = mtd->priv;
362 dma_addr_t dma_src, dma_dst;
363 int bram_offset;
364 unsigned long timeout;
365 void *buf = (void *)buffer;
366 volatile unsigned *done;
367
368 bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
369 if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
370 goto out_copy;
371
372 /* panic_write() may be in an interrupt context */
373 if (in_interrupt())
374 goto out_copy;
375
376 if (buf >= high_memory) {
377 struct page *p1;
378
379 if (((size_t)buf & PAGE_MASK) !=
380 ((size_t)(buf + count - 1) & PAGE_MASK))
381 goto out_copy;
382 p1 = vmalloc_to_page(buf);
383 if (!p1)
384 goto out_copy;
385 buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
386 }
387
388 dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
389 dma_dst = c->phys_base + bram_offset;
390 if (dma_mapping_error(&c->pdev->dev, dma_src)) {
391 dev_err(&c->pdev->dev,
392 "Couldn't DMA map a %d byte buffer\n",
393 count);
394 return -1;
395 }
396
397 omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
398 count >> 2, 1, 0, 0, 0);
399 omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
400 dma_src, 0, 0);
401 omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
402 dma_dst, 0, 0);
403
404 INIT_COMPLETION(c->dma_done);
405 omap_start_dma(c->dma_channel);
406
407 timeout = jiffies + msecs_to_jiffies(20);
408 done = &c->dma_done.done;
409 while (time_before(jiffies, timeout))
410 if (*done)
411 break;
412
413 dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE);
414
415 if (!*done) {
416 dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
417 goto out_copy;
418 }
419
420 return 0;
421
422out_copy:
423 memcpy(this->base + bram_offset, buf, count);
424 return 0;
425}
426
427#else
428
429int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
430 unsigned char *buffer, int offset,
431 size_t count);
432
433int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
434 const unsigned char *buffer,
435 int offset, size_t count);
436
437#endif
438
439#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)
440
441static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
442 unsigned char *buffer, int offset,
443 size_t count)
444{
445 struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
446 struct onenand_chip *this = mtd->priv;
447 dma_addr_t dma_src, dma_dst;
448 int bram_offset;
449
450 bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
451 /* DMA is not used. Revisit PM requirements before enabling it. */
452 if (1 || (c->dma_channel < 0) ||
453 ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
454 (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
455 memcpy(buffer, (__force void *)(this->base + bram_offset),
456 count);
457 return 0;
458 }
459
460 dma_src = c->phys_base + bram_offset;
461 dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
462 DMA_FROM_DEVICE);
463 if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
464 dev_err(&c->pdev->dev,
465 "Couldn't DMA map a %d byte buffer\n",
466 count);
467 return -1;
468 }
469
470 omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
471 count / 4, 1, 0, 0, 0);
472 omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
473 dma_src, 0, 0);
474 omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
475 dma_dst, 0, 0);
476
477 INIT_COMPLETION(c->dma_done);
478 omap_start_dma(c->dma_channel);
479 wait_for_completion(&c->dma_done);
480
481 dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
482
483 return 0;
484}
485
486static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
487 const unsigned char *buffer,
488 int offset, size_t count)
489{
490 struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
491 struct onenand_chip *this = mtd->priv;
492 dma_addr_t dma_src, dma_dst;
493 int bram_offset;
494
495 bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
496 /* DMA is not used. Revisit PM requirements before enabling it. */
497 if (1 || (c->dma_channel < 0) ||
498 ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
499 (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
500 memcpy((__force void *)(this->base + bram_offset), buffer,
501 count);
502 return 0;
503 }
504
505 dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
506 DMA_TO_DEVICE);
507 dma_dst = c->phys_base + bram_offset;
508 if (dma_mapping_error(&c->pdev->dev, dma_src)) {
509 dev_err(&c->pdev->dev,
510 "Couldn't DMA map a %d byte buffer\n",
511 count);
512 return -1;
513 }
514
515 omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
516 count / 2, 1, 0, 0, 0);
517 omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
518 dma_src, 0, 0);
519 omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
520 dma_dst, 0, 0);
521
522 INIT_COMPLETION(c->dma_done);
523 omap_start_dma(c->dma_channel);
524 wait_for_completion(&c->dma_done);
525
526 dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE);
527
528 return 0;
529}
530
531#else
532
533int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
534 unsigned char *buffer, int offset,
535 size_t count);
536
537int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
538 const unsigned char *buffer,
539 int offset, size_t count);
540
541#endif
542
543static struct platform_driver omap2_onenand_driver;
544
545static int __adjust_timing(struct device *dev, void *data)
546{
547 int ret = 0;
548 struct omap2_onenand *c;
549
550 c = dev_get_drvdata(dev);
551
552 BUG_ON(c->setup == NULL);
553
554 /* DMA is not in use so this is all that is needed */
555 /* Revisit for OMAP3! */
556 ret = c->setup(c->onenand.base, c->freq);
557
558 return ret;
559}
560
561int omap2_onenand_rephase(void)
562{
563 return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
564 NULL, __adjust_timing);
565}
566
567static void __devexit omap2_onenand_shutdown(struct platform_device *pdev)
568{
569 struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
570
571 /* With certain content in the buffer RAM, the OMAP boot ROM code
572 * can recognize the flash chip incorrectly. Zero it out before
573 * soft reset.
574 */
575 memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
576}
577
578static int __devinit omap2_onenand_probe(struct platform_device *pdev)
579{
580 struct omap_onenand_platform_data *pdata;
581 struct omap2_onenand *c;
582 int r;
583
584 pdata = pdev->dev.platform_data;
585 if (pdata == NULL) {
586 dev_err(&pdev->dev, "platform data missing\n");
587 return -ENODEV;
588 }
589
590 c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
591 if (!c)
592 return -ENOMEM;
593
594 init_completion(&c->irq_done);
595 init_completion(&c->dma_done);
596 c->gpmc_cs = pdata->cs;
597 c->gpio_irq = pdata->gpio_irq;
598 c->dma_channel = pdata->dma_channel;
599 if (c->dma_channel < 0) {
600 /* No DMA channel: the interrupt GPIO is only used together with DMA, so skip it too */
601 c->gpio_irq = 0;
602 }
603
604 r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
605 if (r < 0) {
606 dev_err(&pdev->dev, "Cannot request GPMC CS\n");
607 goto err_kfree;
608 }
609
610 if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
611 pdev->dev.driver->name) == NULL) {
612 dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
613 "size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
614 r = -EBUSY;
615 goto err_free_cs;
616 }
617 c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
618 if (c->onenand.base == NULL) {
619 r = -ENOMEM;
620 goto err_release_mem_region;
621 }
622
623 if (pdata->onenand_setup != NULL) {
624 r = pdata->onenand_setup(c->onenand.base, c->freq);
625 if (r < 0) {
626 dev_err(&pdev->dev, "Onenand platform setup failed: "
627 "%d\n", r);
628 goto err_iounmap;
629 }
630 c->setup = pdata->onenand_setup;
631 }
632
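/*
 * The OneNAND interrupt pin is wired to a GPIO: configure it as an
 * input and request a rising-edge interrupt so that waits can sleep
 * on c->irq_done instead of polling.
 */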
633 if (c->gpio_irq) {
634 if ((r = omap_request_gpio(c->gpio_irq)) < 0) {
635 dev_err(&pdev->dev, "Failed to request GPIO%d for "
636 "OneNAND\n", c->gpio_irq);
637 goto err_iounmap;
638 }
639 omap_set_gpio_direction(c->gpio_irq, 1);
640
641 if ((r = request_irq(OMAP_GPIO_IRQ(c->gpio_irq),
642 omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
643 pdev->dev.driver->name, c)) < 0)
644 goto err_release_gpio;
645 }
646
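/*
 * Try to allocate a DMA channel.  On success use non-posted writes
 * and packed 8-element bursts on both ends; on failure fall back to
 * PIO by marking the channel as unavailable (-1).
 */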
647 if (c->dma_channel >= 0) {
648 r = omap_request_dma(0, pdev->dev.driver->name,
649 omap2_onenand_dma_cb, (void *) c,
650 &c->dma_channel);
651 if (r == 0) {
652 omap_set_dma_write_mode(c->dma_channel,
653 OMAP_DMA_WRITE_NON_POSTED);
654 omap_set_dma_src_data_pack(c->dma_channel, 1);
655 omap_set_dma_src_burst_mode(c->dma_channel,
656 OMAP_DMA_DATA_BURST_8);
657 omap_set_dma_dest_data_pack(c->dma_channel, 1);
658 omap_set_dma_dest_burst_mode(c->dma_channel,
659 OMAP_DMA_DATA_BURST_8);
660 } else {
661 dev_info(&pdev->dev,
662 "failed to allocate DMA for OneNAND, "
663 "using PIO instead\n");
664 c->dma_channel = -1;
665 }
666 }
667
668 dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
669 "base %p\n", c->gpmc_cs, c->phys_base,
670 c->onenand.base);
671
672 c->pdev = pdev;
673 c->mtd.name = pdev->dev.bus_id;
674 c->mtd.priv = &c->onenand;
675 c->mtd.owner = THIS_MODULE;
676
677 if (c->dma_channel >= 0) {
678 struct onenand_chip *this = &c->onenand;
679
680 this->wait = omap2_onenand_wait;
681 if (cpu_is_omap34xx()) {
682 this->read_bufferram = omap3_onenand_read_bufferram;
683 this->write_bufferram = omap3_onenand_write_bufferram;
684 } else {
685 this->read_bufferram = omap2_onenand_read_bufferram;
686 this->write_bufferram = omap2_onenand_write_bufferram;
687 }
688 }
689
690 if ((r = onenand_scan(&c->mtd, 1)) < 0)
691 goto err_release_dma;
692
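/*
 * Bits 7:4 of the OneNAND version ID encode the supported synchronous
 * burst frequency (40, 54, 66 or 83 MHz); remember it so that
 * omap2_onenand_rephase() can pass it to the platform's
 * onenand_setup() hook.
 */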
693 switch ((c->onenand.version_id >> 4) & 0xf) {
694 case 0:
695 c->freq = 40;
696 break;
697 case 1:
698 c->freq = 54;
699 break;
700 case 2:
701 c->freq = 66;
702 break;
703 case 3:
704 c->freq = 83;
705 break;
706 }
707
708#ifdef CONFIG_MTD_PARTITIONS
709 if (pdata->parts != NULL)
710 r = add_mtd_partitions(&c->mtd, pdata->parts,
711 pdata->nr_parts);
712 else
713#endif
714 r = add_mtd_device(&c->mtd);
715 if (r < 0)
716 goto err_release_onenand;
717
718 platform_set_drvdata(pdev, c);
719
720 return 0;
721
722err_release_onenand:
723 onenand_release(&c->mtd);
724err_release_dma:
725 if (c->dma_channel != -1)
726 omap_free_dma(c->dma_channel);
727 if (c->gpio_irq)
728 free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c);
729err_release_gpio:
730 if (c->gpio_irq)
731 omap_free_gpio(c->gpio_irq);
732err_iounmap:
733 iounmap(c->onenand.base);
734err_release_mem_region:
735 release_mem_region(c->phys_base, ONENAND_IO_SIZE);
736err_free_cs:
737 gpmc_cs_free(c->gpmc_cs);
738err_kfree:
739 kfree(c);
740
741 return r;
742}
743
744static int __devexit omap2_onenand_remove(struct platform_device *pdev)
745{
746 struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
747
748 BUG_ON(c == NULL);
749
750#ifdef CONFIG_MTD_PARTITIONS
751 if (c->parts)
752 del_mtd_partitions(&c->mtd);
753 else
754 del_mtd_device(&c->mtd);
755#else
756 del_mtd_device(&c->mtd);
757#endif
758
759 onenand_release(&c->mtd);
760 if (c->dma_channel != -1)
761 omap_free_dma(c->dma_channel);
762 omap2_onenand_shutdown(pdev);
763 platform_set_drvdata(pdev, NULL);
764 if (c->gpio_irq) {
765 free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c);
766 omap_free_gpio(c->gpio_irq);
767 }
768 iounmap(c->onenand.base);
769 release_mem_region(c->phys_base, ONENAND_IO_SIZE);
770 kfree(c);
771
772 return 0;
773}
774
775static struct platform_driver omap2_onenand_driver = {
776 .probe = omap2_onenand_probe,
777 .remove = omap2_onenand_remove,
778 .shutdown = omap2_onenand_shutdown,
779 .driver = {
780 .name = DRIVER_NAME,
781 .owner = THIS_MODULE,
782 },
783};
784
785static int __init omap2_onenand_init(void)
786{
787 printk(KERN_INFO "OneNAND driver initializing\n");
788 return platform_driver_register(&omap2_onenand_driver);
789}
790
791static void __exit omap2_onenand_exit(void)
792{
793 platform_driver_unregister(&omap2_onenand_driver);
794}
795
796module_init(omap2_onenand_init);
797module_exit(omap2_onenand_exit);
798
799MODULE_ALIAS(DRIVER_NAME);
800MODULE_LICENSE("GPL");
801MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
802MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 926cf3a4135d..90ed319f26e6 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1794,7 +1794,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
 		return -EINVAL;
 	}
 
-	instr->fail_addr = 0xffffffff;
+	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
 
 	/* Grab the lock and see if the device is available */
 	onenand_get_device(mtd, FL_ERASING);
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index a5f3d60047d4..33a5d6ed6f18 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -321,8 +321,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
 	DEBUG(MTD_DEBUG_LEVEL1,
 		"SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n",
 		ssfdc->cis_block, ssfdc->erase_size, ssfdc->map_len,
-		(ssfdc->map_len + MAX_PHYS_BLK_PER_ZONE - 1) /
-		MAX_PHYS_BLK_PER_ZONE);
+		DIV_ROUND_UP(ssfdc->map_len, MAX_PHYS_BLK_PER_ZONE));
 
 	/* Set geometry */
 	ssfdc->heads = 16;
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 03c759b4eeb5..b30a0b83d7f1 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -104,12 +104,9 @@ static int vol_cdev_open(struct inode *inode, struct file *file)
 	struct ubi_volume_desc *desc;
 	int vol_id = iminor(inode) - 1, mode, ubi_num;
 
-	lock_kernel();
 	ubi_num = ubi_major2num(imajor(inode));
-	if (ubi_num < 0) {
-		unlock_kernel();
+	if (ubi_num < 0)
 		return ubi_num;
-	}
 
 	if (file->f_mode & FMODE_WRITE)
 		mode = UBI_READWRITE;
@@ -119,7 +116,6 @@ static int vol_cdev_open(struct inode *inode, struct file *file)
 	dbg_gen("open volume %d, mode %d", vol_id, mode);
 
 	desc = ubi_open_volume(ubi_num, vol_id, mode);
-	unlock_kernel();
 	if (IS_ERR(desc))
 		return PTR_ERR(desc);
 
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 967bb4406df9..4f2daa5bbecf 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -387,7 +387,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
 		pnum, vol_id, lnum, ec, sqnum, bitflips);
 
 	sv = add_volume(si, vol_id, pnum, vid_hdr);
-	if (IS_ERR(sv) < 0)
+	if (IS_ERR(sv))
 		return PTR_ERR(sv);
 
 	if (si->max_sqnum < sqnum)
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 217d0e111b2a..333c8941552f 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -244,8 +244,8 @@ static int vtbl_check(const struct ubi_device *ubi,
 		}
 
 		if (reserved_pebs > ubi->good_peb_count) {
-			dbg_err("too large reserved_pebs, good PEBs %d",
-				ubi->good_peb_count);
+			dbg_err("too large reserved_pebs %d, good PEBs %d",
+				reserved_pebs, ubi->good_peb_count);
 			err = 9;
 			goto bad;
 		}